]> git.proxmox.com Git - mirror_iproute2.git/blob - lib/bpf.c
Merge branch 'master' into net-next
[mirror_iproute2.git] / lib / bpf.c
1 /*
2 * bpf.c BPF common code
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Daniel Borkmann <daniel@iogearbox.net>
10 * Jiri Pirko <jiri@resnulli.us>
11 * Alexei Starovoitov <ast@kernel.org>
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <string.h>
18 #include <stdbool.h>
19 #include <stdint.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <stdarg.h>
23 #include <limits.h>
24 #include <assert.h>
25
26 #ifdef HAVE_ELF
27 #include <libelf.h>
28 #include <gelf.h>
29 #endif
30
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/un.h>
34 #include <sys/vfs.h>
35 #include <sys/mount.h>
36 #include <sys/syscall.h>
37 #include <sys/sendfile.h>
38 #include <sys/resource.h>
39
40 #include <arpa/inet.h>
41
42 #include "utils.h"
43
44 #include "bpf_util.h"
45 #include "bpf_elf.h"
46 #include "bpf_scm.h"
47
/* Per program type metadata: human-readable type name, the bpf fs
 * subdirectory used for pinning, the default ELF section to load
 * from, and whether the resulting prog fd may be exported over a
 * Unix domain socket.
 */
struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};

/* Program types handled here; index 0 acts as the "master" type whose
 * subdir the others link to (see bpf_gen_hierarchy()).
 */
static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
};

/* Metadata table, indexed directly by enum bpf_prog_type. */
static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type = "cls",
		.subdir = "tc",
		.section = ELF_SECTION_CLASSIFIER,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type = "act",
		.subdir = "tc",
		.section = ELF_SECTION_ACTION,
		.may_uds_export = true,
	},
};
74
75 static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
76 {
77 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
78 __bpf_prog_meta[type].subdir);
79 return __bpf_prog_meta[type].subdir;
80 }
81
82 const char *bpf_prog_to_default_section(enum bpf_prog_type type)
83 {
84 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
85 __bpf_prog_meta[type].section);
86 return __bpf_prog_meta[type].section;
87 }
88
#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, bool verbose);
#else
/* Stub used when iproute2 was built without libelf: loading eBPF
 * object files is not possible, so fail with ENOSYS.
 */
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif
101
/* Pack a user-space pointer into the 64-bit field layout that the
 * bpf(2) syscall's union bpf_attr expects.
 */
static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

	return (__u64)val;
}
106
/* Thin wrapper around the bpf(2) syscall; returns the raw syscall
 * result (an fd or 0 on success, -1 with errno set on failure).
 * When the kernel headers are too old to provide __NR_bpf, fail
 * with ENOSYS instead.
 */
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}
117
118 static int bpf_map_update(int fd, const void *key, const void *value,
119 uint64_t flags)
120 {
121 union bpf_attr attr = {};
122
123 attr.map_fd = fd;
124 attr.key = bpf_ptr_to_u64(key);
125 attr.value = bpf_ptr_to_u64(value);
126 attr.flags = flags;
127
128 return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
129 }
130
/* Parse the instruction-count prefix of a classic BPF program given
 * either directly as a string ("len,op jt jf k,...") or as the first
 * line of a file.  On success, *bpf_string points at the program
 * string, *need_release tells the caller whether it must free() it,
 * and *bpf_len holds the announced instruction count.
 *
 * Returns 0 on success, negative errno-style value on failure.
 */
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *last;
		FILE *fp;

		/* Worst case: maximum count prefix plus every opcode
		 * printed with maximum field widths.
		 */
		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		if (!fgets(tmp_string, tmp_len, fp)) {
			free(tmp_string);
			fclose(fp);
			return -EIO;
		}

		fclose(fp);

		/* Strip a trailing newline.  Guard against an empty
		 * read (e.g. a file starting with a NUL byte), which
		 * previously indexed tmp_string[-1].
		 */
		if (tmp_string[0]) {
			last = &tmp_string[strlen(tmp_string) - 1];
			if (*last == '\n')
				*last = 0;
		}

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	/* The program must start with "<len><separator>". */
	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}
182
/* Decode a classic BPF program in the textual "len,c t f k,..."
 * format (optionally read from a file) into bpf_ops.  Returns the
 * number of decoded instructions, or a negative error code.
 */
static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	const char separator = ',';
	__u16 announced = 0;
	bool need_release;
	char *prog, *tok;
	int ret = 0, done = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &announced, &prog,
			     &need_release, separator))
		return -EINVAL;
	if (announced == 0 || announced > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	/* Every instruction sits behind a separator; stop at the
	 * terminating separator (or end of string).
	 */
	for (tok = prog; (tok = strchr(tok, separator)) && (++tok)[0];
	     done++) {
		if (done >= announced) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(tok, "%hu %hhu %hhu %u,",
			   &bpf_ops[done].code, &bpf_ops[done].jt,
			   &bpf_ops[done].jf, &bpf_ops[done].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", done);
			ret = -EINVAL;
			goto out;
		}
	}

	if (done != announced) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = announced;
out:
	if (need_release)
		free(prog);

	return ret;
}
232
/* Print a classic BPF program held in an rtattr as tc-style
 * bytecode: 'len,c t f k,...' with a quote after the last opcode
 * instead of a trailing separator.
 */
void bpf_print_ops(FILE *f, struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = (struct sock_filter *) RTA_DATA(bpf_ops);
	int i;

	if (!len)
		return;

	fprintf(f, "bytecode \'%u,", len);
	for (i = 0; i < len; i++)
		fprintf(f, "%hu %hhu %hhu %u%c", ops[i].code, ops[i].jt,
			ops[i].jf, ops[i].k, i == len - 1 ? '\'' : ',');
}
250
251 static void bpf_map_pin_report(const struct bpf_elf_map *pin,
252 const struct bpf_elf_map *obj)
253 {
254 fprintf(stderr, "Map specification differs from pinned file!\n");
255
256 if (obj->type != pin->type)
257 fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
258 obj->type, pin->type);
259 if (obj->size_key != pin->size_key)
260 fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
261 obj->size_key, pin->size_key);
262 if (obj->size_value != pin->size_value)
263 fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
264 obj->size_value, pin->size_value);
265 if (obj->max_elem != pin->max_elem)
266 fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
267 obj->max_elem, pin->max_elem);
268 if (obj->flags != pin->flags)
269 fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
270 obj->flags, pin->flags);
271
272 fprintf(stderr, "\n");
273 }
274
275 static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
276 int length)
277 {
278 char file[PATH_MAX], buff[4096];
279 struct bpf_elf_map tmp = {}, zero = {};
280 unsigned int val;
281 FILE *fp;
282
283 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
284
285 fp = fopen(file, "r");
286 if (!fp) {
287 fprintf(stderr, "No procfs support?!\n");
288 return -EIO;
289 }
290
291 while (fgets(buff, sizeof(buff), fp)) {
292 if (sscanf(buff, "map_type:\t%u", &val) == 1)
293 tmp.type = val;
294 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
295 tmp.size_key = val;
296 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
297 tmp.size_value = val;
298 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
299 tmp.max_elem = val;
300 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
301 tmp.flags = val;
302 }
303
304 fclose(fp);
305
306 if (!memcmp(&tmp, map, length)) {
307 return 0;
308 } else {
309 /* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
310 * so just accept it. We know we do have an eBPF fd and in this
311 * case, everything is 0. It is guaranteed that no such map exists
312 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
313 */
314 if (!memcmp(&tmp, &zero, length))
315 return 0;
316
317 bpf_map_pin_report(&tmp, map);
318 return -EINVAL;
319 }
320 }
321
/* Mount a bpf fs instance at target.  The mount point is first made
 * private (bind-mounting it onto itself once if necessary) so the
 * new mount does not propagate to peer namespaces.
 */
static int bpf_mnt_fs(const char *target)
{
	bool did_bind = false;

	for (;;) {
		if (!mount("", target, "none", MS_PRIVATE | MS_REC, NULL))
			break;

		if (errno != EINVAL || did_bind) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		/* EINVAL: target is not a mount point yet; turn it
		 * into one via a self bind mount and retry once.
		 */
		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		did_bind = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}
350
/* Return 0 if mnt is a mounted filesystem whose f_type matches the
 * given magic, -ENOENT otherwise.
 */
static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0 ||
	    (unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}
362
363 static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
364 char *mnt, int len,
365 const char * const *known_mnts)
366 {
367 const char * const *ptr;
368 char type[100];
369 FILE *fp;
370
371 if (known_mnts) {
372 ptr = known_mnts;
373 while (*ptr) {
374 if (bpf_valid_mntpt(*ptr, magic) == 0) {
375 strncpy(mnt, *ptr, len - 1);
376 mnt[len - 1] = 0;
377 return mnt;
378 }
379 ptr++;
380 }
381 }
382
383 fp = fopen("/proc/mounts", "r");
384 if (fp == NULL || len != PATH_MAX)
385 return NULL;
386
387 while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
388 mnt, type) == 2) {
389 if (strcmp(type, fstype) == 0)
390 break;
391 }
392
393 fclose(fp);
394 if (strcmp(type, fstype) != 0)
395 return NULL;
396
397 return mnt;
398 }
399
/* Locate the tracefs mount, open its trace_pipe and stream it to
 * stderr.  Returns -1 if tracefs could not be found or the pipe not
 * opened; otherwise the read loop below never terminates (the user
 * interrupts with ^C).
 */
int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	char tpipe[PATH_MAX];
	const char *mnt;
	int fd;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd = open(tpipe, O_RDONLY);
	if (fd < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd, buff, sizeof(buff) - 1);
		if (ret > 0) {
			/* NOTE(review): write() result ignored; this
			 * is a best-effort dump to stderr (fd 2).
			 */
			write(2, buff, ret);
			fflush(stderr);
		}
	}

	/* Unreachable; kept for the function's int signature. */
	return 0;
}
441
442 static int bpf_gen_global(const char *bpf_sub_dir)
443 {
444 char bpf_glo_dir[PATH_MAX];
445 int ret;
446
447 snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
448 bpf_sub_dir, BPF_DIR_GLOBALS);
449
450 ret = mkdir(bpf_glo_dir, S_IRWXU);
451 if (ret && errno != EEXIST) {
452 fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
453 strerror(errno));
454 return ret;
455 }
456
457 return 0;
458 }
459
460 static int bpf_gen_master(const char *base, const char *name)
461 {
462 char bpf_sub_dir[PATH_MAX];
463 int ret;
464
465 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);
466
467 ret = mkdir(bpf_sub_dir, S_IRWXU);
468 if (ret && errno != EEXIST) {
469 fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
470 strerror(errno));
471 return ret;
472 }
473
474 return bpf_gen_global(bpf_sub_dir);
475 }
476
/* Fallback for filesystems where creating a symlink is not allowed:
 * create the directory and bind-mount the master's directory onto it
 * instead.  The directory is removed again if the mount fails.
 */
static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int err;

	err = mkdir(full_name, S_IRWXU);
	if (err) {
		/* Caller only takes this path when nothing exists. */
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return err;
	}

	err = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (!err)
		return 0;

	rmdir(full_name);
	fprintf(stderr, "mount --bind %s %s failed: %s\n",
		full_link, full_name, strerror(errno));
	return err;
}
499
500 static int bpf_gen_slave(const char *base, const char *name,
501 const char *link)
502 {
503 char bpf_lnk_dir[PATH_MAX];
504 char bpf_sub_dir[PATH_MAX];
505 struct stat sb = {};
506 int ret;
507
508 snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
509 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);
510
511 ret = symlink(bpf_lnk_dir, bpf_sub_dir);
512 if (ret) {
513 if (errno != EEXIST) {
514 if (errno != EPERM) {
515 fprintf(stderr, "symlink %s failed: %s\n",
516 bpf_sub_dir, strerror(errno));
517 return ret;
518 }
519
520 return bpf_slave_via_bind_mnt(bpf_sub_dir,
521 bpf_lnk_dir);
522 }
523
524 ret = lstat(bpf_sub_dir, &sb);
525 if (ret) {
526 fprintf(stderr, "lstat %s failed: %s\n",
527 bpf_sub_dir, strerror(errno));
528 return ret;
529 }
530
531 if ((sb.st_mode & S_IFMT) != S_IFLNK)
532 return bpf_gen_global(bpf_sub_dir);
533 }
534
535 return 0;
536 }
537
538 static int bpf_gen_hierarchy(const char *base)
539 {
540 int ret, i;
541
542 ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
543 for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
544 ret = bpf_gen_slave(base,
545 bpf_prog_to_subdir(__bpf_types[i]),
546 bpf_prog_to_subdir(__bpf_types[0]));
547 return ret;
548 }
549
/* Return the bpf fs working directory for the given program type,
 * e.g. "<mnt>/tc/", or NULL when no bpf fs could be found or
 * mounted.  The mount point lookup (and, if needed, mounting plus
 * hierarchy creation) happens only on the first call; afterwards the
 * cached result is reused and only the per-type subdir is appended.
 */
static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out) {
			/* bpf_tmp doubles as scratch space for the
			 * per-type path on every cached call.
			 */
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	/* Look for an existing bpf fs mount first ... */
	mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp, sizeof(bpf_tmp),
			     bpf_known_mnts);
	if (!mnt) {
		/* ... otherwise mount one ourselves, honouring the
		 * BPF_ENV_MNT environment override.
		 */
		mnt = getenv(BPF_ENV_MNT);
		if (!mnt)
			mnt = BPF_DIR_MNT;
		ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	/* Cache even the failure case so we don't retry mounting. */
	bpf_mnt_cached = true;
	return mnt;
}
600
601 static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
602 {
603 union bpf_attr attr = {};
604 char tmp[PATH_MAX];
605
606 if (strlen(pathname) > 2 && pathname[0] == 'm' &&
607 pathname[1] == ':' && bpf_get_work_dir(type)) {
608 snprintf(tmp, sizeof(tmp), "%s/%s",
609 bpf_get_work_dir(type), pathname + 2);
610 pathname = tmp;
611 }
612
613 attr.pathname = bpf_ptr_to_u64(pathname);
614
615 return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
616 }
617
/* How the BPF program was specified on the command line. */
enum bpf_mode {
	CBPF_BYTECODE,	/* classic BPF bytecode given inline */
	CBPF_FILE,	/* classic BPF bytecode read from a file */
	EBPF_OBJECT,	/* eBPF ELF object file to load */
	EBPF_PINNED,	/* eBPF object already pinned in bpf fs */
	BPF_MODE_MAX,
};
625
/* Parse the BPF part of the command line held in cfg->argc/argv:
 * first the input mode keyword (restricted by opt_tbl), then the
 * mode-specific arguments (file, type, section, export, verbose for
 * eBPF).  Loads/fetches the program accordingly and fills in cfg.
 * Returns the opcode count (cBPF) or prog fd (eBPF), negative on
 * error.  NEXT_ARG()/NEXT_ARG_FWD()/PREV_ARG() operate on the local
 * argc/argv pair.
 */
static int bpf_parse(enum bpf_prog_type *type, enum bpf_mode *mode,
		     struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	/* Mode keyword; only modes enabled in opt_tbl are accepted. */
	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		*mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		*mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		*mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		*mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (*mode == EBPF_OBJECT || *mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		/* Optional "type <name>"; defaults to SCHED_CLS. */
		if (*type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						*type = i;
						break;
					}
				}

				if (*type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				*type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		/* Optional "section <name>"; defaults per type. */
		section = bpf_prog_to_default_section(*type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		/* Optional UDS export target; the BPF_ENV_UDS
		 * environment variable takes precedence.
		 */
		if (__bpf_prog_meta[*type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		/* Step back so the caller sees the last consumed arg. */
		PREV_ARG();
	}

	if (*mode == CBPF_BYTECODE || *mode == CBPF_FILE)
		ret = bpf_ops_parse(argc, argv, cfg->ops, *mode == CBPF_FILE);
	else if (*mode == EBPF_OBJECT)
		ret = bpf_obj_open(file, *type, section, verbose);
	else if (*mode == EBPF_PINNED)
		ret = bpf_obj_get(file, *type);
	else
		return -1;

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;

	return ret;
}
732
/* Parse the command line into cfg, using an on-stack opcode buffer
 * for classic BPF, then hand the result to the matching callback:
 * cbpf_cb gets the opcode array and count, ebpf_cb gets the prog fd
 * plus an "object:[section]" annotation ("*fsobj" for pinned
 * objects).  Returns 0 on success, negative on parse/load failure.
 */
static int bpf_parse_opt_tbl(enum bpf_prog_type type, struct bpf_cfg_in *cfg,
			     const struct bpf_cfg_ops *ops, void *nl,
			     const bool *opt_tbl)
{
	struct sock_filter opcodes[BPF_MAXINSNS];
	char annotation[256];
	enum bpf_mode mode;
	int ret;

	cfg->ops = opcodes;
	ret = bpf_parse(&type, &mode, cfg, opt_tbl);
	/* opcodes is stack memory; don't let it escape via cfg. */
	cfg->ops = NULL;
	if (ret < 0)
		return ret;

	if (mode == CBPF_BYTECODE || mode == CBPF_FILE)
		ops->cbpf_cb(nl, opcodes, ret);
	if (mode == EBPF_OBJECT || mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, ret, annotation);
	}

	return 0;
}
759
760 int bpf_parse_common(enum bpf_prog_type type, struct bpf_cfg_in *cfg,
761 const struct bpf_cfg_ops *ops, void *nl)
762 {
763 bool opt_tbl[BPF_MODE_MAX] = {};
764
765 if (ops->cbpf_cb) {
766 opt_tbl[CBPF_BYTECODE] = true;
767 opt_tbl[CBPF_FILE] = true;
768 }
769
770 if (ops->ebpf_cb) {
771 opt_tbl[EBPF_OBJECT] = true;
772 opt_tbl[EBPF_PINNED] = true;
773 }
774
775 return bpf_parse_opt_tbl(type, cfg, ops, nl, opt_tbl);
776 }
777
/* Load (or fetch) an eBPF program per the arguments in argv and
 * graft its fd into the pinned PROG_ARRAY map at map_path, at index
 * *key, or — when key is NULL — at the key encoded in the section
 * name ("<x>/<key>").  Returns 0 on success, negative on error.
 */
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	enum bpf_prog_type type = BPF_PROG_TYPE_UNSPEC;
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	/* Expected shape of the target map for the self-check. */
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.argc = argc,
		.argv = argv,
	};
	int ret, prog_fd, map_fd;
	enum bpf_mode mode;
	uint32_t map_key;

	prog_fd = bpf_parse(&type, &mode, &cfg, opt_tbl);
	if (prog_fd < 0)
		return prog_fd;
	if (key) {
		map_key = *key;
	} else {
		/* Fall back to the "<section>/<key>" convention. */
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	/* Compare everything before max_elem: the pinned array's
	 * capacity is the user's choice, so it is not checked.
	 */
	ret = bpf_map_selfcheck_pinned(map_fd, &test,
				       offsetof(struct bpf_elf_map, max_elem));
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}
836
837 #ifdef HAVE_ELF
/* A program extracted from an ELF section, ready for loading. */
struct bpf_elf_prog {
	enum bpf_prog_type type;
	const struct bpf_insn *insns;
	/* size of insns in bytes (see bpf_prog_report()) */
	size_t size;
	const char *license;
};

/* Hash-table node mapping a custom pinning id to its fs subpath. */
struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

/* State carried through parsing and loading of one ELF object. */
struct bpf_elf_ctx {
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	int obj_fd;
	int map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	int sym_num;
	int map_num;
	/* size of one entry in the maps section (see
	 * bpf_map_fetch_name()) */
	int map_len;
	bool *sec_done;
	/* section index of the maps section */
	int sec_maps;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	bool verbose;
	struct bpf_elf_st stat;
	/* custom pinning lookup, keyed by pinning id */
	struct bpf_hash_entry *ht[256];
	/* verifier log buffer, grown on demand */
	char *log;
	size_t log_size;
};

/* One ELF section together with its header and name. */
struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

/* Map description handed to the UDS export machinery. */
struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};
885
/* Print a formatted message to stderr, followed by the collected
 * verifier log: the whole log in verbose mode, otherwise at most the
 * last BPF_MAX_LOG bytes.  The log buffer is cleared afterwards.
 */
static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (ctx->log && ctx->log[0]) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			/* Only show the tail; the start of a long log
			 * is rarely the interesting part.
			 */
			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}
912
913 static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
914 {
915 size_t log_size = ctx->log_size;
916 void *ptr;
917
918 if (!ctx->log) {
919 log_size = 65536;
920 } else {
921 log_size <<= 1;
922 if (log_size > (UINT_MAX >> 8))
923 return -EINVAL;
924 }
925
926 ptr = realloc(ctx->log, log_size);
927 if (!ptr)
928 return -ENOMEM;
929
930 ctx->log = ptr;
931 ctx->log_size = log_size;
932
933 return 0;
934 }
935
936 static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
937 uint32_t size_value, uint32_t max_elem,
938 uint32_t flags)
939 {
940 union bpf_attr attr = {};
941
942 attr.map_type = type;
943 attr.key_size = size_key;
944 attr.value_size = size_value;
945 attr.max_entries = max_elem;
946 attr.map_flags = flags;
947
948 return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
949 }
950
951 static int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
952 size_t size_insns, const char *license, char *log,
953 size_t size_log)
954 {
955 union bpf_attr attr = {};
956
957 attr.prog_type = type;
958 attr.insns = bpf_ptr_to_u64(insns);
959 attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
960 attr.license = bpf_ptr_to_u64(license);
961
962 if (size_log > 0) {
963 attr.log_buf = bpf_ptr_to_u64(log);
964 attr.log_size = size_log;
965 attr.log_level = 1;
966 }
967
968 return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
969 }
970
971 static int bpf_obj_pin(int fd, const char *pathname)
972 {
973 union bpf_attr attr = {};
974
975 attr.pathname = bpf_ptr_to_u64(pathname);
976 attr.bpf_fd = fd;
977
978 return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
979 }
980
/* Compute the SHA-1 digest of the file at 'object' via the kernel
 * crypto API (AF_ALG), avoiding any userspace hash implementation.
 * 'out' must hold exactly len == 20 bytes.  Returns 0 on success,
 * negative on error.
 */
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0) {
		fprintf(stderr, "Cannot get AF_ALG socket: %s\n",
			strerror(errno));
		return cfd;
	}

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0) {
		fprintf(stderr, "Error binding socket: %s\n", strerror(errno));
		goto out_cfd;
	}

	/* accept() yields the operation fd we feed data through. */
	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		fprintf(stderr, "Error accepting socket: %s\n",
			strerror(errno));
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	/* Stream the whole file into the hash socket in one go. */
	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		/* NOTE(review): %zu does not match off_t here;
		 * works on common ABIs but is formally mismatched.
		 */
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	/* Reading back retrieves the finalized digest. */
	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}
1055
/* Return a hex string uniquely identifying the object file by its
 * SHA-1 hash.  The hash is computed once and cached, so later calls
 * may pass NULL for pathname.  Returns NULL if hashing failed.
 */
static const char *bpf_get_obj_uid(const char *pathname)
{
	static char bpf_uid[64];
	static bool bpf_uid_cached;
	uint8_t sha[20];

	if (!bpf_uid_cached) {
		if (bpf_obj_hash(pathname, sha, sizeof(sha))) {
			fprintf(stderr, "Object hashing failed!\n");
			return NULL;
		}

		hexstring_n2a(sha, sizeof(sha), bpf_uid, sizeof(bpf_uid));
		bpf_uid_cached = true;
	}

	return bpf_uid;
}
1077
1078 static int bpf_init_env(const char *pathname)
1079 {
1080 struct rlimit limit = {
1081 .rlim_cur = RLIM_INFINITY,
1082 .rlim_max = RLIM_INFINITY,
1083 };
1084
1085 /* Don't bother in case we fail! */
1086 setrlimit(RLIMIT_MEMLOCK, &limit);
1087
1088 if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC)) {
1089 fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
1090 return 0;
1091 }
1092
1093 if (!bpf_get_obj_uid(pathname))
1094 return -1;
1095
1096 return 0;
1097 }
1098
1099 static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
1100 uint32_t pinning)
1101 {
1102 struct bpf_hash_entry *entry;
1103
1104 entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
1105 while (entry && entry->pinning != pinning)
1106 entry = entry->next;
1107
1108 return entry ? entry->subpath : NULL;
1109 }
1110
1111 static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
1112 uint32_t pinning)
1113 {
1114 switch (pinning) {
1115 case PIN_OBJECT_NS:
1116 case PIN_GLOBAL_NS:
1117 return false;
1118 case PIN_NONE:
1119 return true;
1120 default:
1121 return !bpf_custom_pinning(ctx, pinning);
1122 }
1123 }
1124
1125 static void bpf_make_pathname(char *pathname, size_t len, const char *name,
1126 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1127 {
1128 switch (pinning) {
1129 case PIN_OBJECT_NS:
1130 snprintf(pathname, len, "%s/%s/%s",
1131 bpf_get_work_dir(ctx->type),
1132 bpf_get_obj_uid(NULL), name);
1133 break;
1134 case PIN_GLOBAL_NS:
1135 snprintf(pathname, len, "%s/%s/%s",
1136 bpf_get_work_dir(ctx->type),
1137 BPF_DIR_GLOBALS, name);
1138 break;
1139 default:
1140 snprintf(pathname, len, "%s/../%s/%s",
1141 bpf_get_work_dir(ctx->type),
1142 bpf_custom_pinning(ctx, pinning), name);
1143 break;
1144 }
1145 }
1146
1147 static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
1148 uint32_t pinning)
1149 {
1150 char pathname[PATH_MAX];
1151
1152 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1153 return 0;
1154
1155 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1156 return bpf_obj_get(pathname, ctx->type);
1157 }
1158
1159 static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
1160 {
1161 char tmp[PATH_MAX];
1162 int ret;
1163
1164 snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
1165 bpf_get_obj_uid(NULL));
1166
1167 ret = mkdir(tmp, S_IRWXU);
1168 if (ret && errno != EEXIST) {
1169 fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
1170 return ret;
1171 }
1172
1173 return 0;
1174 }
1175
1176 static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
1177 const char *todo)
1178 {
1179 char tmp[PATH_MAX], rem[PATH_MAX], *sub;
1180 int ret;
1181
1182 snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
1183 snprintf(rem, sizeof(rem), "%s/", todo);
1184 sub = strtok(rem, "/");
1185
1186 while (sub) {
1187 if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
1188 return -EINVAL;
1189
1190 strcat(tmp, sub);
1191 strcat(tmp, "/");
1192
1193 ret = mkdir(tmp, S_IRWXU);
1194 if (ret && errno != EEXIST) {
1195 fprintf(stderr, "mkdir %s failed: %s\n", tmp,
1196 strerror(errno));
1197 return ret;
1198 }
1199
1200 sub = strtok(NULL, "/");
1201 }
1202
1203 return 0;
1204 }
1205
1206 static int bpf_place_pinned(int fd, const char *name,
1207 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1208 {
1209 char pathname[PATH_MAX];
1210 const char *tmp;
1211 int ret = 0;
1212
1213 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1214 return 0;
1215
1216 if (pinning == PIN_OBJECT_NS)
1217 ret = bpf_make_obj_path(ctx);
1218 else if ((tmp = bpf_custom_pinning(ctx, pinning)))
1219 ret = bpf_make_custom_path(ctx, tmp);
1220 if (ret < 0)
1221 return ret;
1222
1223 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1224 return bpf_obj_pin(fd, pathname);
1225 }
1226
1227 static void bpf_prog_report(int fd, const char *section,
1228 const struct bpf_elf_prog *prog,
1229 struct bpf_elf_ctx *ctx)
1230 {
1231 unsigned int insns = prog->size / sizeof(struct bpf_insn);
1232
1233 fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
1234 fd < 0 ? "rejected: " : "loaded",
1235 fd < 0 ? strerror(errno) : "",
1236 fd < 0 ? errno : fd);
1237
1238 fprintf(stderr, " - Type: %u\n", prog->type);
1239 fprintf(stderr, " - Instructions: %u (%u over limit)\n",
1240 insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
1241 fprintf(stderr, " - License: %s\n\n", prog->license);
1242
1243 bpf_dump_error(ctx, "Verifier analysis:\n\n");
1244 }
1245
/* Load one program into the kernel, retrying with a doubled verifier
 * log buffer (up to 6 times) when the log did not fit, so the user
 * gets a complete error dump.  Returns the prog fd, or a negative
 * error after reporting.
 */
static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	/* errno is inspected below; start from a clean slate. */
	errno = 0;
	fd = bpf_prog_load(prog->type, prog->insns, prog->size,
			   prog->license, ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 6 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}
1275
1276 static void bpf_map_report(int fd, const char *name,
1277 const struct bpf_elf_map *map,
1278 struct bpf_elf_ctx *ctx)
1279 {
1280 fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
1281 fd < 0 ? "rejected: " : "loaded",
1282 fd < 0 ? strerror(errno) : "",
1283 fd < 0 ? errno : fd);
1284
1285 fprintf(stderr, " - Type: %u\n", map->type);
1286 fprintf(stderr, " - Identifier: %u\n", map->id);
1287 fprintf(stderr, " - Pinning: %u\n", map->pinning);
1288 fprintf(stderr, " - Size key: %u\n", map->size_key);
1289 fprintf(stderr, " - Size value: %u\n", map->size_value);
1290 fprintf(stderr, " - Max elems: %u\n", map->max_elem);
1291 fprintf(stderr, " - Flags: %#x\n\n", map->flags);
1292 }
1293
/* Create (or reuse a pinned) BPF map for the given ELF map spec.
 *
 * If the map is marked for pinning and already present in the BPF fs,
 * the pinned fd is reused after a self-check that the pinned object's
 * properties match the ELF spec (members up to, but excluding, 'id').
 * Otherwise a fresh map is created and pinned when requested.
 *
 * Returns the map fd on success, negative error otherwise.
 */
static int bpf_map_attach(const char *name, const struct bpf_elf_map *map,
			  struct bpf_elf_ctx *ctx)
{
	int fd, ret;

	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map,
					       offsetof(struct bpf_elf_map,
							id));
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	/* Zero errno so the report below reflects this creation attempt. */
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags);
	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx);
		if (fd < 0)
			return fd;
	}

	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	/* EEXIST is tolerated: another process may have pinned the same
	 * map concurrently; our fd remains valid either way.
	 */
	if (ret < 0 && errno != EEXIST) {
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		close(fd);
		return ret;
	}

	return fd;
}
1335
1336 static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
1337 const GElf_Sym *sym)
1338 {
1339 return ctx->str_tab->d_buf + sym->st_name;
1340 }
1341
1342 static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
1343 {
1344 GElf_Sym sym;
1345 int i;
1346
1347 for (i = 0; i < ctx->sym_num; i++) {
1348 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1349 continue;
1350
1351 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1352 GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
1353 sym.st_shndx != ctx->sec_maps ||
1354 sym.st_value / ctx->map_len != which)
1355 continue;
1356
1357 return bpf_str_tab_name(ctx, &sym);
1358 }
1359
1360 return NULL;
1361 }
1362
1363 static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
1364 {
1365 const char *map_name;
1366 int i, fd;
1367
1368 for (i = 0; i < ctx->map_num; i++) {
1369 map_name = bpf_map_fetch_name(ctx, i);
1370 if (!map_name)
1371 return -EIO;
1372
1373 fd = bpf_map_attach(map_name, &ctx->maps[i], ctx);
1374 if (fd < 0)
1375 return fd;
1376
1377 ctx->map_fds[i] = fd;
1378 }
1379
1380 return 0;
1381 }
1382
1383 static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
1384 {
1385 int i, num = 0;
1386 GElf_Sym sym;
1387
1388 for (i = 0; i < ctx->sym_num; i++) {
1389 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1390 continue;
1391
1392 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1393 GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
1394 sym.st_shndx != ctx->sec_maps)
1395 continue;
1396 num++;
1397 }
1398
1399 return num;
1400 }
1401
/* Fetch header, name and payload of ELF section index 'section' into
 * *data. Returns 0 on success, negative error otherwise (empty or
 * nameless sections yield -ENOENT).
 */
static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	sec_edata = elf_getdata(sec_fd, NULL);
	/* A non-NULL result from the second elf_getdata() call would mean
	 * the section payload is split across multiple data chunks, which
	 * this loader does not support.
	 */
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}
1433
/* Oldest supported on-disk layout of struct bpf_elf_map: the leading
 * members every object file must at least carry. Used as the lower
 * size bound when fixing up maps from ELF files built against older
 * struct bpf_elf_map definitions (see bpf_fetch_maps_end()).
 */
struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};
1440
1441 static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
1442 struct bpf_elf_sec_data *data)
1443 {
1444 ctx->map_num = data->sec_data->d_size;
1445 ctx->sec_maps = section;
1446 ctx->sec_done[section] = true;
1447
1448 if (ctx->map_num > sizeof(ctx->maps)) {
1449 fprintf(stderr, "Too many BPF maps in ELF section!\n");
1450 return -ENOMEM;
1451 }
1452
1453 memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
1454 return 0;
1455 }
1456
/* Verify the maps section is a dense array of equally-sized entries:
 * every multiple of ctx->map_len up to 'end' must have a matching
 * global map symbol. Returns 0 if the layout checks out, -1 otherwise.
 */
static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			/* Last symbol reached without matching 'off'. */
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}
1482
/* Post-process the raw maps section copied by bpf_fetch_maps_begin():
 * derive the per-entry size from the symbol count, verify the layout,
 * and - when the object was built against an older, smaller
 * struct bpf_elf_map - widen each entry into the current layout with
 * the missing trailing members left zeroed.
 *
 * Returns 0 on success, -EINVAL for unsupported/malformed sections.
 */
static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%u maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	/* Section size must divide evenly among the map symbols. */
	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
		fprintf(stderr, "Number BPF map symbols are not multiple of struct bpf_elf_map!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		/* Exact match with our layout: nothing to fix up. */
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	/* Older but known layout: expand each packed entry into fixup[]. */
	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* The fixup leaves the rest of the members as zero, which
		 * is fine currently, but option exist to set some other
		 * default value as well when needed in future.
		 */
		memcpy(&fixup[i], buff, ctx->map_len);
	}

	memcpy(ctx->maps, fixup, sizeof(fixup));

	printf("Note: %zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
	       sizeof(struct bpf_elf_map) - ctx->map_len);
	return 0;
}
1534
1535 static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
1536 struct bpf_elf_sec_data *data)
1537 {
1538 if (data->sec_data->d_size > sizeof(ctx->license))
1539 return -ENOMEM;
1540
1541 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
1542 ctx->sec_done[section] = true;
1543 return 0;
1544 }
1545
1546 static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
1547 struct bpf_elf_sec_data *data)
1548 {
1549 ctx->sym_tab = data->sec_data;
1550 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
1551 ctx->sec_done[section] = true;
1552 return 0;
1553 }
1554
1555 static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
1556 struct bpf_elf_sec_data *data)
1557 {
1558 ctx->str_tab = data->sec_data;
1559 ctx->sec_done[section] = true;
1560 return 0;
1561 }
1562
1563 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
1564 {
1565 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
1566 }
1567
/* First ELF pass: collect everything needed before programs can be
 * loaded - map specs, license, symbol and string tables - and, when
 * map data is present, fix up the map structs and create all maps in
 * the kernel. Returns a negative error on failure.
 */
static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_sec_data data;
	int i, ret = -1;

	/* Section 0 is the reserved NULL section, start at 1. */
	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0)
			continue;

		if (data.sec_hdr.sh_type == SHT_PROGBITS &&
		    !strcmp(data.sec_name, ELF_SECTION_MAPS))
			ret = bpf_fetch_maps_begin(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
			ret = bpf_fetch_license(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
			 !strcmp(data.sec_name, ".symtab"))
			ret = bpf_fetch_symtab(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_STRTAB &&
			 !strcmp(data.sec_name, ".strtab"))
			ret = bpf_fetch_strtab(ctx, i, &data);
		if (ret < 0) {
			fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
				i);
			return ret;
		}
	}

	if (bpf_has_map_data(ctx)) {
		ret = bpf_fetch_maps_end(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
			return ret;
		}

		ret = bpf_maps_attach_all(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error loading maps into kernel!\n");
			return ret;
		}
	}

	/* Note: 'ret' carries the last section's result here. */
	return ret;
}
1613
1614 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
1615 bool *sseen)
1616 {
1617 struct bpf_elf_sec_data data;
1618 struct bpf_elf_prog prog;
1619 int ret, i, fd = -1;
1620
1621 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
1622 if (ctx->sec_done[i])
1623 continue;
1624
1625 ret = bpf_fill_section_data(ctx, i, &data);
1626 if (ret < 0 ||
1627 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
1628 data.sec_hdr.sh_flags & SHF_EXECINSTR &&
1629 !strcmp(data.sec_name, section)))
1630 continue;
1631
1632 *sseen = true;
1633
1634 memset(&prog, 0, sizeof(prog));
1635 prog.type = ctx->type;
1636 prog.insns = data.sec_data->d_buf;
1637 prog.size = data.sec_data->d_size;
1638 prog.license = ctx->license;
1639
1640 fd = bpf_prog_attach(section, &prog, ctx);
1641 if (fd < 0)
1642 return fd;
1643
1644 ctx->sec_done[i] = true;
1645 break;
1646 }
1647
1648 return fd;
1649 }
1650
/* Patch all map-load instructions referenced by the relocation
 * section: resolve each relocation entry to its map symbol and rewrite
 * the ld64 (BPF_LD | BPF_IMM | BPF_DW) instruction to carry the kernel
 * map fd as a BPF_PSEUDO_MAP_FD immediate.
 *
 * Returns 0 on success, negative error on malformed relo/symbol data.
 */
static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
			       struct bpf_elf_sec_data *data_relo,
			       struct bpf_elf_sec_data *data_insn)
{
	Elf_Data *idata = data_insn->sec_data;
	GElf_Shdr *rhdr = &data_relo->sec_hdr;
	int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
	struct bpf_insn *insns = idata->d_buf;
	unsigned int num_insns = idata->d_size / sizeof(*insns);

	for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
		unsigned int ioff, rmap;
		GElf_Rel relo;
		GElf_Sym sym;

		if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
			return -EIO;

		/* Index of the instruction the relocation applies to; it
		 * must be an ld64 map-load template.
		 */
		ioff = relo.r_offset / sizeof(struct bpf_insn);
		if (ioff >= num_insns ||
		    insns[ioff].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			fprintf(stderr, "ELF contains relo data for non ld64 instruction at offset %u! Compiler bug?!\n",
				ioff);
			/* Relocation on a call usually means a non-inlined
			 * helper function referencing a map.
			 */
			if (ioff < num_insns &&
			    insns[ioff].code == (BPF_JMP | BPF_CALL))
				fprintf(stderr, " - Try to annotate functions with always_inline attribute!\n");
			return -EINVAL;
		}

		if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
			return -EIO;
		if (sym.st_shndx != ctx->sec_maps) {
			fprintf(stderr, "ELF contains non-map related relo data in entry %u pointing to section %u! Compiler bug?!\n",
				relo_ent, sym.st_shndx);
			return -EIO;
		}

		/* Map slot = symbol offset within the maps section. */
		rmap = sym.st_value / ctx->map_len;
		if (rmap >= ARRAY_SIZE(ctx->map_fds))
			return -EINVAL;
		if (!ctx->map_fds[rmap])
			return -EINVAL;

		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' (%d) injected into prog section \'%s\' at offset %u!\n",
				bpf_str_tab_name(ctx, &sym), ctx->map_fds[rmap],
				data_insn->sec_name, ioff);

		insns[ioff].src_reg = BPF_PSEUDO_MAP_FD;
		insns[ioff].imm = ctx->map_fds[rmap];
	}

	return 0;
}
1705
/* Locate the relocation section targeting program section 'section',
 * apply the map relocations to its instructions and load the result
 * into the kernel. *sseen is set once the program section was found;
 * *lderr is set when the relocated load itself failed, so callers do
 * not retry the relocation-less path. Returns the program fd on
 * success, negative otherwise.
 */
static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
			       bool *lderr, bool *sseen)
{
	struct bpf_elf_sec_data data_relo, data_insn;
	struct bpf_elf_prog prog;
	int ret, idx, i, fd = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data_relo);
		if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
			continue;

		/* For SHT_REL sections, sh_info is the index of the
		 * section the relocations apply to.
		 */
		idx = data_relo.sec_hdr.sh_info;

		ret = bpf_fill_section_data(ctx, idx, &data_insn);
		if (ret < 0 ||
		    !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
		      data_insn.sec_hdr.sh_flags & SHF_EXECINSTR &&
		      !strcmp(data_insn.sec_name, section)))
			continue;

		*sseen = true;

		ret = bpf_apply_relo_data(ctx, &data_relo, &data_insn);
		if (ret < 0)
			return ret;

		memset(&prog, 0, sizeof(prog));
		prog.type = ctx->type;
		prog.insns = data_insn.sec_data->d_buf;
		prog.size = data_insn.sec_data->d_size;
		prog.license = ctx->license;

		fd = bpf_prog_attach(section, &prog, ctx);
		if (fd < 0) {
			*lderr = true;
			return fd;
		}

		ctx->sec_done[i] = true;
		ctx->sec_done[idx] = true;
		break;
	}

	return fd;
}
1752
/* Load the program from the given ELF section, trying the relocation
 * path first when the object carries map data, falling back to a
 * plain load when no relocation section matched. Returns the program
 * fd on success, negative error otherwise.
 */
static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
{
	bool lderr = false, sseen = false;
	int ret = -1;

	if (bpf_has_map_data(ctx))
		ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen);
	/* Don't retry without relocations when the kernel already
	 * rejected the relocated program (lderr set).
	 */
	if (ret < 0 && !lderr)
		ret = bpf_fetch_prog(ctx, section, &sseen);
	if (ret < 0 && !sseen)
		fprintf(stderr, "Program section \'%s\' not found in ELF file!\n",
			section);
	return ret;
}
1767
1768 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
1769 {
1770 int i;
1771
1772 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
1773 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
1774 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
1775 return i;
1776 return -1;
1777 }
1778
1779 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
1780 {
1781 struct bpf_elf_sec_data data;
1782 uint32_t map_id, key_id;
1783 int fd, i, ret, idx;
1784
1785 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
1786 if (ctx->sec_done[i])
1787 continue;
1788
1789 ret = bpf_fill_section_data(ctx, i, &data);
1790 if (ret < 0)
1791 continue;
1792
1793 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
1794 if (ret != 2)
1795 continue;
1796
1797 idx = bpf_find_map_by_id(ctx, map_id);
1798 if (idx < 0)
1799 continue;
1800
1801 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
1802 if (fd < 0)
1803 return -EIO;
1804
1805 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
1806 &fd, BPF_ANY);
1807 if (ret < 0) {
1808 if (errno == E2BIG)
1809 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
1810 key_id, map_id);
1811 return -errno;
1812 }
1813
1814 ctx->sec_done[i] = true;
1815 }
1816
1817 return 0;
1818 }
1819
1820 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
1821 {
1822 struct stat st;
1823 int ret;
1824
1825 memset(&ctx->stat, 0, sizeof(ctx->stat));
1826
1827 ret = fstat(ctx->obj_fd, &st);
1828 if (ret < 0) {
1829 fprintf(stderr, "Stat of elf file failed: %s\n",
1830 strerror(errno));
1831 return;
1832 }
1833
1834 ctx->stat.st_dev = st.st_dev;
1835 ctx->stat.st_ino = st.st_ino;
1836 }
1837
1838 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
1839 {
1840 char buff[PATH_MAX];
1841
1842 while (fgets(buff, sizeof(buff), fp)) {
1843 char *ptr = buff;
1844
1845 while (*ptr == ' ' || *ptr == '\t')
1846 ptr++;
1847
1848 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
1849 continue;
1850
1851 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
1852 sscanf(ptr, "%i %s #", id, path) != 2) {
1853 strcpy(path, ptr);
1854 return -1;
1855 }
1856
1857 return 1;
1858 }
1859
1860 return 0;
1861 }
1862
1863 static bool bpf_pinning_reserved(uint32_t pinning)
1864 {
1865 switch (pinning) {
1866 case PIN_NONE:
1867 case PIN_OBJECT_NS:
1868 case PIN_GLOBAL_NS:
1869 return true;
1870 default:
1871 return false;
1872 }
1873 }
1874
1875 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
1876 {
1877 struct bpf_hash_entry *entry;
1878 char subpath[PATH_MAX] = {};
1879 uint32_t pinning;
1880 FILE *fp;
1881 int ret;
1882
1883 fp = fopen(db_file, "r");
1884 if (!fp)
1885 return;
1886
1887 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
1888 if (ret == -1) {
1889 fprintf(stderr, "Database %s is corrupted at: %s\n",
1890 db_file, subpath);
1891 fclose(fp);
1892 return;
1893 }
1894
1895 if (bpf_pinning_reserved(pinning)) {
1896 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
1897 db_file, pinning);
1898 continue;
1899 }
1900
1901 entry = malloc(sizeof(*entry));
1902 if (!entry) {
1903 fprintf(stderr, "No memory left for db entry!\n");
1904 continue;
1905 }
1906
1907 entry->pinning = pinning;
1908 entry->subpath = strdup(subpath);
1909 if (!entry->subpath) {
1910 fprintf(stderr, "No memory left for db entry!\n");
1911 free(entry);
1912 continue;
1913 }
1914
1915 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
1916 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
1917 }
1918
1919 fclose(fp);
1920 }
1921
1922 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
1923 {
1924 struct bpf_hash_entry *entry;
1925 int i;
1926
1927 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
1928 while ((entry = ctx->ht[i]) != NULL) {
1929 ctx->ht[i] = entry->next;
1930 free((char *)entry->subpath);
1931 free(entry);
1932 }
1933 }
1934 }
1935
/* Sanity-check the ELF header: the object must be relocatable eBPF
 * (EM_NONE is accepted for objects built before EM_BPF existed) and
 * its byte order must match the host's. Returns 0 if usable, negative
 * error otherwise.
 */
static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
{
	if (ctx->elf_hdr.e_type != ET_REL ||
	    (ctx->elf_hdr.e_machine != EM_NONE &&
	     ctx->elf_hdr.e_machine != EM_BPF) ||
	    ctx->elf_hdr.e_version != EV_CURRENT) {
		fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
		return -EINVAL;
	}

	switch (ctx->elf_hdr.e_ident[EI_DATA]) {
	default:
		fprintf(stderr, "ELF format error, wrong endianness info?\n");
		return -EINVAL;
	case ELFDATA2LSB:
		/* htons(1) == 1 holds only on big endian hosts. */
		if (htons(1) == 1) {
			fprintf(stderr,
				"We are big endian, eBPF object is little endian!\n");
			return -EIO;
		}
		break;
	case ELFDATA2MSB:
		if (htons(1) != 1) {
			fprintf(stderr,
				"We are little endian, eBPF object is big endian!\n");
			return -EIO;
		}
		break;
	}

	return 0;
}
1968
1969 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
1970 enum bpf_prog_type type, bool verbose)
1971 {
1972 int ret = -EINVAL;
1973
1974 if (elf_version(EV_CURRENT) == EV_NONE ||
1975 bpf_init_env(pathname))
1976 return ret;
1977
1978 memset(ctx, 0, sizeof(*ctx));
1979 ctx->verbose = verbose;
1980 ctx->type = type;
1981
1982 ctx->obj_fd = open(pathname, O_RDONLY);
1983 if (ctx->obj_fd < 0)
1984 return ctx->obj_fd;
1985
1986 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
1987 if (!ctx->elf_fd) {
1988 ret = -EINVAL;
1989 goto out_fd;
1990 }
1991
1992 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
1993 ret = -EINVAL;
1994 goto out_fd;
1995 }
1996
1997 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
1998 &ctx->elf_hdr) {
1999 ret = -EIO;
2000 goto out_elf;
2001 }
2002
2003 ret = bpf_elf_check_ehdr(ctx);
2004 if (ret < 0)
2005 goto out_elf;
2006
2007 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2008 sizeof(*(ctx->sec_done)));
2009 if (!ctx->sec_done) {
2010 ret = -ENOMEM;
2011 goto out_elf;
2012 }
2013
2014 if (ctx->verbose && bpf_log_realloc(ctx)) {
2015 ret = -ENOMEM;
2016 goto out_free;
2017 }
2018
2019 bpf_save_finfo(ctx);
2020 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2021
2022 return 0;
2023 out_free:
2024 free(ctx->sec_done);
2025 out_elf:
2026 elf_end(ctx->elf_fd);
2027 out_fd:
2028 close(ctx->obj_fd);
2029 return ret;
2030 }
2031
2032 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2033 {
2034 int i, count = 0;
2035
2036 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2037 if (!ctx->map_fds[i])
2038 break;
2039 count++;
2040 }
2041
2042 return count;
2043 }
2044
2045 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2046 {
2047 int i;
2048
2049 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2050 if (ctx->map_fds[i])
2051 close(ctx->map_fds[i]);
2052 }
2053 }
2054
2055 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2056 {
2057 if (failure)
2058 bpf_maps_teardown(ctx);
2059
2060 bpf_hash_destroy(ctx);
2061
2062 free(ctx->sec_done);
2063 free(ctx->log);
2064
2065 elf_end(ctx->elf_fd);
2066 close(ctx->obj_fd);
2067 }
2068
/* Single process-wide loader context, shared between bpf_obj_open()
 * and bpf_send_map_fds() so created map fds survive the load call.
 */
static struct bpf_elf_ctx __ctx;
2070
/* Top-level loader: parse the ELF object at 'pathname', create its
 * maps, load the program from 'section' and populate any tail-call
 * program arrays. Returns the program fd on success, negative error
 * otherwise. The ELF context is torn down either way; maps are only
 * closed on failure (see bpf_elf_ctx_destroy()).
 */
static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
			const char *section, bool verbose)
{
	struct bpf_elf_ctx *ctx = &__ctx;
	int fd = 0, ret;

	ret = bpf_elf_ctx_init(ctx, pathname, type, verbose);
	if (ret < 0) {
		fprintf(stderr, "Cannot initialize ELF context!\n");
		return ret;
	}

	ret = bpf_fetch_ancillary(ctx);
	if (ret < 0) {
		fprintf(stderr, "Error fetching ELF ancillary data!\n");
		goto out;
	}

	fd = bpf_fetch_prog_sec(ctx, section);
	if (fd < 0) {
		fprintf(stderr, "Error fetching program/map!\n");
		ret = fd;
		goto out;
	}

	ret = bpf_fill_prog_arrays(ctx);
	if (ret < 0)
		fprintf(stderr, "Error filling program arrays!\n");
out:
	bpf_elf_ctx_destroy(ctx, ret < 0);
	if (ret < 0) {
		/* fd stayed 0 unless the program load succeeded. */
		if (fd)
			close(fd);
		return ret;
	}

	return fd;
}
2109
/* Push map fds plus their bpf_map_data descriptions to a peer over a
 * connected unix domain socket, batching at most BPF_SCM_MAX_FDS fds
 * per message; the fds travel as SCM_RIGHTS ancillary data.
 *
 * Returns 0 on success, the failing sendmsg() result or -1 otherwise.
 */
static int
bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
		 const struct bpf_map_data *aux, unsigned int entries)
{
	struct bpf_map_set_msg msg = {
		.aux.uds_ver = BPF_SCM_AUX_VER,
		.aux.num_ent = entries,
	};
	int *cmsg_buf, min_fd;
	char *amsg_buf;
	int i;

	/* NOTE(review): strncpy() leaves obj_name unterminated when
	 * aux->obj fills it completely - receiver must treat it as a
	 * bounded buffer; confirm against bpf_scm.h consumers.
	 */
	strncpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
	memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));

	cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
	amsg_buf = (char *)msg.aux.ent;

	for (i = 0; i < entries; i += min_fd) {
		int ret;

		/* At most BPF_SCM_MAX_FDS fds per sendmsg() batch. */
		min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
		memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);

		ret = sendmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;
	}

	return 0;
}
2144
/* Receive map fds and their descriptions from a peer; counterpart of
 * bpf_map_set_send(). The aux header of the first message announces
 * how many entries follow in total ('needed'), bounded by the
 * caller-supplied capacity 'entries'.
 *
 * Returns 0 on success, negative error otherwise.
 */
static int
bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
		 unsigned int entries)
{
	struct bpf_map_set_msg msg;
	int *cmsg_buf, min_fd;
	char *amsg_buf, *mmsg_buf;
	unsigned int needed = 1;
	int i;

	cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
	amsg_buf = (char *)msg.aux.ent;
	mmsg_buf = (char *)&msg.aux;

	for (i = 0; i < min(entries, needed); i += min_fd) {
		struct cmsghdr *cmsg;
		int ret;

		min_fd = min(entries, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		ret = recvmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;

		cmsg = CMSG_FIRSTHDR(&msg.hdr);
		if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
			return -EINVAL;
		/* Control data truncated means fds were dropped: bail. */
		if (msg.hdr.msg_flags & MSG_CTRUNC)
			return -EIO;
		if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
			return -ENOSYS;

		/* Number of fds actually carried in this message. */
		min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
		if (min_fd > entries || min_fd <= 0)
			return -EINVAL;

		memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
		memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
		memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));

		needed = aux->num_ent;
	}

	return 0;
}
2191
2192 int bpf_send_map_fds(const char *path, const char *obj)
2193 {
2194 struct bpf_elf_ctx *ctx = &__ctx;
2195 struct sockaddr_un addr = { .sun_family = AF_UNIX };
2196 struct bpf_map_data bpf_aux = {
2197 .fds = ctx->map_fds,
2198 .ent = ctx->maps,
2199 .st = &ctx->stat,
2200 .obj = obj,
2201 };
2202 int fd, ret;
2203
2204 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
2205 if (fd < 0) {
2206 fprintf(stderr, "Cannot open socket: %s\n",
2207 strerror(errno));
2208 return -1;
2209 }
2210
2211 strncpy(addr.sun_path, path, sizeof(addr.sun_path));
2212
2213 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
2214 if (ret < 0) {
2215 fprintf(stderr, "Cannot connect to %s: %s\n",
2216 path, strerror(errno));
2217 return -1;
2218 }
2219
2220 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
2221 bpf_maps_count(ctx));
2222 if (ret < 0)
2223 fprintf(stderr, "Cannot send fds to %s: %s\n",
2224 path, strerror(errno));
2225
2226 bpf_maps_teardown(ctx);
2227 close(fd);
2228 return ret;
2229 }
2230
/* Receive map fds and their metadata from a peer via a unix domain
 * socket bound at 'path'; up to 'entries' fds are written into fds[]
 * and described in *aux. The socket path is unlinked afterwards.
 * Returns 0 on success, negative/-1 otherwise.
 */
int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
		     unsigned int entries)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd, ret;

	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (fd < 0) {
		fprintf(stderr, "Cannot open socket: %s\n",
			strerror(errno));
		return -1;
	}

	/* NOTE(review): strncpy() leaves sun_path unterminated when
	 * 'path' fills it completely - confirm callers pass short paths.
	 */
	strncpy(addr.sun_path, path, sizeof(addr.sun_path));

	ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0) {
		fprintf(stderr, "Cannot bind to socket: %s\n",
			strerror(errno));
		/* Fix: don't leak the socket on bind failure. */
		close(fd);
		return -1;
	}

	ret = bpf_map_set_recv(fd, fds, aux, entries);
	if (ret < 0)
		fprintf(stderr, "Cannot recv fds from %s: %s\n",
			path, strerror(errno));

	unlink(addr.sun_path);
	close(fd);
	return ret;
}
2262 #endif /* HAVE_ELF */