/* lib/bpf.c — iproute2 BPF common code (web-view header removed) */
1 /*
2 * bpf.c BPF common code
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Daniel Borkmann <daniel@iogearbox.net>
10 * Jiri Pirko <jiri@resnulli.us>
11 * Alexei Starovoitov <ast@kernel.org>
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <string.h>
18 #include <stdbool.h>
19 #include <stdint.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <stdarg.h>
23 #include <limits.h>
24 #include <assert.h>
25
26 #ifdef HAVE_ELF
27 #include <libelf.h>
28 #include <gelf.h>
29 #endif
30
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/un.h>
34 #include <sys/vfs.h>
35 #include <sys/mount.h>
36 #include <sys/syscall.h>
37 #include <sys/sendfile.h>
38 #include <sys/resource.h>
39
40 #include <arpa/inet.h>
41
42 #include "utils.h"
43
44 #include "bpf_util.h"
45 #include "bpf_elf.h"
46 #include "bpf_scm.h"
47
/* Per program type metadata: the "type" keyword accepted on the command
 * line, the bpf fs subdirectory used for pinning, the default ELF section
 * name, and whether the program may be exported over a Unix domain socket.
 */
struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};
54
/* Program types handled by this file; index 0 (sched_cls) acts as the
 * "master" type whose bpf fs subdir the others alias (see
 * bpf_gen_hierarchy()).
 */
static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};
63
/* Metadata table indexed by enum bpf_prog_type; entries for types not
 * listed here are zero-filled (NULL strings), which the accessors below
 * assert against.
 */
static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type = "cls",
		.subdir = "tc",
		.section = ELF_SECTION_CLASSIFIER,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type = "act",
		.subdir = "tc",
		.section = ELF_SECTION_ACTION,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type = "xdp",
		.subdir = "xdp",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type = "lwt_in",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type = "lwt_out",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type = "lwt_xmit",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
};
98
99 static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
100 {
101 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
102 __bpf_prog_meta[type].subdir);
103 return __bpf_prog_meta[type].subdir;
104 }
105
106 const char *bpf_prog_to_default_section(enum bpf_prog_type type)
107 {
108 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
109 __bpf_prog_meta[type].section);
110 return __bpf_prog_meta[type].section;
111 }
112
#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, bool verbose);
#else
/* Stub used when built without libelf: loading eBPF ELF object files is
 * impossible, so fail with ENOSYS.
 */
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif
125
126 static inline __u64 bpf_ptr_to_u64(const void *ptr)
127 {
128 return (__u64)(unsigned long)ptr;
129 }
130
/* Thin wrapper around the bpf(2) syscall; glibc provides no stub for it.
 * When the build's kernel headers predate __NR_bpf, fail with ENOSYS.
 */
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}
141
142 static int bpf_map_update(int fd, const void *key, const void *value,
143 uint64_t flags)
144 {
145 union bpf_attr attr = {};
146
147 attr.map_fd = fd;
148 attr.key = bpf_ptr_to_u64(key);
149 attr.value = bpf_ptr_to_u64(value);
150 attr.flags = flags;
151
152 return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
153 }
154
155 static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
156 char **bpf_string, bool *need_release,
157 const char separator)
158 {
159 char sp;
160
161 if (from_file) {
162 size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
163 char *tmp_string, *last;
164 FILE *fp;
165
166 tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
167 tmp_string = calloc(1, tmp_len);
168 if (tmp_string == NULL)
169 return -ENOMEM;
170
171 fp = fopen(arg, "r");
172 if (fp == NULL) {
173 perror("Cannot fopen");
174 free(tmp_string);
175 return -ENOENT;
176 }
177
178 if (!fgets(tmp_string, tmp_len, fp)) {
179 free(tmp_string);
180 fclose(fp);
181 return -EIO;
182 }
183
184 fclose(fp);
185
186 last = &tmp_string[strlen(tmp_string) - 1];
187 if (*last == '\n')
188 *last = 0;
189
190 *need_release = true;
191 *bpf_string = tmp_string;
192 } else {
193 *need_release = false;
194 *bpf_string = arg;
195 }
196
197 if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
198 sp != separator) {
199 if (*need_release)
200 free(*bpf_string);
201 return -EINVAL;
202 }
203
204 return 0;
205 }
206
/* Parse classic BPF instructions from argv[0] (inline text, or a file
 * when 'from_file') into the caller-provided bpf_ops array.
 *
 * The expected format is "<count>,<code> <jt> <jf> <k>,..." as produced
 * by bpf_asm / tcpdump -ddd. Returns the instruction count on success,
 * a negative errno-style value on failure.
 */
static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	/* Each iteration advances 'token' to just past the next separator;
	 * the loop stops at the end of the string.
	 */
	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		/* More instructions present than the leading count claims. */
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}
256
257 void bpf_print_ops(FILE *f, struct rtattr *bpf_ops, __u16 len)
258 {
259 struct sock_filter *ops = RTA_DATA(bpf_ops);
260 int i;
261
262 if (len == 0)
263 return;
264
265 fprintf(f, "bytecode \'%u,", len);
266
267 for (i = 0; i < len - 1; i++)
268 fprintf(f, "%hu %hhu %hhu %u,", ops[i].code, ops[i].jt,
269 ops[i].jf, ops[i].k);
270
271 fprintf(f, "%hu %hhu %hhu %u\'", ops[i].code, ops[i].jt,
272 ops[i].jf, ops[i].k);
273 }
274
/* Print a field-by-field diff between a map spec reconstructed from a
 * pinned object ('pin') and the spec found in the ELF object ('obj');
 * only mismatching fields are listed.
 */
static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}
298
/* Verify that the first 'length' bytes of map spec 'map' match the
 * attributes the kernel reports for fd via /proc/<pid>/fdinfo/<fd>.
 *
 * Returns 0 on match — or when the kernel exposes no eBPF fdinfo at all,
 * in which case nothing can be verified — and -EINVAL on mismatch,
 * -EIO when procfs is unavailable.
 */
static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    int length, enum bpf_prog_type type)
{
	char file[PATH_MAX], buff[4096];
	struct bpf_elf_map tmp = {}, zero = {};
	unsigned int val, owner_type = 0;
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	/* Rebuild a bpf_elf_map from the fdinfo "key:\tvalue" lines. */
	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			tmp.type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			tmp.size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			tmp.size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			tmp.max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			tmp.flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
	}

	fclose(fp);

	/* The decision to reject this is on kernel side eventually, but
	 * at least give the user a chance to know what's wrong.
	 */
	if (owner_type && owner_type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, owner_type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
		 * so just accept it. We know we do have an eBPF fd and in this
		 * case, everything is 0. It is guaranteed that no such map exists
		 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}
354
/* Mount a bpf filesystem instance at 'target'.
 *
 * The target is first made MS_PRIVATE | MS_REC so the new mount is not
 * propagated to other namespaces. If that fails with EINVAL, the target
 * is not yet a mount point: bind-mount it onto itself once and retry.
 */
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	/* mode=0700: the fs will hold pinned objects, keep it owner-only. */
	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}
383
/* Check whether 'mnt' is an existing mount point whose filesystem magic
 * matches 'magic'. Returns 0 if so, -ENOENT otherwise.
 */
static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0 ||
	    (unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}
395
396 static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
397 char *mnt, int len,
398 const char * const *known_mnts)
399 {
400 const char * const *ptr;
401 char type[100];
402 FILE *fp;
403
404 if (known_mnts) {
405 ptr = known_mnts;
406 while (*ptr) {
407 if (bpf_valid_mntpt(*ptr, magic) == 0) {
408 strncpy(mnt, *ptr, len - 1);
409 mnt[len - 1] = 0;
410 return mnt;
411 }
412 ptr++;
413 }
414 }
415
416 fp = fopen("/proc/mounts", "r");
417 if (fp == NULL || len != PATH_MAX)
418 return NULL;
419
420 while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
421 mnt, type) == 2) {
422 if (strcmp(type, fstype) == 0)
423 break;
424 }
425
426 fclose(fp);
427 if (strcmp(type, fstype) != 0)
428 return NULL;
429
430 return mnt;
431 }
432
/* Locate the tracefs mount, open its trace_pipe and stream its contents
 * to stderr until the user interrupts with ^C. Returns -1 only on setup
 * failure; the read loop itself never terminates normally.
 */
int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	char tpipe[PATH_MAX];
	const char *mnt;
	int fd;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd = open(tpipe, O_RDONLY);
	if (fd < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd, buff, sizeof(buff) - 1);
		if (ret > 0) {
			/* NOTE(review): write()'s return value is ignored;
			 * a short write to stderr drops trace data silently.
			 */
			write(2, buff, ret);
			fflush(stderr);
		}
	}

	return 0;
}
474
475 static int bpf_gen_global(const char *bpf_sub_dir)
476 {
477 char bpf_glo_dir[PATH_MAX];
478 int ret;
479
480 snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
481 bpf_sub_dir, BPF_DIR_GLOBALS);
482
483 ret = mkdir(bpf_glo_dir, S_IRWXU);
484 if (ret && errno != EEXIST) {
485 fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
486 strerror(errno));
487 return ret;
488 }
489
490 return 0;
491 }
492
493 static int bpf_gen_master(const char *base, const char *name)
494 {
495 char bpf_sub_dir[PATH_MAX];
496 int ret;
497
498 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);
499
500 ret = mkdir(bpf_sub_dir, S_IRWXU);
501 if (ret && errno != EEXIST) {
502 fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
503 strerror(errno));
504 return ret;
505 }
506
507 return bpf_gen_global(bpf_sub_dir);
508 }
509
/* Fallback for filesystems that refuse symlinks: create the slave dir
 * and bind-mount the master dir ('full_link') onto it. The directory is
 * removed again if the bind mount fails.
 */
static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int err;

	err = mkdir(full_name, S_IRWXU);
	if (err) {
		/* Caller only reaches this path for a fresh entry. */
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return err;
	}

	err = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (err) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return err;
}
532
/* Make the per-type subdir 'name' an alias of the master subdir 'link'
 * below 'base': preferably a symlink, with a bind-mount fallback when
 * the filesystem forbids symlinks (EPERM).
 */
static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char bpf_lnk_dir[PATH_MAX];
	char bpf_sub_dir[PATH_MAX];
	struct stat sb = {};
	int ret;

	snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			/* Any error other than EPERM is fatal; EPERM means
			 * symlinks are not supported here, so fall back to
			 * a bind mount of the master directory.
			 */
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				return ret;
			}

			return bpf_slave_via_bind_mnt(bpf_sub_dir,
						      bpf_lnk_dir);
		}

		/* Entry already exists: if it is a real directory rather
		 * than a symlink, treat it as its own hierarchy and ensure
		 * its globals dir exists.
		 */
		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			return ret;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK)
			return bpf_gen_global(bpf_sub_dir);
	}

	return 0;
}
570
571 static int bpf_gen_hierarchy(const char *base)
572 {
573 int ret, i;
574
575 ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
576 for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
577 ret = bpf_gen_slave(base,
578 bpf_prog_to_subdir(__bpf_types[i]),
579 bpf_prog_to_subdir(__bpf_types[0]));
580 return ret;
581 }
582
/* Resolve (and lazily set up) the bpf fs working directory.
 *
 * On first use, an existing bpf mount is searched for; failing that one
 * is created (honoring the BPF_ENV_MNT override) and the per-type subdir
 * hierarchy is generated. The result — including failure (NULL) — is
 * cached in static state for all subsequent calls. A non-UNSPEC 'type'
 * appends that type's subdirectory to the returned path.
 */
static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		/* type != 0 (BPF_PROG_TYPE_UNSPEC): append its subdir. */
		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp, sizeof(bpf_tmp),
			     bpf_known_mnts);
	if (!mnt) {
		mnt = getenv(BPF_ENV_MNT);
		if (!mnt)
			mnt = BPF_DIR_MNT;
		ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	bpf_mnt_cached = true;
	return mnt;
}
633
634 static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
635 {
636 union bpf_attr attr = {};
637 char tmp[PATH_MAX];
638
639 if (strlen(pathname) > 2 && pathname[0] == 'm' &&
640 pathname[1] == ':' && bpf_get_work_dir(type)) {
641 snprintf(tmp, sizeof(tmp), "%s/%s",
642 bpf_get_work_dir(type), pathname + 2);
643 pathname = tmp;
644 }
645
646 attr.pathname = bpf_ptr_to_u64(pathname);
647
648 return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
649 }
650
651 static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
652 {
653 int prog_fd = bpf_obj_get(pathname, type);
654
655 if (prog_fd < 0)
656 fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
657 pathname, strerror(errno));
658 return prog_fd;
659 }
660
/* Ways of supplying a BPF program on the command line. */
enum bpf_mode {
	CBPF_BYTECODE,	/* classic BPF opcodes given inline */
	CBPF_FILE,	/* classic BPF opcodes read from a file */
	EBPF_OBJECT,	/* eBPF ELF object file to load */
	EBPF_PINNED,	/* eBPF program already pinned in bpf fs */
	BPF_MODE_MAX,
};
668
/* Parse the command line in cfg->argv/argc into a BPF program.
 *
 * The first argument selects the mode (cBPF inline/file, eBPF object or
 * pinned object), restricted to the modes enabled in 'opt_tbl'. For the
 * eBPF modes, optional "type", "section", "export" and "verbose"
 * keywords may follow the file name; NEXT_ARG()/NEXT_ARG_FWD()/PREV_ARG()
 * advance the shared argc/argv cursor. Returns the cBPF instruction
 * count or the eBPF program fd (>= 0) on success, negative on error;
 * *type, *mode and cfg are updated in place.
 */
static int bpf_parse(enum bpf_prog_type *type, enum bpf_mode *mode,
		     struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		*mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		*mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		*mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		*mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (*mode == EBPF_OBJECT || *mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		/* An explicit "type" keyword is matched against the meta
		 * table; without one, default to sched_cls.
		 */
		if (*type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						*type = i;
						break;
					}
				}

				if (*type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				*type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		section = bpf_prog_to_default_section(*type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		/* UDS export target: the environment variable takes
		 * precedence over the "export" keyword.
		 */
		if (__bpf_prog_meta[*type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		/* Step back so the caller's cursor points at the last
		 * consumed argument.
		 */
		PREV_ARG();
	}

	if (*mode == CBPF_BYTECODE || *mode == CBPF_FILE)
		ret = bpf_ops_parse(argc, argv, cfg->ops, *mode == CBPF_FILE);
	else if (*mode == EBPF_OBJECT)
		ret = bpf_obj_open(file, *type, section, verbose);
	else if (*mode == EBPF_PINNED)
		ret = bpf_obj_pinned(file, *type);
	else
		return -1;

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;

	return ret;
}
775
/* Run bpf_parse() with a stack-allocated cBPF opcode buffer and hand the
 * result to the matching callback in 'ops': cbpf_cb gets the opcodes and
 * their count, ebpf_cb gets the program fd plus a "<file>:[<section>]"
 * annotation. Returns 0 on success, negative on parse failure.
 */
static int bpf_parse_opt_tbl(enum bpf_prog_type type, struct bpf_cfg_in *cfg,
			     const struct bpf_cfg_ops *ops, void *nl,
			     const bool *opt_tbl)
{
	struct sock_filter opcodes[BPF_MAXINSNS];
	char annotation[256];
	enum bpf_mode mode;
	int ret;

	cfg->ops = opcodes;
	ret = bpf_parse(&type, &mode, cfg, opt_tbl);
	cfg->ops = NULL;
	if (ret < 0)
		return ret;

	/* ret is the instruction count for cBPF, the prog fd for eBPF. */
	if (mode == CBPF_BYTECODE || mode == CBPF_FILE)
		ops->cbpf_cb(nl, opcodes, ret);
	if (mode == EBPF_OBJECT || mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, ret, annotation);
	}

	return 0;
}
802
803 int bpf_parse_common(enum bpf_prog_type type, struct bpf_cfg_in *cfg,
804 const struct bpf_cfg_ops *ops, void *nl)
805 {
806 bool opt_tbl[BPF_MODE_MAX] = {};
807
808 if (ops->cbpf_cb) {
809 opt_tbl[CBPF_BYTECODE] = true;
810 opt_tbl[CBPF_FILE] = true;
811 }
812
813 if (ops->ebpf_cb) {
814 opt_tbl[EBPF_OBJECT] = true;
815 opt_tbl[EBPF_PINNED] = true;
816 }
817
818 return bpf_parse_opt_tbl(type, cfg, ops, nl, opt_tbl);
819 }
820
/* Graft a program into a pinned PROG_ARRAY map: load/resolve the program
 * described by argv, open the pinned map at 'map_path', sanity-check its
 * spec and store the program fd at *key — or, when 'key' is NULL, at the
 * key encoded in the section name as "<x>/<key>". Returns 0 on success.
 */
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	enum bpf_prog_type type = BPF_PROG_TYPE_UNSPEC;
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	/* Template the pinned map must match (PROG_ARRAY of int -> int). */
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.argc = argc,
		.argv = argv,
	};
	int ret, prog_fd, map_fd;
	enum bpf_mode mode;
	uint32_t map_key;

	prog_fd = bpf_parse(&type, &mode, &cfg, opt_tbl);
	if (prog_fd < 0)
		return prog_fd;
	if (key) {
		map_key = *key;
	} else {
		/* No explicit key: derive it from a section name of the
		 * form "<something>/<index>".
		 */
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	/* Compare only up to max_elem: the pinned map's element count is
	 * allowed to differ from the template.
	 */
	ret = bpf_map_selfcheck_pinned(map_fd, &test,
				       offsetof(struct bpf_elf_map, max_elem),
				       type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}
880
881 int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
882 {
883 union bpf_attr attr = {};
884
885 attr.target_fd = target_fd;
886 attr.attach_bpf_fd = prog_fd;
887 attr.attach_type = type;
888
889 return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
890 }
891
892 int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
893 {
894 union bpf_attr attr = {};
895
896 attr.target_fd = target_fd;
897 attr.attach_type = type;
898
899 return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
900 }
901
902 int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
903 size_t size_insns, const char *license, char *log,
904 size_t size_log)
905 {
906 union bpf_attr attr = {};
907
908 attr.prog_type = type;
909 attr.insns = bpf_ptr_to_u64(insns);
910 attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
911 attr.license = bpf_ptr_to_u64(license);
912
913 if (size_log > 0) {
914 attr.log_buf = bpf_ptr_to_u64(log);
915 attr.log_size = size_log;
916 attr.log_level = 1;
917 }
918
919 return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
920 }
921
#ifdef HAVE_ELF
/* One eBPF program extracted from an ELF section, ready for bpf(2). */
struct bpf_elf_prog {
	enum bpf_prog_type type;
	const struct bpf_insn *insns;
	size_t size;
	const char *license;
};

/* Hash-table node mapping a custom pinning id to its bpf fs subpath. */
struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

/* State carried while parsing and loading a single ELF object. */
struct bpf_elf_ctx {
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	int obj_fd;			/* fd of the open object file */
	int map_fds[ELF_MAX_MAPS];	/* fds of created/retrieved maps */
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;			/* per-section "processed" flags */
	int sec_maps;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	bool verbose;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];	/* custom pinning lookup table */
	char *log;			/* kernel verifier log buffer */
	size_t log_size;
};

/* One ELF section: header, payload and resolved name. */
struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

/* NOTE(review): apparently aggregates map fds with object metadata for
 * a consumer outside this chunk — confirm against the callers.
 */
struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};
970
/* printf-style error reporter that also dumps — and then clears — the
 * verifier log accumulated in ctx->log. Without verbose mode the log is
 * truncated to its last BPF_MAX_LOG bytes.
 */
static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (ctx->log && ctx->log[0]) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		/* Reset so a later load attempt starts with a clean log. */
		memset(ctx->log, 0, ctx->log_size);
	}
}
997
998 static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
999 {
1000 const size_t log_max = UINT_MAX >> 8;
1001 size_t log_size = ctx->log_size;
1002 void *ptr;
1003
1004 if (!ctx->log) {
1005 log_size = 65536;
1006 } else if (log_size < log_max) {
1007 log_size <<= 1;
1008 if (log_size > log_max)
1009 log_size = log_max;
1010 } else {
1011 return -EINVAL;
1012 }
1013
1014 ptr = realloc(ctx->log, log_size);
1015 if (!ptr)
1016 return -ENOMEM;
1017
1018 ctx->log = ptr;
1019 ctx->log_size = log_size;
1020
1021 return 0;
1022 }
1023
1024 static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
1025 uint32_t size_value, uint32_t max_elem,
1026 uint32_t flags)
1027 {
1028 union bpf_attr attr = {};
1029
1030 attr.map_type = type;
1031 attr.key_size = size_key;
1032 attr.value_size = size_value;
1033 attr.max_entries = max_elem;
1034 attr.map_flags = flags;
1035
1036 return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
1037 }
1038
1039 static int bpf_obj_pin(int fd, const char *pathname)
1040 {
1041 union bpf_attr attr = {};
1042
1043 attr.pathname = bpf_ptr_to_u64(pathname);
1044 attr.bpf_fd = fd;
1045
1046 return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
1047 }
1048
/* Compute the SHA-1 digest of the file 'object' through the kernel
 * AF_ALG hash interface; 'out' must be exactly 20 bytes. Returns 0 on
 * success, a negative value on failure.
 */
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family	= AF_ALG,
		.salg_type	= "hash",
		.salg_name	= "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0) {
		fprintf(stderr, "Cannot get AF_ALG socket: %s\n",
			strerror(errno));
		return cfd;
	}

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0) {
		fprintf(stderr, "Error binding socket: %s\n", strerror(errno));
		goto out_cfd;
	}

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		fprintf(stderr, "Error accepting socket: %s\n",
			strerror(errno));
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		/* st_size is off_t: cast explicitly so the %zu specifier
		 * matches its argument (mismatch is undefined behavior).
		 */
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, (size_t)stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	/* Compare in the signed domain; 'size' may be -1 on error. */
	if (size != (ssize_t)len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}
1123
/* Return a hex string uniquely identifying the object file, computed
 * once per process as the SHA-1 of its contents; NULL when hashing
 * fails. The result is cached in static storage.
 */
static const char *bpf_get_obj_uid(const char *pathname)
{
	static char bpf_uid[64];
	static bool cached;
	uint8_t sha[20];

	if (!cached) {
		if (bpf_obj_hash(pathname, sha, sizeof(sha))) {
			fprintf(stderr, "Object hashing failed!\n");
			return NULL;
		}

		hexstring_n2a(sha, sizeof(sha), bpf_uid, sizeof(bpf_uid));
		cached = true;
	}

	return bpf_uid;
}
1145
1146 static int bpf_init_env(const char *pathname)
1147 {
1148 struct rlimit limit = {
1149 .rlim_cur = RLIM_INFINITY,
1150 .rlim_max = RLIM_INFINITY,
1151 };
1152
1153 /* Don't bother in case we fail! */
1154 setrlimit(RLIMIT_MEMLOCK, &limit);
1155
1156 if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC)) {
1157 fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
1158 return 0;
1159 }
1160
1161 if (!bpf_get_obj_uid(pathname))
1162 return -1;
1163
1164 return 0;
1165 }
1166
1167 static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
1168 uint32_t pinning)
1169 {
1170 struct bpf_hash_entry *entry;
1171
1172 entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
1173 while (entry && entry->pinning != pinning)
1174 entry = entry->next;
1175
1176 return entry ? entry->subpath : NULL;
1177 }
1178
1179 static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
1180 uint32_t pinning)
1181 {
1182 switch (pinning) {
1183 case PIN_OBJECT_NS:
1184 case PIN_GLOBAL_NS:
1185 return false;
1186 case PIN_NONE:
1187 return true;
1188 default:
1189 return !bpf_custom_pinning(ctx, pinning);
1190 }
1191 }
1192
1193 static void bpf_make_pathname(char *pathname, size_t len, const char *name,
1194 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1195 {
1196 switch (pinning) {
1197 case PIN_OBJECT_NS:
1198 snprintf(pathname, len, "%s/%s/%s",
1199 bpf_get_work_dir(ctx->type),
1200 bpf_get_obj_uid(NULL), name);
1201 break;
1202 case PIN_GLOBAL_NS:
1203 snprintf(pathname, len, "%s/%s/%s",
1204 bpf_get_work_dir(ctx->type),
1205 BPF_DIR_GLOBALS, name);
1206 break;
1207 default:
1208 snprintf(pathname, len, "%s/../%s/%s",
1209 bpf_get_work_dir(ctx->type),
1210 bpf_custom_pinning(ctx, pinning), name);
1211 break;
1212 }
1213 }
1214
1215 static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
1216 uint32_t pinning)
1217 {
1218 char pathname[PATH_MAX];
1219
1220 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1221 return 0;
1222
1223 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1224 return bpf_obj_get(pathname, ctx->type);
1225 }
1226
1227 static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
1228 {
1229 char tmp[PATH_MAX];
1230 int ret;
1231
1232 snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
1233 bpf_get_obj_uid(NULL));
1234
1235 ret = mkdir(tmp, S_IRWXU);
1236 if (ret && errno != EEXIST) {
1237 fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
1238 return ret;
1239 }
1240
1241 return 0;
1242 }
1243
1244 static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
1245 const char *todo)
1246 {
1247 char tmp[PATH_MAX], rem[PATH_MAX], *sub;
1248 int ret;
1249
1250 snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
1251 snprintf(rem, sizeof(rem), "%s/", todo);
1252 sub = strtok(rem, "/");
1253
1254 while (sub) {
1255 if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
1256 return -EINVAL;
1257
1258 strcat(tmp, sub);
1259 strcat(tmp, "/");
1260
1261 ret = mkdir(tmp, S_IRWXU);
1262 if (ret && errno != EEXIST) {
1263 fprintf(stderr, "mkdir %s failed: %s\n", tmp,
1264 strerror(errno));
1265 return ret;
1266 }
1267
1268 sub = strtok(NULL, "/");
1269 }
1270
1271 return 0;
1272 }
1273
/* Pin 'fd' under the pathname derived from 'name' and the pinning type,
 * creating any needed parent directories first. Returns 0 when pinning
 * does not apply, otherwise the directory-creation or BPF_OBJ_PIN result.
 */
static int bpf_place_pinned(int fd, const char *name,
			    const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	char pathname[PATH_MAX];
	const char *tmp;
	int ret = 0;

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	/* Object-namespace pinning needs the per-object dir; a custom
	 * pinning id needs its registered subpath created first.
	 */
	if (pinning == PIN_OBJECT_NS)
		ret = bpf_make_obj_path(ctx);
	else if ((tmp = bpf_custom_pinning(ctx, pinning)))
		ret = bpf_make_custom_path(ctx, tmp);
	if (ret < 0)
		return ret;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_pin(fd, pathname);
}
1294
1295 static void bpf_prog_report(int fd, const char *section,
1296 const struct bpf_elf_prog *prog,
1297 struct bpf_elf_ctx *ctx)
1298 {
1299 unsigned int insns = prog->size / sizeof(struct bpf_insn);
1300
1301 fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
1302 fd < 0 ? "rejected: " : "loaded",
1303 fd < 0 ? strerror(errno) : "",
1304 fd < 0 ? errno : fd);
1305
1306 fprintf(stderr, " - Type: %u\n", prog->type);
1307 fprintf(stderr, " - Instructions: %u (%u over limit)\n",
1308 insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
1309 fprintf(stderr, " - License: %s\n\n", prog->license);
1310
1311 bpf_dump_error(ctx, "Verifier analysis:\n\n");
1312 }
1313
/* Load one program section into the kernel via bpf_prog_load().
 *
 * On failure with ENOSPC (verifier log truncated) or with no log
 * buffer allocated yet, the log buffer is enlarged through
 * bpf_log_realloc() and the load retried, up to 10 times. On final
 * failure (or success in verbose mode) a report including the
 * verifier log is printed.
 *
 * Returns the program fd on success, negative value on error.
 */
static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	/* Reset errno so the checks below see only this attempt's error. */
	errno = 0;
	fd = bpf_prog_load(prog->type, prog->insns, prog->size,
			   prog->license, ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}
1343
1344 static void bpf_map_report(int fd, const char *name,
1345 const struct bpf_elf_map *map,
1346 struct bpf_elf_ctx *ctx)
1347 {
1348 fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
1349 fd < 0 ? "rejected: " : "loaded",
1350 fd < 0 ? strerror(errno) : "",
1351 fd < 0 ? errno : fd);
1352
1353 fprintf(stderr, " - Type: %u\n", map->type);
1354 fprintf(stderr, " - Identifier: %u\n", map->id);
1355 fprintf(stderr, " - Pinning: %u\n", map->pinning);
1356 fprintf(stderr, " - Size key: %u\n", map->size_key);
1357 fprintf(stderr, " - Size value: %u\n", map->size_value);
1358 fprintf(stderr, " - Max elems: %u\n", map->max_elem);
1359 fprintf(stderr, " - Flags: %#x\n\n", map->flags);
1360 }
1361
1362 static int bpf_map_attach(const char *name, const struct bpf_elf_map *map,
1363 struct bpf_elf_ctx *ctx)
1364 {
1365 int fd, ret;
1366
1367 fd = bpf_probe_pinned(name, ctx, map->pinning);
1368 if (fd > 0) {
1369 ret = bpf_map_selfcheck_pinned(fd, map,
1370 offsetof(struct bpf_elf_map,
1371 id), ctx->type);
1372 if (ret < 0) {
1373 close(fd);
1374 fprintf(stderr, "Map \'%s\' self-check failed!\n",
1375 name);
1376 return ret;
1377 }
1378 if (ctx->verbose)
1379 fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
1380 name);
1381 return fd;
1382 }
1383
1384 errno = 0;
1385 fd = bpf_map_create(map->type, map->size_key, map->size_value,
1386 map->max_elem, map->flags);
1387 if (fd < 0 || ctx->verbose) {
1388 bpf_map_report(fd, name, map, ctx);
1389 if (fd < 0)
1390 return fd;
1391 }
1392
1393 ret = bpf_place_pinned(fd, name, ctx, map->pinning);
1394 if (ret < 0 && errno != EEXIST) {
1395 fprintf(stderr, "Could not pin %s map: %s\n", name,
1396 strerror(errno));
1397 close(fd);
1398 return ret;
1399 }
1400
1401 return fd;
1402 }
1403
1404 static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
1405 const GElf_Sym *sym)
1406 {
1407 return ctx->str_tab->d_buf + sym->st_name;
1408 }
1409
1410 static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
1411 {
1412 GElf_Sym sym;
1413 int i;
1414
1415 for (i = 0; i < ctx->sym_num; i++) {
1416 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1417 continue;
1418
1419 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1420 GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
1421 sym.st_shndx != ctx->sec_maps ||
1422 sym.st_value / ctx->map_len != which)
1423 continue;
1424
1425 return bpf_str_tab_name(ctx, &sym);
1426 }
1427
1428 return NULL;
1429 }
1430
1431 static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
1432 {
1433 const char *map_name;
1434 int i, fd;
1435
1436 for (i = 0; i < ctx->map_num; i++) {
1437 map_name = bpf_map_fetch_name(ctx, i);
1438 if (!map_name)
1439 return -EIO;
1440
1441 fd = bpf_map_attach(map_name, &ctx->maps[i], ctx);
1442 if (fd < 0)
1443 return fd;
1444
1445 ctx->map_fds[i] = fd;
1446 }
1447
1448 return 0;
1449 }
1450
1451 static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
1452 {
1453 int i, num = 0;
1454 GElf_Sym sym;
1455
1456 for (i = 0; i < ctx->sym_num; i++) {
1457 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1458 continue;
1459
1460 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1461 GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
1462 sym.st_shndx != ctx->sec_maps)
1463 continue;
1464 num++;
1465 }
1466
1467 return num;
1468 }
1469
/* Fetch an ELF section's header, name and payload into *data.
 *
 * Returns 0 on success; -EINVAL when the section index cannot be
 * resolved, -EIO on libelf access errors, and -ENOENT for unnamed
 * or zero-sized sections.
 */
static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	/* Section name is an offset into the section-header string table. */
	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	/* Expect exactly one data descriptor per section; a non-NULL
	 * second elf_getdata() would mean a split payload we don't handle.
	 */
	sec_edata = elf_getdata(sec_fd, NULL);
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}
1501
/* Minimum supported layout of struct bpf_elf_map: the leading members
 * every object file must provide. Objects built against an older,
 * smaller struct are accepted as long as they cover at least these
 * fields; bpf_fetch_maps_end() zero-fills the remaining members.
 */
struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};
1508
/* Record the maps section: remember its index and copy its raw bytes
 * into ctx->maps. At this point ctx->map_num is a byte count; it is
 * converted to an entry count later in bpf_fetch_maps_end(), once the
 * per-entry size is known from the symbol table.
 */
static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
				struct bpf_elf_sec_data *data)
{
	ctx->map_num = data->sec_data->d_size;
	ctx->sec_maps = section;
	ctx->sec_done[section] = true;

	if (ctx->map_num > sizeof(ctx->maps)) {
		fprintf(stderr, "Too many BPF maps in ELF section!\n");
		return -ENOMEM;
	}

	memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
	return 0;
}
1524
/* Verify that the maps section splits cleanly into ctx->map_len sized
 * slots, each backed by a map symbol at exactly that offset. Returns
 * 0 when every slot offset in [0, end) is covered, -1 otherwise.
 */
static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			/* Found a symbol for this slot offset. */
			if (sym.st_value == off)
				break;
			/* Last symbol reached without a match -> hole. */
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return 0;
}
1550
/* Finalize map-section parsing once the symbol table is available:
 * derive the per-entry size (map_len) from section size / symbol
 * count, verify every entry has a symbol at its offset, and convert
 * ctx->map_num from bytes to entries. Entries written with an older,
 * smaller struct bpf_elf_map are fixed up in place by copying each
 * one into a zero-initialized full-size struct.
 *
 * Returns 0 on success, -EINVAL on any layout inconsistency.
 */
static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%u maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	/* Section size must split evenly over the map symbols, and
	 * per-entry size must be __u32-granular.
	 */
	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
		fprintf(stderr, "Number BPF map symbols are not multiple of struct bpf_elf_map!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	/* Exact match: no fixup needed. Larger than ours: unknown,
	 * newer layout. Smaller than the minimum: unusable.
	 */
	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* The fixup leaves the rest of the members as zero, which
		 * is fine currently, but option exist to set some other
		 * default value as well when needed in future.
		 */
		memcpy(&fixup[i], buff, ctx->map_len);
	}

	memcpy(ctx->maps, fixup, sizeof(fixup));

	printf("Note: %zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
	       sizeof(struct bpf_elf_map) - ctx->map_len);
	return 0;
}
1602
1603 static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
1604 struct bpf_elf_sec_data *data)
1605 {
1606 if (data->sec_data->d_size > sizeof(ctx->license))
1607 return -ENOMEM;
1608
1609 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
1610 ctx->sec_done[section] = true;
1611 return 0;
1612 }
1613
1614 static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
1615 struct bpf_elf_sec_data *data)
1616 {
1617 ctx->sym_tab = data->sec_data;
1618 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
1619 ctx->sec_done[section] = true;
1620 return 0;
1621 }
1622
1623 static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
1624 struct bpf_elf_sec_data *data)
1625 {
1626 ctx->str_tab = data->sec_data;
1627 ctx->sec_done[section] = true;
1628 return 0;
1629 }
1630
1631 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
1632 {
1633 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
1634 }
1635
/* Walk all ELF sections and collect the ancillary data needed before
 * program loading: the maps section, the license, and the symbol and
 * string tables. If complete map data was found, fix up the map
 * structs and create/attach all maps in the kernel.
 *
 * Returns a negative value on error. NOTE(review): when the *last*
 * section fails bpf_fill_section_data(), its negative ret is carried
 * through to the final return even if everything else parsed fine —
 * verify this is intended.
 */
static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_sec_data data;
	int i, ret = -1;

	/* Section 0 is the reserved NULL section; start at 1. */
	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0)
			continue;

		if (data.sec_hdr.sh_type == SHT_PROGBITS &&
		    !strcmp(data.sec_name, ELF_SECTION_MAPS))
			ret = bpf_fetch_maps_begin(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
			ret = bpf_fetch_license(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
			 !strcmp(data.sec_name, ".symtab"))
			ret = bpf_fetch_symtab(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_STRTAB &&
			 !strcmp(data.sec_name, ".strtab"))
			ret = bpf_fetch_strtab(ctx, i, &data);
		if (ret < 0) {
			fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
				i);
			return ret;
		}
	}

	if (bpf_has_map_data(ctx)) {
		ret = bpf_fetch_maps_end(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
			return ret;
		}

		ret = bpf_maps_attach_all(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error loading maps into kernel!\n");
			return ret;
		}
	}

	return ret;
}
1681
1682 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
1683 bool *sseen)
1684 {
1685 struct bpf_elf_sec_data data;
1686 struct bpf_elf_prog prog;
1687 int ret, i, fd = -1;
1688
1689 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
1690 if (ctx->sec_done[i])
1691 continue;
1692
1693 ret = bpf_fill_section_data(ctx, i, &data);
1694 if (ret < 0 ||
1695 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
1696 data.sec_hdr.sh_flags & SHF_EXECINSTR &&
1697 !strcmp(data.sec_name, section)))
1698 continue;
1699
1700 *sseen = true;
1701
1702 memset(&prog, 0, sizeof(prog));
1703 prog.type = ctx->type;
1704 prog.insns = data.sec_data->d_buf;
1705 prog.size = data.sec_data->d_size;
1706 prog.license = ctx->license;
1707
1708 fd = bpf_prog_attach(section, &prog, ctx);
1709 if (fd < 0)
1710 return fd;
1711
1712 ctx->sec_done[i] = true;
1713 break;
1714 }
1715
1716 return fd;
1717 }
1718
/* Apply map relocations to a program section: every relocation entry
 * must target a ld64 (BPF_LD | BPF_IMM | BPF_DW) instruction, whose
 * immediate is rewritten with the fd of the referenced map and whose
 * src_reg is set to BPF_PSEUDO_MAP_FD so the kernel resolves it as a
 * map reference.
 *
 * Returns 0 on success, -EIO/-EINVAL on malformed relocation data.
 */
static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
			       struct bpf_elf_sec_data *data_relo,
			       struct bpf_elf_sec_data *data_insn)
{
	Elf_Data *idata = data_insn->sec_data;
	GElf_Shdr *rhdr = &data_relo->sec_hdr;
	int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
	struct bpf_insn *insns = idata->d_buf;
	unsigned int num_insns = idata->d_size / sizeof(*insns);

	for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
		unsigned int ioff, rmap;
		GElf_Rel relo;
		GElf_Sym sym;

		if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
			return -EIO;

		/* r_offset is in bytes; convert to an instruction index. */
		ioff = relo.r_offset / sizeof(struct bpf_insn);
		if (ioff >= num_insns ||
		    insns[ioff].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			fprintf(stderr, "ELF contains relo data for non ld64 instruction at offset %u! Compiler bug?!\n",
				ioff);
			/* A relocated call usually means a non-inlined
			 * helper function; hint at the common fix.
			 */
			if (ioff < num_insns &&
			    insns[ioff].code == (BPF_JMP | BPF_CALL))
				fprintf(stderr, " - Try to annotate functions with always_inline attribute!\n");
			return -EINVAL;
		}

		if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
			return -EIO;
		if (sym.st_shndx != ctx->sec_maps) {
			fprintf(stderr, "ELF contains non-map related relo data in entry %u pointing to section %u! Compiler bug?!\n",
				relo_ent, sym.st_shndx);
			return -EIO;
		}

		/* Map slot index = symbol offset / per-entry size; the
		 * slot's fd must have been created by bpf_maps_attach_all().
		 */
		rmap = sym.st_value / ctx->map_len;
		if (rmap >= ARRAY_SIZE(ctx->map_fds))
			return -EINVAL;
		if (!ctx->map_fds[rmap])
			return -EINVAL;

		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' (%d) injected into prog section \'%s\' at offset %u!\n",
				bpf_str_tab_name(ctx, &sym), ctx->map_fds[rmap],
				data_insn->sec_name, ioff);

		insns[ioff].src_reg = BPF_PSEUDO_MAP_FD;
		insns[ioff].imm = ctx->map_fds[rmap];
	}

	return 0;
}
1773
/* Find a REL section whose target (sh_info) is the executable
 * PROGBITS section named 'section', apply its map relocations and
 * load the patched program into the kernel.
 *
 * Sets *sseen when a matching program section exists; sets *lderr
 * when the kernel load itself failed (so the caller won't retry the
 * relocation-less path). Returns the program fd, or a negative value
 * (-1 when nothing matched).
 */
static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
			       bool *lderr, bool *sseen)
{
	struct bpf_elf_sec_data data_relo, data_insn;
	struct bpf_elf_prog prog;
	int ret, idx, i, fd = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data_relo);
		if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
			continue;

		/* sh_info of a REL section is the index of the section
		 * its relocations apply to.
		 */
		idx = data_relo.sec_hdr.sh_info;

		ret = bpf_fill_section_data(ctx, idx, &data_insn);
		if (ret < 0 ||
		    !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
		      data_insn.sec_hdr.sh_flags & SHF_EXECINSTR &&
		      !strcmp(data_insn.sec_name, section)))
			continue;

		*sseen = true;

		ret = bpf_apply_relo_data(ctx, &data_relo, &data_insn);
		if (ret < 0)
			return ret;

		memset(&prog, 0, sizeof(prog));
		prog.type = ctx->type;
		prog.insns = data_insn.sec_data->d_buf;
		prog.size = data_insn.sec_data->d_size;
		prog.license = ctx->license;

		fd = bpf_prog_attach(section, &prog, ctx);
		if (fd < 0) {
			*lderr = true;
			return fd;
		}

		/* Both the relo section and the program section are done. */
		ctx->sec_done[i] = true;
		ctx->sec_done[idx] = true;
		break;
	}

	return fd;
}
1820
/* Load the program in 'section' into the kernel. The relocation-aware
 * path is preferred whenever map data is present; the plain path is
 * the fallback, unless the relo path already failed at the kernel
 * load step. Returns the program fd or a negative value.
 */
static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
{
	bool load_err = false, sec_seen = false;
	int fd = -1;

	if (bpf_has_map_data(ctx))
		fd = bpf_fetch_prog_relo(ctx, section, &load_err, &sec_seen);

	if (fd < 0 && !load_err)
		fd = bpf_fetch_prog(ctx, section, &sec_seen);

	if (fd < 0 && !sec_seen)
		fprintf(stderr, "Program section \'%s\' not found in ELF file!\n",
			section);

	return fd;
}
1835
1836 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
1837 {
1838 int i;
1839
1840 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
1841 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
1842 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
1843 return i;
1844 return -1;
1845 }
1846
1847 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
1848 {
1849 struct bpf_elf_sec_data data;
1850 uint32_t map_id, key_id;
1851 int fd, i, ret, idx;
1852
1853 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
1854 if (ctx->sec_done[i])
1855 continue;
1856
1857 ret = bpf_fill_section_data(ctx, i, &data);
1858 if (ret < 0)
1859 continue;
1860
1861 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
1862 if (ret != 2)
1863 continue;
1864
1865 idx = bpf_find_map_by_id(ctx, map_id);
1866 if (idx < 0)
1867 continue;
1868
1869 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
1870 if (fd < 0)
1871 return -EIO;
1872
1873 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
1874 &fd, BPF_ANY);
1875 if (ret < 0) {
1876 if (errno == E2BIG)
1877 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
1878 key_id, map_id);
1879 return -errno;
1880 }
1881
1882 ctx->sec_done[i] = true;
1883 }
1884
1885 return 0;
1886 }
1887
1888 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
1889 {
1890 struct stat st;
1891 int ret;
1892
1893 memset(&ctx->stat, 0, sizeof(ctx->stat));
1894
1895 ret = fstat(ctx->obj_fd, &st);
1896 if (ret < 0) {
1897 fprintf(stderr, "Stat of elf file failed: %s\n",
1898 strerror(errno));
1899 return;
1900 }
1901
1902 ctx->stat.st_dev = st.st_dev;
1903 ctx->stat.st_ino = st.st_ino;
1904 }
1905
1906 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
1907 {
1908 char buff[PATH_MAX];
1909
1910 while (fgets(buff, sizeof(buff), fp)) {
1911 char *ptr = buff;
1912
1913 while (*ptr == ' ' || *ptr == '\t')
1914 ptr++;
1915
1916 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
1917 continue;
1918
1919 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
1920 sscanf(ptr, "%i %s #", id, path) != 2) {
1921 strcpy(path, ptr);
1922 return -1;
1923 }
1924
1925 return 1;
1926 }
1927
1928 return 0;
1929 }
1930
1931 static bool bpf_pinning_reserved(uint32_t pinning)
1932 {
1933 switch (pinning) {
1934 case PIN_NONE:
1935 case PIN_OBJECT_NS:
1936 case PIN_GLOBAL_NS:
1937 return true;
1938 default:
1939 return false;
1940 }
1941 }
1942
/* Populate the custom-pinning hash table from db_file, one
 * "<id> <subpath>" entry per line. Reserved ids and entries that
 * fail allocation are skipped with a warning; a corrupted line
 * aborts parsing. A missing file is silently ignored.
 */
static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
{
	struct bpf_hash_entry *entry;
	char subpath[PATH_MAX] = {};
	uint32_t pinning;
	FILE *fp;
	int ret;

	fp = fopen(db_file, "r");
	if (!fp)
		return;

	while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
		if (ret == -1) {
			fprintf(stderr, "Database %s is corrupted at: %s\n",
				db_file, subpath);
			fclose(fp);
			return;
		}

		if (bpf_pinning_reserved(pinning)) {
			fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
				db_file, pinning);
			continue;
		}

		entry = malloc(sizeof(*entry));
		if (!entry) {
			fprintf(stderr, "No memory left for db entry!\n");
			continue;
		}

		entry->pinning = pinning;
		entry->subpath = strdup(subpath);
		if (!entry->subpath) {
			fprintf(stderr, "No memory left for db entry!\n");
			free(entry);
			continue;
		}

		/* Insert at the head of the chain for this hash slot;
		 * slot selection masks the id with the table size
		 * (power of two assumed by the mask — see ctx->ht).
		 */
		entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
		ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
	}

	fclose(fp);
}
1989
1990 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
1991 {
1992 struct bpf_hash_entry *entry;
1993 int i;
1994
1995 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
1996 while ((entry = ctx->ht[i]) != NULL) {
1997 ctx->ht[i] = entry->next;
1998 free((char *)entry->subpath);
1999 free(entry);
2000 }
2001 }
2002 }
2003
2004 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2005 {
2006 if (ctx->elf_hdr.e_type != ET_REL ||
2007 (ctx->elf_hdr.e_machine != EM_NONE &&
2008 ctx->elf_hdr.e_machine != EM_BPF) ||
2009 ctx->elf_hdr.e_version != EV_CURRENT) {
2010 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2011 return -EINVAL;
2012 }
2013
2014 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2015 default:
2016 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2017 return -EINVAL;
2018 case ELFDATA2LSB:
2019 if (htons(1) == 1) {
2020 fprintf(stderr,
2021 "We are big endian, eBPF object is little endian!\n");
2022 return -EIO;
2023 }
2024 break;
2025 case ELFDATA2MSB:
2026 if (htons(1) != 1) {
2027 fprintf(stderr,
2028 "We are little endian, eBPF object is big endian!\n");
2029 return -EIO;
2030 }
2031 break;
2032 }
2033
2034 return 0;
2035 }
2036
/* Initialize the loader context for the object at 'pathname': open
 * the file, begin a libelf read session, validate the ELF header,
 * allocate the per-section done-flags array, pre-allocate the
 * verifier log in verbose mode, and load the pinning database.
 *
 * Cleanup on error unwinds in reverse acquisition order via the
 * goto chain (out_free -> out_elf -> out_fd).
 *
 * Returns 0 on success, negative value on error.
 */
static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
			    enum bpf_prog_type type, bool verbose)
{
	int ret = -EINVAL;

	if (elf_version(EV_CURRENT) == EV_NONE ||
	    bpf_init_env(pathname))
		return ret;

	memset(ctx, 0, sizeof(*ctx));
	ctx->verbose = verbose;
	ctx->type = type;

	ctx->obj_fd = open(pathname, O_RDONLY);
	if (ctx->obj_fd < 0)
		return ctx->obj_fd;

	ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
	if (!ctx->elf_fd) {
		ret = -EINVAL;
		goto out_fd;
	}

	if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
		ret = -EINVAL;
		goto out_fd;
	}

	if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
	    &ctx->elf_hdr) {
		ret = -EIO;
		goto out_elf;
	}

	ret = bpf_elf_check_ehdr(ctx);
	if (ret < 0)
		goto out_elf;

	/* One done-flag per section, used to avoid re-parsing. */
	ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
			       sizeof(*(ctx->sec_done)));
	if (!ctx->sec_done) {
		ret = -ENOMEM;
		goto out_elf;
	}

	/* In verbose mode the verifier log buffer is needed up front. */
	if (ctx->verbose && bpf_log_realloc(ctx)) {
		ret = -ENOMEM;
		goto out_free;
	}

	bpf_save_finfo(ctx);
	bpf_hash_init(ctx, CONFDIR "/bpf_pinning");

	return 0;
out_free:
	free(ctx->sec_done);
out_elf:
	elf_end(ctx->elf_fd);
out_fd:
	close(ctx->obj_fd);
	return ret;
}
2099
2100 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2101 {
2102 int i, count = 0;
2103
2104 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2105 if (!ctx->map_fds[i])
2106 break;
2107 count++;
2108 }
2109
2110 return count;
2111 }
2112
2113 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2114 {
2115 int i;
2116
2117 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2118 if (ctx->map_fds[i])
2119 close(ctx->map_fds[i]);
2120 }
2121 }
2122
2123 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2124 {
2125 if (failure)
2126 bpf_maps_teardown(ctx);
2127
2128 bpf_hash_destroy(ctx);
2129
2130 free(ctx->sec_done);
2131 free(ctx->log);
2132
2133 elf_end(ctx->elf_fd);
2134 close(ctx->obj_fd);
2135 }
2136
/* Single, process-wide ELF loader context, shared between
 * bpf_obj_open() and bpf_send_map_fds(); not thread-safe.
 */
static struct bpf_elf_ctx __ctx;
2138
/* Top-level object loader: initialize the context, fetch ancillary
 * data (maps, license, symbols), load the requested program section
 * and fill any tail-call program arrays.
 *
 * Returns the program fd on success, negative value on error.
 * NOTE(review): fd starts at 0 and "if (fd) close(fd)" treats 0 as
 * "no prog fd yet" — a legitimate fd 0 (stdin closed) would be
 * leaked on a later error; confirm this is acceptable.
 */
static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
			const char *section, bool verbose)
{
	struct bpf_elf_ctx *ctx = &__ctx;
	int fd = 0, ret;

	ret = bpf_elf_ctx_init(ctx, pathname, type, verbose);
	if (ret < 0) {
		fprintf(stderr, "Cannot initialize ELF context!\n");
		return ret;
	}

	ret = bpf_fetch_ancillary(ctx);
	if (ret < 0) {
		fprintf(stderr, "Error fetching ELF ancillary data!\n");
		goto out;
	}

	fd = bpf_fetch_prog_sec(ctx, section);
	if (fd < 0) {
		fprintf(stderr, "Error fetching program/map!\n");
		ret = fd;
		goto out;
	}

	ret = bpf_fill_prog_arrays(ctx);
	if (ret < 0)
		fprintf(stderr, "Error filling program arrays!\n");
out:
	/* On error the context teardown also closes the map fds. */
	bpf_elf_ctx_destroy(ctx, ret < 0);
	if (ret < 0) {
		if (fd)
			close(fd);
		return ret;
	}

	return fd;
}
2177
/* Send 'entries' map fds plus their descriptions over a connected
 * Unix datagram socket, batched in groups of at most BPF_SCM_MAX_FDS.
 * The fds travel as SCM_RIGHTS control data (cmsg_buf set up by
 * bpf_map_set_init()); the map descriptions travel in msg.aux.ent.
 *
 * Returns 0 on success, sendmsg's return (or -1 for a zero-byte
 * send) on failure.
 * NOTE(review): strncpy into obj_name does not guarantee NUL
 * termination when aux->obj fills the buffer exactly — confirm the
 * receiver tolerates that.
 */
static int
bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
		 const struct bpf_map_data *aux, unsigned int entries)
{
	struct bpf_map_set_msg msg = {
		.aux.uds_ver = BPF_SCM_AUX_VER,
		.aux.num_ent = entries,
	};
	int *cmsg_buf, min_fd;
	char *amsg_buf;
	int i;

	strncpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
	memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));

	cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
	amsg_buf = (char *)msg.aux.ent;

	for (i = 0; i < entries; i += min_fd) {
		int ret;

		/* Per-message batch size, capped at BPF_SCM_MAX_FDS. */
		min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
		memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);

		ret = sendmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;
	}

	return 0;
}
2212
/* Receive map fds and their descriptions from a bound Unix datagram
 * socket, the counterpart of bpf_map_set_send(). The actual number
 * of fds per message is derived from the SCM_RIGHTS cmsg length; the
 * total expected count ('needed') is taken from the sender's aux
 * header after the first message.
 *
 * Returns 0 on success, negative value on protocol/recv errors.
 */
static int
bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
		 unsigned int entries)
{
	struct bpf_map_set_msg msg;
	int *cmsg_buf, min_fd;
	char *amsg_buf, *mmsg_buf;
	unsigned int needed = 1;
	int i;

	cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
	amsg_buf = (char *)msg.aux.ent;
	mmsg_buf = (char *)&msg.aux;

	for (i = 0; i < min(entries, needed); i += min_fd) {
		struct cmsghdr *cmsg;
		int ret;

		/* NOTE(review): min(entries, entries - i) looks like it
		 * was meant to cap at BPF_SCM_MAX_FDS as on the send
		 * side; min_fd is recomputed from cmsg_len below, so
		 * this only sizes the receive buffers — verify.
		 */
		min_fd = min(entries, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		ret = recvmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;

		cmsg = CMSG_FIRSTHDR(&msg.hdr);
		if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
			return -EINVAL;
		/* Truncated control data would mean lost fds. */
		if (msg.hdr.msg_flags & MSG_CTRUNC)
			return -EIO;
		if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
			return -ENOSYS;

		/* Number of fds actually carried by this message. */
		min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
		if (min_fd > entries || min_fd <= 0)
			return -EINVAL;

		memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
		memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
		memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));

		needed = aux->num_ent;
	}

	return 0;
}
2259
2260 int bpf_send_map_fds(const char *path, const char *obj)
2261 {
2262 struct bpf_elf_ctx *ctx = &__ctx;
2263 struct sockaddr_un addr = { .sun_family = AF_UNIX };
2264 struct bpf_map_data bpf_aux = {
2265 .fds = ctx->map_fds,
2266 .ent = ctx->maps,
2267 .st = &ctx->stat,
2268 .obj = obj,
2269 };
2270 int fd, ret;
2271
2272 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
2273 if (fd < 0) {
2274 fprintf(stderr, "Cannot open socket: %s\n",
2275 strerror(errno));
2276 return -1;
2277 }
2278
2279 strncpy(addr.sun_path, path, sizeof(addr.sun_path));
2280
2281 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
2282 if (ret < 0) {
2283 fprintf(stderr, "Cannot connect to %s: %s\n",
2284 path, strerror(errno));
2285 return -1;
2286 }
2287
2288 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
2289 bpf_maps_count(ctx));
2290 if (ret < 0)
2291 fprintf(stderr, "Cannot send fds to %s: %s\n",
2292 path, strerror(errno));
2293
2294 bpf_maps_teardown(ctx);
2295 close(fd);
2296 return ret;
2297 }
2298
/* Receive map fds from another process via a Unix datagram socket
 * bound at 'path'. The socket path is unlinked before returning.
 *
 * Fix: the socket fd was leaked when bind() failed; close it on that
 * error path too.
 *
 * Returns 0 on success, -1 (or the recv-side error) on failure.
 */
int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
		     unsigned int entries)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd, ret;

	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (fd < 0) {
		fprintf(stderr, "Cannot open socket: %s\n",
			strerror(errno));
		return -1;
	}

	strncpy(addr.sun_path, path, sizeof(addr.sun_path));

	ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0) {
		fprintf(stderr, "Cannot bind to socket: %s\n",
			strerror(errno));
		close(fd);
		return -1;
	}

	ret = bpf_map_set_recv(fd, fds, aux, entries);
	if (ret < 0)
		fprintf(stderr, "Cannot recv fds from %s: %s\n",
			path, strerror(errno));

	unlink(addr.sun_path);
	close(fd);
	return ret;
}
2330 #endif /* HAVE_ELF */