/*
 * bpf_legacy.c	BPF common code
 *
 *		This program is free software; you can distribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Daniel Borkmann <daniel@iogearbox.net>
 *		Jiri Pirko <jiri@resnulli.us>
 *		Alexei Starovoitov <ast@kernel.org>
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <limits.h>
#include <assert.h>

#ifdef HAVE_ELF
#include <libelf.h>
#include <gelf.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/sendfile.h>
#include <sys/resource.h>

#include <arpa/inet.h>

#include "utils.h"
#include "json_print.h"

#include "bpf_util.h"
#include "bpf_elf.h"
#include "bpf_scm.h"

struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};

static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};

static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type = "cls",
		.subdir = "tc",
		.section = ELF_SECTION_CLASSIFIER,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type = "act",
		.subdir = "tc",
		.section = ELF_SECTION_ACTION,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type = "xdp",
		.subdir = "xdp",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type = "lwt_in",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type = "lwt_out",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type = "lwt_xmit",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
		.type = "lwt_seg6local",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
};

static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].subdir);
	return __bpf_prog_meta[type].subdir;
}

const char *bpf_prog_to_default_section(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].section);
	return __bpf_prog_meta[type].section;
}

#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose);
#else
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif

static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

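/* Thin wrapper around the bpf(2) syscall, invoked via syscall(2) so
 * that iproute2 does not depend on libc exposing its own wrapper.
 */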
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}

static int bpf_map_update(int fd, const void *key, const void *value,
			  uint64_t flags)
{
	union bpf_attr attr = {};

	attr.map_fd = fd;
	attr.key = bpf_ptr_to_u64(key);
	attr.value = bpf_ptr_to_u64(value);
	attr.flags = flags;

	return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int bpf_prog_fd_by_id(uint32_t id)
{
	union bpf_attr attr = {};

	attr.prog_id = id;

	return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
			       uint32_t *info_len)
{
	union bpf_attr attr = {};
	int ret;

	attr.info.bpf_fd = fd;
	attr.info.info = bpf_ptr_to_u64(info);
	attr.info.info_len = *info_len;

	*info_len = 0;
	ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!ret)
		*info_len = attr.info.info_len;

	return ret;
}

int bpf_dump_prog_info(FILE *f, uint32_t id)
{
	struct bpf_prog_info info = {};
	uint32_t len = sizeof(info);
	int fd, ret, dump_ok = 0;
	SPRINT_BUF(tmp);

	open_json_object("prog");
	print_uint(PRINT_ANY, "id", "id %u ", id);

	fd = bpf_prog_fd_by_id(id);
	if (fd < 0)
		goto out;

	ret = bpf_prog_info_by_fd(fd, &info, &len);
	if (!ret && len) {
		int jited = !!info.jited_prog_len;

		print_string(PRINT_ANY, "tag", "tag %s ",
			     hexstring_n2a(info.tag, sizeof(info.tag),
					   tmp, sizeof(tmp)));
		print_uint(PRINT_JSON, "jited", NULL, jited);
		if (jited && !is_json_context())
			fprintf(f, "jited ");
		dump_ok = 1;
	}

	close(fd);
out:
	close_json_object();
	return dump_ok;
}

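/* Reads classic BPF bytecode either directly from the command line or
 * from a file, normalizing newlines and repeated whitespace into the
 * comma-separated "len,code jt jf k,..." form that bpf_ops_parse()
 * below expects.
 */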
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *pos, c_prev = ' ';
		FILE *fp;
		int c;

		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = pos = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		while ((c = fgetc(fp)) != EOF) {
			switch (c) {
			case '\n':
				if (c_prev != ',')
					*(pos++) = ',';
				c_prev = ',';
				break;
			case ' ':
			case '\t':
				if (c_prev != ' ')
					*(pos++) = c;
				c_prev = ' ';
				break;
			default:
				*(pos++) = c;
				c_prev = c;
			}
			if (pos - tmp_string == tmp_len)
				break;
		}

		if (!feof(fp)) {
			free(tmp_string);
			fclose(fp);
			return -E2BIG;
		}

		fclose(fp);
		*pos = 0;

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}

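/* Parses the normalized "len,code jt jf k,..." string into an array of
 * struct sock_filter entries and returns the number of instructions,
 * or a negative errno-style value on malformed input.
 */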
static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}

void bpf_print_ops(struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (len == 0)
		return;

	open_json_object("bytecode");
	print_uint(PRINT_ANY, "length", "bytecode \'%u,", len);
	open_json_array(PRINT_JSON, "insns");

	for (i = 0; i < len; i++) {
		open_json_object(NULL);
		print_hu(PRINT_ANY, "code", "%hu ", ops[i].code);
		print_hhu(PRINT_ANY, "jt", "%hhu ", ops[i].jt);
		print_hhu(PRINT_ANY, "jf", "%hhu ", ops[i].jf);
		if (i == len - 1)
			print_uint(PRINT_ANY, "k", "%u\'", ops[i].k);
		else
			print_uint(PRINT_ANY, "k", "%u,", ops[i].k);
		close_json_object();
	}

	close_json_array(PRINT_JSON, NULL);
	close_json_object();
}

static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}

struct bpf_prog_data {
	unsigned int type;
	unsigned int jited;
};

struct bpf_map_ext {
	struct bpf_prog_data owner;
	unsigned int btf_id_key;
	unsigned int btf_id_val;
};

static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
					  struct bpf_map_ext *ext)
{
	unsigned int val, owner_type = 0, owner_jited = 0;
	char file[PATH_MAX], buff[4096];
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
	memset(map, 0, sizeof(*map));

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			map->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			map->size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			map->size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			map->max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			map->flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
		else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
			owner_jited = val;
	}

	fclose(fp);
	if (ext) {
		memset(ext, 0, sizeof(*ext));
		ext->owner.type = owner_type;
		ext->owner.jited = owner_jited;
	}

	return 0;
}

static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    struct bpf_map_ext *ext, int length,
				    enum bpf_prog_type type)
{
	struct bpf_elf_map tmp, zero = {};
	int ret;

	ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
	if (ret < 0)
		return ret;

	/* The decision to reject this is on kernel side eventually, but
	 * at least give the user a chance to know what's wrong.
	 */
	if (ext->owner.type && ext->owner.type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, ext->owner.type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
		 * so just accept it. We know we do have an eBPF fd and in this
		 * case, everything is 0. It is guaranteed that no such map exists
		 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}

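/* Mounts a bpf filesystem instance on @target. If the initial
 * MS_PRIVATE remount fails with EINVAL, the target is first turned
 * into a mount point via a self bind mount and the remount retried.
 */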
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}

static int bpf_mnt_check_target(const char *target)
{
	int ret;

	ret = mkdir(target, S_IRWXU);
	if (ret && errno != EEXIST)
		fprintf(stderr, "mkdir %s failed: %s\n", target,
			strerror(errno));

	return ret;
}

static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0)
		return -ENOENT;
	if ((unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}

static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
					 int len, const char *mntpt)
{
	int ret;

	ret = bpf_valid_mntpt(mntpt, magic);
	if (!ret) {
		strlcpy(mnt, mntpt, len);
		return mnt;
	}

	return NULL;
}

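/* Looks up a mount point of the given filesystem type: first among the
 * well-known candidates in @known_mnts, then by scanning /proc/mounts.
 * @mnt must be PATH_MAX bytes for the /proc/mounts fallback to kick in.
 */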
static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
				  char *mnt, int len,
				  const char * const *known_mnts)
{
	const char * const *ptr;
	char type[100];
	FILE *fp;

	if (known_mnts) {
		ptr = known_mnts;
		while (*ptr) {
			if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
				return mnt;
			ptr++;
		}
	}

	if (len != PATH_MAX)
		return NULL;

	fp = fopen("/proc/mounts", "r");
	if (fp == NULL)
		return NULL;

	while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
		      mnt, type) == 2) {
		if (strcmp(type, fstype) == 0)
			break;
	}

	fclose(fp);
	if (strcmp(type, fstype) != 0)
		return NULL;

	return mnt;
}

int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	int fd_in, fd_out = STDERR_FILENO;
	char tpipe[PATH_MAX];
	const char *mnt;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd_in = open(tpipe, O_RDONLY);
	if (fd_in < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd_in, buff, sizeof(buff));
		if (ret > 0 && write(fd_out, buff, ret) == ret)
			continue;
		break;
	}

	close(fd_in);
	return -1;
}

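/* The helpers below create the working directory hierarchy under the
 * bpf fs mount: one "master" subdirectory with a globals/ subdir, and
 * per-type "slave" entries that are symlinked (or, failing that, bind
 * mounted) back to it so that all types share the same pinning space.
 */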
static int bpf_gen_global(const char *bpf_sub_dir)
{
	char bpf_glo_dir[PATH_MAX];
	int ret;

	snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
		 bpf_sub_dir, BPF_DIR_GLOBALS);

	ret = mkdir(bpf_glo_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
			strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_gen_master(const char *base, const char *name)
{
	char bpf_sub_dir[PATH_MAX + NAME_MAX + 1];
	int ret;

	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);

	ret = mkdir(bpf_sub_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
			strerror(errno));
		return ret;
	}

	return bpf_gen_global(bpf_sub_dir);
}

static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int ret;

	ret = mkdir(full_name, S_IRWXU);
	if (ret) {
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return ret;
	}

	ret = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (ret) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return ret;
}

static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char bpf_lnk_dir[PATH_MAX + NAME_MAX + 1];
	char bpf_sub_dir[PATH_MAX + NAME_MAX];
	struct stat sb = {};
	int ret;

	snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				return ret;
			}

			return bpf_slave_via_bind_mnt(bpf_sub_dir,
						      bpf_lnk_dir);
		}

		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			return ret;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK)
			return bpf_gen_global(bpf_sub_dir);
	}

	return 0;
}

static int bpf_gen_hierarchy(const char *base)
{
	int ret, i;

	ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
	for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
		ret = bpf_gen_slave(base,
				    bpf_prog_to_subdir(__bpf_types[i]),
				    bpf_prog_to_subdir(__bpf_types[0]));
	return ret;
}

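/* Resolves (and caches) the bpf fs working directory, mounting the fs
 * and creating the subdirectory hierarchy on first use. With a non-zero
 * @type, the per-type subdirectory is appended to the returned path.
 */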
static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	const char *mnt_env = getenv(BPF_ENV_MNT);
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	if (mnt_env)
		mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
					    sizeof(bpf_tmp), mnt_env);
	else
		mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
				     sizeof(bpf_tmp), bpf_known_mnts);
	if (!mnt) {
		mnt = mnt_env ? : BPF_DIR_MNT;
		ret = bpf_mnt_check_target(mnt);
		if (!ret)
			ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	ret = snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);
	if (ret < 0 || ret >= sizeof(bpf_wrk_dir)) {
		mnt = NULL;
		goto out;
	}

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	bpf_mnt_cached = true;
	return mnt;
}

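/* Retrieves a pinned object via BPF_OBJ_GET. Paths prefixed with "m:"
 * are taken relative to the per-type working directory on the bpf fs.
 */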
static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
{
	union bpf_attr attr = {};
	char tmp[PATH_MAX];

	if (strlen(pathname) > 2 && pathname[0] == 'm' &&
	    pathname[1] == ':' && bpf_get_work_dir(type)) {
		snprintf(tmp, sizeof(tmp), "%s/%s",
			 bpf_get_work_dir(type), pathname + 2);
		pathname = tmp;
	}

	attr.pathname = bpf_ptr_to_u64(pathname);

	return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
{
	int prog_fd = bpf_obj_get(pathname, type);

	if (prog_fd < 0)
		fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
			pathname, strerror(errno));
	return prog_fd;
}

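/* Parses the command line syntax shared by the tc/ip front ends: the
 * mode keyword (bytecode/bc, bytecode-file/bcf, object-file/obj,
 * object-pinned/pinned/fd) followed by its mode-specific arguments
 * such as "type", "section", "export" and "verbose".
 */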
static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		cfg->mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		cfg->mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		cfg->mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		cfg->mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						cfg->type = i;
						break;
					}
				}

				if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				cfg->type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		section = bpf_prog_to_default_section(cfg->type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		if (__bpf_prog_meta[cfg->type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
		ret = bpf_ops_parse(argc, argv, cfg->opcodes,
				    cfg->mode == CBPF_FILE);
		cfg->n_opcodes = ret;
	} else if (cfg->mode == EBPF_OBJECT) {
		ret = 0; /* program will be loaded by load stage */
	} else if (cfg->mode == EBPF_PINNED) {
		ret = bpf_obj_pinned(file, cfg->type);
		cfg->prog_fd = ret;
	} else {
		return -1;
	}

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;
	cfg->verbose = verbose;

	return ret;
}

static int bpf_do_load(struct bpf_cfg_in *cfg)
{
	if (cfg->mode == EBPF_OBJECT) {
#ifdef HAVE_LIBBPF
		return iproute2_load_libbpf(cfg);
#endif
		cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
					    cfg->section, cfg->ifindex,
					    cfg->verbose);
		return cfg->prog_fd;
	}
	return 0;
}

int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
		    void *nl)
{
	char annotation[256];
	int ret;

	ret = bpf_do_load(cfg);
	if (ret < 0)
		return ret;

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
		ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), cfg->mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, cfg->prog_fd, annotation);
	}

	return 0;
}

int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
{
	bool opt_tbl[BPF_MODE_MAX] = {};

	if (ops->cbpf_cb) {
		opt_tbl[CBPF_BYTECODE] = true;
		opt_tbl[CBPF_FILE] = true;
	}

	if (ops->ebpf_cb) {
		opt_tbl[EBPF_OBJECT] = true;
		opt_tbl[EBPF_PINNED] = true;
	}

	return bpf_do_parse(cfg, opt_tbl);
}

int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
			      const struct bpf_cfg_ops *ops, void *nl)
{
	int ret;

	ret = bpf_parse_common(cfg, ops);
	if (ret < 0)
		return ret;

	return bpf_load_common(cfg, ops, nl);
}

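/* Loads (or retrieves) a program and grafts it into a pinned
 * BPF_MAP_TYPE_PROG_ARRAY at the given key; if no key is passed, it
 * is inferred from the section name ("<parent>/<key>").
 */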
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.type = BPF_PROG_TYPE_UNSPEC,
		.argc = argc,
		.argv = argv,
	};
	struct bpf_map_ext ext = {};
	int ret, prog_fd, map_fd;
	uint32_t map_key;

	ret = bpf_do_parse(&cfg, opt_tbl);
	if (ret < 0)
		return ret;

	ret = bpf_do_load(&cfg);
	if (ret < 0)
		return ret;

	prog_fd = cfg.prog_fd;

	if (key) {
		map_key = *key;
	} else {
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, cfg.type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
				       offsetof(struct bpf_elf_map, max_elem),
				       cfg.type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}

int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

int bpf_prog_load_dev(enum bpf_prog_type type, const struct bpf_insn *insns,
		      size_t size_insns, const char *license, __u32 ifindex,
		      char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.prog_type = type;
	attr.insns = bpf_ptr_to_u64(insns);
	attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
	attr.license = bpf_ptr_to_u64(license);
	attr.prog_ifindex = ifindex;

	if (size_log > 0) {
		attr.log_buf = bpf_ptr_to_u64(log);
		attr.log_size = size_log;
		attr.log_level = 1;
	}

	return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

#ifdef HAVE_ELF
struct bpf_elf_prog {
	enum bpf_prog_type type;
	struct bpf_insn *insns;
	unsigned int insns_num;
	size_t size;
	const char *license;
};

struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

struct bpf_config {
	unsigned int jit_enabled;
};

struct bpf_btf {
	const struct btf_header *hdr;
	const void *raw;
	const char *strings;
	const struct btf_type **types;
	int types_num;
};

struct bpf_elf_ctx {
	struct bpf_config cfg;
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	Elf_Data *btf_data;
	char obj_uid[64];
	int obj_fd;
	int btf_fd;
	int map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	struct bpf_map_ext maps_ext[ELF_MAX_MAPS];
	struct bpf_elf_prog prog_text;
	struct bpf_btf btf;
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;
	int sec_maps;
	int sec_text;
	int sec_btf;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	__u32 ifindex;
	bool verbose;
	bool noafalg;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];
	char *log;
	size_t log_size;
};

struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};

static bool bpf_log_has_data(struct bpf_elf_ctx *ctx)
{
	return ctx->log && ctx->log[0];
}

static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (bpf_log_has_data(ctx)) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}

static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
{
	const size_t log_max = UINT_MAX >> 8;
	size_t log_size = ctx->log_size;
	char *ptr;

	if (!ctx->log) {
		log_size = 65536;
	} else if (log_size < log_max) {
		log_size <<= 1;
		if (log_size > log_max)
			log_size = log_max;
	} else {
		return -EINVAL;
	}

	ptr = realloc(ctx->log, log_size);
	if (!ptr)
		return -ENOMEM;

	ptr[0] = 0;
	ctx->log = ptr;
	ctx->log_size = log_size;

	return 0;
}

static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
			  uint32_t size_value, uint32_t max_elem,
			  uint32_t flags, int inner_fd, int btf_fd,
			  uint32_t ifindex, uint32_t btf_id_key,
			  uint32_t btf_id_val)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = size_key;
	attr.value_size = inner_fd ? sizeof(int) : size_value;
	attr.max_entries = max_elem;
	attr.map_flags = flags;
	attr.inner_map_fd = inner_fd;
	attr.map_ifindex = ifindex;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = btf_id_key;
	attr.btf_value_type_id = btf_id_val;

	return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

static int bpf_btf_load(void *btf, size_t size_btf,
			char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.btf = bpf_ptr_to_u64(btf);
	attr.btf_size = size_btf;

	if (size_log > 0) {
		attr.btf_log_buf = bpf_ptr_to_u64(log);
		attr.btf_log_size = size_log;
		attr.btf_log_level = 1;
	}

	return bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
}

static int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr = {};

	attr.pathname = bpf_ptr_to_u64(pathname);
	attr.bpf_fd = fd;

	return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

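/* Computes a SHA-1 digest of the ELF object file through the AF_ALG
 * socket interface; the digest serves as the object's unique ID
 * (obj_uid) for object-namespace pinning. @len must be 20 bytes.
 */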
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0)
		return cfd;

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0)
		goto out_cfd;

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}

static void bpf_init_env(void)
{
	struct rlimit limit = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	/* Don't bother in case we fail! */
	setrlimit(RLIMIT_MEMLOCK, &limit);

	if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC))
		fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
}

static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
				      uint32_t pinning)
{
	struct bpf_hash_entry *entry;

	entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
	while (entry && entry->pinning != pinning)
		entry = entry->next;

	return entry ? entry->subpath : NULL;
}

static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
			   uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
	case PIN_GLOBAL_NS:
		return false;
	case PIN_NONE:
		return true;
	default:
		return !bpf_custom_pinning(ctx, pinning);
	}
}

static void bpf_make_pathname(char *pathname, size_t len, const char *name,
			      const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 ctx->obj_uid, name);
		break;
	case PIN_GLOBAL_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 BPF_DIR_GLOBALS, name);
		break;
	default:
		snprintf(pathname, len, "%s/../%s/%s",
			 bpf_get_work_dir(ctx->type),
			 bpf_custom_pinning(ctx, pinning), name);
		break;
	}
}

static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
			    uint32_t pinning)
{
	char pathname[PATH_MAX];

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_get(pathname, ctx->type);
}

static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
{
	char tmp[PATH_MAX];
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
		 ctx->obj_uid);

	ret = mkdir(tmp, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
				const char *todo)
{
	char tmp[PATH_MAX], rem[PATH_MAX], *sub;
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
	snprintf(rem, sizeof(rem), "%s/", todo);
	sub = strtok(rem, "/");

	while (sub) {
		if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
			return -EINVAL;

		strcat(tmp, sub);
		strcat(tmp, "/");

		ret = mkdir(tmp, S_IRWXU);
		if (ret && errno != EEXIST) {
			fprintf(stderr, "mkdir %s failed: %s\n", tmp,
				strerror(errno));
			return ret;
		}

		sub = strtok(NULL, "/");
	}

	return 0;
}

static int bpf_place_pinned(int fd, const char *name,
			    const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	char pathname[PATH_MAX];
	const char *tmp;
	int ret = 0;

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	if (pinning == PIN_OBJECT_NS)
		ret = bpf_make_obj_path(ctx);
	else if ((tmp = bpf_custom_pinning(ctx, pinning)))
		ret = bpf_make_custom_path(ctx, tmp);
	if (ret < 0)
		return ret;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_pin(fd, pathname);
}

static void bpf_prog_report(int fd, const char *section,
			    const struct bpf_elf_prog *prog,
			    struct bpf_elf_ctx *ctx)
{
	unsigned int insns = prog->size / sizeof(struct bpf_insn);

	fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", prog->type);
	fprintf(stderr, " - Instructions: %u (%u over limit)\n",
		insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
	fprintf(stderr, " - License: %s\n\n", prog->license);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
			       prog->license, ctx->ifindex,
			       ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}

static void bpf_map_report(int fd, const char *name,
			   const struct bpf_elf_map *map,
			   struct bpf_elf_ctx *ctx, int inner_fd)
{
	fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", map->type);
	fprintf(stderr, " - Identifier: %u\n", map->id);
	fprintf(stderr, " - Pinning: %u\n", map->pinning);
	fprintf(stderr, " - Size key: %u\n", map->size_key);
	fprintf(stderr, " - Size value: %u\n",
		inner_fd ? (int)sizeof(int) : map->size_value);
	fprintf(stderr, " - Max elems: %u\n", map->max_elem);
	fprintf(stderr, " - Flags: %#x\n\n", map->flags);
}

static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
{
	int i;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].id != id)
			continue;
		if (ctx->map_fds[i] < 0)
			return -EINVAL;

		return ctx->map_fds[i];
	}

	return -ENOENT;
}

static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
{
	struct bpf_elf_map outer_map;
	int ret;

	fprintf(stderr, "Cannot insert map into map! ");

	ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
	if (!ret) {
		if (idx >= outer_map.max_elem &&
		    outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
			fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
				outer_map.max_elem, idx);
			return;
		}
	}

	fprintf(stderr, "Different map specs used for outer and inner map?\n");
}

static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
{
	return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
}

static bool bpf_map_offload_neutral(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

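/* Creates or reuses a single map: a pinned instance is preferred when
 * its specification passes the self-check; map-in-map outer maps whose
 * inner map fd cannot be resolved yet are deferred to a second pass by
 * the caller.
 */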
static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
			  const struct bpf_elf_map *map, struct bpf_map_ext *ext,
			  int *have_map_in_map)
{
	int fd, ifindex, ret, map_inner_fd = 0;
	bool retried = false;

probe:
	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map, ext,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map \'%s\' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map \'%s\' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd, ctx->btf_fd,
			    ifindex, ext->btf_id_key, ext->btf_id_val);

	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0) {
		close(fd);
		if (!retried && errno == EEXIST) {
			retried = true;
			goto probe;
		}
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		return ret;
	}

	return fd;
}

static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
				    const GElf_Sym *sym)
{
	return ctx->str_tab->d_buf + sym->st_name;
}

static int bpf_btf_find(struct bpf_elf_ctx *ctx, const char *name)
{
	const struct btf_type *type;
	const char *res;
	int id;

	for (id = 1; id < ctx->btf.types_num; id++) {
		type = ctx->btf.types[id];
		if (type->name_off >= ctx->btf.hdr->str_len)
			continue;
		res = &ctx->btf.strings[type->name_off];
		if (!strcmp(res, name))
			return id;
	}

	return -ENOENT;
}

static int bpf_btf_find_kv(struct bpf_elf_ctx *ctx, const struct bpf_elf_map *map,
			   const char *name, uint32_t *id_key, uint32_t *id_val)
{
	const struct btf_member *key, *val;
	const struct btf_type *type;
	char btf_name[512];
	const char *res;
	int id;

	snprintf(btf_name, sizeof(btf_name), "____btf_map_%s", name);
	id = bpf_btf_find(ctx, btf_name);
	if (id < 0)
		return id;

	type = ctx->btf.types[id];
	if (BTF_INFO_KIND(type->info) != BTF_KIND_STRUCT)
		return -EINVAL;
	if (BTF_INFO_VLEN(type->info) != 2)
		return -EINVAL;

	key = ((void *) type) + sizeof(*type);
	val = key + 1;
	if (!key->type || key->type >= ctx->btf.types_num ||
	    !val->type || val->type >= ctx->btf.types_num)
		return -EINVAL;

	if (key->name_off >= ctx->btf.hdr->str_len ||
	    val->name_off >= ctx->btf.hdr->str_len)
		return -EINVAL;

	res = &ctx->btf.strings[key->name_off];
	if (strcmp(res, "key"))
		return -EINVAL;

	res = &ctx->btf.strings[val->name_off];
	if (strcmp(res, "value"))
		return -EINVAL;

	*id_key = key->type;
	*id_val = val->type;
	return 0;
}

static void bpf_btf_annotate(struct bpf_elf_ctx *ctx, int which, const char *name)
{
	uint32_t id_key = 0, id_val = 0;

	if (!bpf_btf_find_kv(ctx, &ctx->maps[which], name, &id_key, &id_val)) {
		ctx->maps_ext[which].btf_id_key = id_key;
		ctx->maps_ext[which].btf_id_val = id_val;
	}
}

static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
{
	const char *name;
	GElf_Sym sym;
	int i;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps ||
		    sym.st_value / ctx->map_len != which)
			continue;

		name = bpf_str_tab_name(ctx, &sym);
		bpf_btf_annotate(ctx, which, name);
		return name;
	}

	return NULL;
}

static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
		    ctx->noafalg) {
			fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
			return -ENOTSUP;
		}

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], &have_map_in_map);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_idx);
				return ret;
			}
		}
	}

	return 0;
}

static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
{
	int i, num = 0;
	GElf_Sym sym;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps)
			continue;
		num++;
	}

	return num;
}

static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	sec_edata = elf_getdata(sec_fd, NULL);
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}

struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};

static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
				struct bpf_elf_sec_data *data)
{
	ctx->map_num = data->sec_data->d_size;
	ctx->sec_maps = section;
	ctx->sec_done[section] = true;

	if (ctx->map_num > sizeof(ctx->maps)) {
		fprintf(stderr, "Too many BPF maps in ELF section!\n");
		return -ENOMEM;
	}

	memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
	return 0;
}

static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			int type;

			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;

			type = GELF_ST_TYPE(sym.st_info);
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    (type != STT_NOTYPE && type != STT_OBJECT) ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}

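/* Post-processes the maps section once symbols are known: derives the
 * per-entry size from the section size and symbol count, and fixes up
 * entries written against an older, smaller struct bpf_elf_map layout.
 */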
static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%u maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
1978 fprintf(stderr, "Number BPF map symbols are not multiple of struct bpf_elf_map!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* The fixup leaves the rest of the members as zero, which
		 * is fine currently, but the option exists to set some other
		 * default value as well when needed in future.
		 */
		memcpy(&fixup[i], buff, ctx->map_len);
	}

	memcpy(ctx->maps, fixup, sizeof(fixup));
	if (ctx->verbose)
		printf("%zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
		       sizeof(struct bpf_elf_map) - ctx->map_len);
	return 0;
}

static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
			     struct bpf_elf_sec_data *data)
{
	if (data->sec_data->d_size > sizeof(ctx->license))
		return -ENOMEM;

	memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
			    struct bpf_elf_sec_data *data)
{
	ctx->sym_tab = data->sec_data;
	ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
			    struct bpf_elf_sec_data *data)
{
	ctx->str_tab = data->sec_data;
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_text(struct bpf_elf_ctx *ctx, int section,
			  struct bpf_elf_sec_data *data)
{
	ctx->sec_text = section;
	ctx->sec_done[section] = true;
	return 0;
}

static void bpf_btf_report(int fd, struct bpf_elf_ctx *ctx)
{
	fprintf(stderr, "\nBTF debug data section \'.BTF\' %s%s (%d)!\n",
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Length: %zu\n", ctx->btf_data->d_size);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

static int bpf_btf_attach(struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_btf_load(ctx->btf_data->d_buf, ctx->btf_data->d_size,
			  ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		if (bpf_log_has_data(ctx))
			bpf_btf_report(fd, ctx);
	}

	return fd;
}

static int bpf_fetch_btf_begin(struct bpf_elf_ctx *ctx, int section,
			       struct bpf_elf_sec_data *data)
{
	ctx->btf_data = data->sec_data;
	ctx->sec_btf = section;
	ctx->sec_done[section] = true;
	return 0;
}

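/* Validates the .BTF section header: magic, version, flags, and the
 * consistency of the type/string sub-section offsets and lengths.
 */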
2097 static int bpf_btf_check_header(struct bpf_elf_ctx *ctx)
2098 {
2099 const struct btf_header *hdr = ctx->btf_data->d_buf;
2100 const char *str_start, *str_end;
2101 unsigned int data_len;
2102
2103 if (hdr->magic != BTF_MAGIC) {
2104 fprintf(stderr, "Object has wrong BTF magic: %x, expected: %x!\n",
2105 hdr->magic, BTF_MAGIC);
2106 return -EINVAL;
2107 }
2108
2109 if (hdr->version != BTF_VERSION) {
2110 fprintf(stderr, "Object has wrong BTF version: %u, expected: %u!\n",
2111 hdr->version, BTF_VERSION);
2112 return -EINVAL;
2113 }
2114
2115 if (hdr->flags) {
2116 fprintf(stderr, "Object has unsupported BTF flags %x!\n",
2117 hdr->flags);
2118 return -EINVAL;
2119 }
2120
2121 data_len = ctx->btf_data->d_size - sizeof(*hdr);
2122 if (data_len < hdr->type_off ||
2123 data_len < hdr->str_off ||
2124 data_len < hdr->type_len + hdr->str_len ||
2125 hdr->type_off >= hdr->str_off ||
2126 hdr->type_off + hdr->type_len != hdr->str_off ||
2127 hdr->str_off + hdr->str_len != data_len ||
2128 (hdr->type_off & (sizeof(uint32_t) - 1))) {
2129 fprintf(stderr, "Object has malformed BTF data!\n");
2130 return -EINVAL;
2131 }
2132
2133 ctx->btf.hdr = hdr;
2134 ctx->btf.raw = hdr + 1;
2135
2136 str_start = ctx->btf.raw + hdr->str_off;
2137 str_end = str_start + hdr->str_len;
2138 if (!hdr->str_len ||
2139 hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
2140 str_start[0] || str_end[-1]) {
2141 fprintf(stderr, "Object has malformed BTF string data!\n");
2142 return -EINVAL;
2143 }
2144
2145 ctx->btf.strings = str_start;
2146 return 0;
2147 }
2148
2149 static int bpf_btf_register_type(struct bpf_elf_ctx *ctx,
2150 const struct btf_type *type)
2151 {
2152 int cur = ctx->btf.types_num, num = cur + 1;
2153 const struct btf_type **types;
2154
2155 types = realloc(ctx->btf.types, num * sizeof(type));
2156 if (!types) {
2157 free(ctx->btf.types);
2158 ctx->btf.types = NULL;
2159 ctx->btf.types_num = 0;
2160 return -ENOMEM;
2161 }
2162
2163 ctx->btf.types = types;
2164 ctx->btf.types[cur] = type;
2165 ctx->btf.types_num = num;
2166 return 0;
2167 }
2168
2169 static struct btf_type btf_type_void;
2170
2171 static int bpf_btf_prep_type_data(struct bpf_elf_ctx *ctx)
2172 {
2173 const void *type_cur = ctx->btf.raw + ctx->btf.hdr->type_off;
2174 const void *type_end = ctx->btf.raw + ctx->btf.hdr->str_off;
2175 const struct btf_type *type;
2176 uint16_t var_len;
2177 int ret, kind;
2178
2179 ret = bpf_btf_register_type(ctx, &btf_type_void);
2180 if (ret < 0)
2181 return ret;
2182
2183 while (type_cur < type_end) {
2184 type = type_cur;
2185 type_cur += sizeof(*type);
2186
2187 var_len = BTF_INFO_VLEN(type->info);
2188 kind = BTF_INFO_KIND(type->info);
2189
2190 switch (kind) {
2191 case BTF_KIND_INT:
2192 type_cur += sizeof(int);
2193 break;
2194 case BTF_KIND_ARRAY:
2195 type_cur += sizeof(struct btf_array);
2196 break;
2197 case BTF_KIND_STRUCT:
2198 case BTF_KIND_UNION:
2199 type_cur += var_len * sizeof(struct btf_member);
2200 break;
2201 case BTF_KIND_ENUM:
2202 type_cur += var_len * sizeof(struct btf_enum);
2203 break;
2204 case BTF_KIND_FUNC_PROTO:
2205 type_cur += var_len * sizeof(struct btf_param);
2206 break;
2207 case BTF_KIND_TYPEDEF:
2208 case BTF_KIND_PTR:
2209 case BTF_KIND_FWD:
2210 case BTF_KIND_VOLATILE:
2211 case BTF_KIND_CONST:
2212 case BTF_KIND_RESTRICT:
2213 case BTF_KIND_FUNC:
2214 break;
2215 default:
2216 fprintf(stderr, "Object has unknown BTF type: %u!\n", kind);
2217 return -EINVAL;
2218 }
2219
2220 ret = bpf_btf_register_type(ctx, type);
2221 if (ret < 0)
2222 return ret;
2223 }
2224
2225 return 0;
2226 }
2227
2228 static int bpf_btf_prep_data(struct bpf_elf_ctx *ctx)
2229 {
2230 int ret = bpf_btf_check_header(ctx);
2231
2232 if (!ret)
2233 return bpf_btf_prep_type_data(ctx);
2234 return ret;
2235 }
2236
2237 static void bpf_fetch_btf_end(struct bpf_elf_ctx *ctx)
2238 {
2239 int fd = bpf_btf_attach(ctx);
2240
2241 if (fd < 0)
2242 return;
2243 ctx->btf_fd = fd;
2244 if (bpf_btf_prep_data(ctx) < 0) {
2245 close(ctx->btf_fd);
2246 ctx->btf_fd = 0;
2247 }
2248 }
2249
2250 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
2251 {
2252 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
2253 }
2254
2255 static bool bpf_has_btf_data(const struct bpf_elf_ctx *ctx)
2256 {
2257 return ctx->sec_btf;
2258 }
2259
2260 static bool bpf_has_call_data(const struct bpf_elf_ctx *ctx)
2261 {
2262 return ctx->sec_text;
2263 }
2264
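/* First pass over the object: collect everything the program sections will
 * depend on (maps, license, .text for pseudo calls, symbol/string tables,
 * .BTF), then create the maps in the kernel so relocation can patch their
 * fds into the instructions later. check_text_sec is false when .text
 * itself is the section being loaded, so that it is not consumed here.
 */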
2265 static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx, bool check_text_sec)
2266 {
2267 struct bpf_elf_sec_data data;
2268 int i, ret = -1;
2269
2270 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2271 ret = bpf_fill_section_data(ctx, i, &data);
2272 if (ret < 0)
2273 continue;
2274
2275 if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2276 !strcmp(data.sec_name, ELF_SECTION_MAPS))
2277 ret = bpf_fetch_maps_begin(ctx, i, &data);
2278 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2279 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
2280 ret = bpf_fetch_license(ctx, i, &data);
2281 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2282 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2283 !strcmp(data.sec_name, ".text") &&
2284 check_text_sec)
2285 ret = bpf_fetch_text(ctx, i, &data);
2286 else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
2287 !strcmp(data.sec_name, ".symtab"))
2288 ret = bpf_fetch_symtab(ctx, i, &data);
2289 else if (data.sec_hdr.sh_type == SHT_STRTAB &&
2290 !strcmp(data.sec_name, ".strtab"))
2291 ret = bpf_fetch_strtab(ctx, i, &data);
2292 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2293 !strcmp(data.sec_name, ".BTF"))
2294 ret = bpf_fetch_btf_begin(ctx, i, &data);
2295 if (ret < 0) {
2296 fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
2297 i);
2298 return ret;
2299 }
2300 }
2301
2302 if (bpf_has_btf_data(ctx))
2303 bpf_fetch_btf_end(ctx);
2304 if (bpf_has_map_data(ctx)) {
2305 ret = bpf_fetch_maps_end(ctx);
2306 if (ret < 0) {
2307 fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
2308 return ret;
2309 }
2310
2311 ret = bpf_maps_attach_all(ctx);
2312 if (ret < 0) {
2313 fprintf(stderr, "Error loading maps into kernel!\n");
2314 return ret;
2315 }
2316 }
2317
2318 return ret;
2319 }
2320
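/* Load a program section that needs no relocation: find the first not yet
 * consumed executable PROGBITS section with the requested name and attach
 * its instructions directly from the libelf section data.
 */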
2321 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
2322 bool *sseen)
2323 {
2324 struct bpf_elf_sec_data data;
2325 struct bpf_elf_prog prog;
2326 int ret, i, fd = -1;
2327
2328 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2329 if (ctx->sec_done[i])
2330 continue;
2331
2332 ret = bpf_fill_section_data(ctx, i, &data);
2333 if (ret < 0 ||
2334 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
2335 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2336 !strcmp(data.sec_name, section)))
2337 continue;
2338
2339 *sseen = true;
2340
2341 memset(&prog, 0, sizeof(prog));
2342 prog.type = ctx->type;
2343 prog.license = ctx->license;
2344 prog.size = data.sec_data->d_size;
2345 prog.insns_num = prog.size / sizeof(struct bpf_insn);
2346 prog.insns = data.sec_data->d_buf;
2347
2348 fd = bpf_prog_attach(section, &prog, ctx);
2349 if (fd < 0)
2350 return fd;
2351
2352 ctx->sec_done[i] = true;
2353 break;
2354 }
2355
2356 return fd;
2357 }
2358
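/* Bookkeeping shared by the relocation handlers below: tc counts how many
 * tail-call (prog array) maps the program references and how many of those
 * already have a JITed owner, enabling a precise diagnostic if the kernel
 * rejects a mixed JIT setup; main_num records the instruction offset at
 * which .text was appended to the main program (0 means not yet appended).
 */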
2359 struct bpf_relo_props {
2360 struct bpf_tail_call {
2361 unsigned int total;
2362 unsigned int jited;
2363 } tc;
2364 int main_num;
2365 };
2366
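/* Resolve one map relocation: the symbol's st_value is its byte offset
 * into the maps section, so dividing by the object's map struct size gives
 * the map slot. The ld64 (BPF_LD | BPF_IMM | BPF_DW) instruction at the
 * relocated offset is rewritten in place to carry the map fd in imm, with
 * src_reg = BPF_PSEUDO_MAP_FD telling the kernel to interpret it as such.
 * At the source level this corresponds to an ordinary map reference, e.g.
 * (an illustrative sketch; __section and the map_lookup_elem wrapper come
 * from the tc example headers, not from this file):
 *
 *	struct bpf_elf_map __section("maps") map_foo = { ... };
 *	...
 *	val = map_lookup_elem(&map_foo, &key);
 *
 * where &map_foo is what the compiler emits the ld64 plus relocation for.
 */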
2367 static int bpf_apply_relo_map(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2368 GElf_Rel *relo, GElf_Sym *sym,
2369 struct bpf_relo_props *props)
2370 {
2371 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2372 unsigned int map_idx = sym->st_value / ctx->map_len;
2373
2374 if (insn_off >= prog->insns_num)
2375 return -EINVAL;
2376 if (prog->insns[insn_off].code != (BPF_LD | BPF_IMM | BPF_DW)) {
2377 fprintf(stderr, "ELF contains relo data for non-ld64 instruction at offset %u! Compiler bug?!\n",
2378 insn_off);
2379 return -EINVAL;
2380 }
2381
2382 if (map_idx >= ARRAY_SIZE(ctx->map_fds))
2383 return -EINVAL;
2384 if (!ctx->map_fds[map_idx])
2385 return -EINVAL;
2386 if (ctx->maps[map_idx].type == BPF_MAP_TYPE_PROG_ARRAY) {
2387 props->tc.total++;
2388 if (ctx->maps_ext[map_idx].owner.jited ||
2389 (ctx->maps_ext[map_idx].owner.type == 0 &&
2390 ctx->cfg.jit_enabled))
2391 props->tc.jited++;
2392 }
2393
2394 prog->insns[insn_off].src_reg = BPF_PSEUDO_MAP_FD;
2395 prog->insns[insn_off].imm = ctx->map_fds[map_idx];
2396 return 0;
2397 }
2398
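/* Resolve one call relocation against .text: the first such relocation
 * appends the entire relocated .text section behind the main program and
 * records where it starts in props->main_num; later ones reuse that copy.
 * The call's imm is then adjusted by (main_num - insn_off), making it
 * point, relative to the call site, at the callee inside the appended
 * code as BPF_PSEUDO_CALL expects.
 */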
2399 static int bpf_apply_relo_call(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2400 GElf_Rel *relo, GElf_Sym *sym,
2401 struct bpf_relo_props *props)
2402 {
2403 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2404 struct bpf_elf_prog *prog_text = &ctx->prog_text;
2405
2406 if (insn_off >= prog->insns_num)
2407 return -EINVAL;
2408 if (prog->insns[insn_off].code != (BPF_JMP | BPF_CALL) &&
2409 prog->insns[insn_off].src_reg != BPF_PSEUDO_CALL) {
2410 fprintf(stderr, "ELF contains relo data for non-call instruction at offset %u! Compiler bug?!\n",
2411 insn_off);
2412 return -EINVAL;
2413 }
2414
2415 if (!props->main_num) {
2416 struct bpf_insn *insns = realloc(prog->insns,
2417 prog->size + prog_text->size);
2418 if (!insns)
2419 return -ENOMEM;
2420
2421 memcpy(insns + prog->insns_num, prog_text->insns,
2422 prog_text->size);
2423 props->main_num = prog->insns_num;
2424 prog->insns = insns;
2425 prog->insns_num += prog_text->insns_num;
2426 prog->size += prog_text->size;
2427 }
2428
2429 prog->insns[insn_off].imm += props->main_num - insn_off;
2430 return 0;
2431 }
2432
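/* Apply one SHT_REL section: iterate its sh_size / sh_entsize entries and
 * dispatch each to the map or call handler depending on which section the
 * referenced symbol lives in. A relocation against anything else is
 * reported and, like a handler failure, aborts the pass.
 */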
2433 static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
2434 struct bpf_elf_sec_data *data_relo,
2435 struct bpf_elf_prog *prog,
2436 struct bpf_relo_props *props)
2437 {
2438 GElf_Shdr *rhdr = &data_relo->sec_hdr;
2439 int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
2440
2441 for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
2442 GElf_Rel relo;
2443 GElf_Sym sym;
2444 int ret = -EIO;
2445
2446 if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
2447 return -EIO;
2448 if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
2449 return -EIO;
2450
2451 if (sym.st_shndx == ctx->sec_maps)
2452 ret = bpf_apply_relo_map(ctx, prog, &relo, &sym, props);
2453 else if (sym.st_shndx == ctx->sec_text)
2454 ret = bpf_apply_relo_call(ctx, prog, &relo, &sym, props);
2455 else
2456 fprintf(stderr, "ELF contains non-{map,call} related relo data in entry %u pointing to section %u! Compiler bug?!\n",
2457 relo_ent, sym.st_shndx);
2458 if (ret < 0)
2459 return ret;
2460 }
2461
2462 return 0;
2463 }
2464
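/* Load a program section that needs relocation: find an SHT_REL section
 * whose sh_info points at an executable PROGBITS section with the
 * requested name, copy the instructions into a private buffer, apply the
 * relocations and attach the result. When the target is .text itself, the
 * relocated copy is only kept (in the caller's prog, i.e. ctx->prog_text)
 * for later pseudo calls and nothing is attached; fd 0 signals that case.
 */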
2465 static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
2466 bool *lderr, bool *sseen, struct bpf_elf_prog *prog)
2467 {
2468 struct bpf_elf_sec_data data_relo, data_insn;
2469 int ret, idx, i, fd = -1;
2470
2471 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2472 struct bpf_relo_props props = {};
2473
2474 ret = bpf_fill_section_data(ctx, i, &data_relo);
2475 if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
2476 continue;
2477
2478 idx = data_relo.sec_hdr.sh_info;
2479
2480 ret = bpf_fill_section_data(ctx, idx, &data_insn);
2481 if (ret < 0 ||
2482 !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
2483 (data_insn.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2484 !strcmp(data_insn.sec_name, section)))
2485 continue;
2486 if (sseen)
2487 *sseen = true;
2488
2489 memset(prog, 0, sizeof(*prog));
2490 prog->type = ctx->type;
2491 prog->license = ctx->license;
2492 prog->size = data_insn.sec_data->d_size;
2493 prog->insns_num = prog->size / sizeof(struct bpf_insn);
2494 prog->insns = malloc(prog->size);
2495 if (!prog->insns) {
2496 *lderr = true;
2497 return -ENOMEM;
2498 }
2499
2500 memcpy(prog->insns, data_insn.sec_data->d_buf, prog->size);
2501
2502 ret = bpf_apply_relo_data(ctx, &data_relo, prog, &props);
2503 if (ret < 0) {
2504 *lderr = true;
2505 if (ctx->sec_text != idx)
2506 free(prog->insns);
2507 return ret;
2508 }
2509 if (ctx->sec_text == idx) {
2510 fd = 0;
2511 goto out;
2512 }
2513
2514 fd = bpf_prog_attach(section, prog, ctx);
2515 free(prog->insns);
2516 if (fd < 0) {
2517 *lderr = true;
2518 if (props.tc.total) {
2519 if (ctx->cfg.jit_enabled &&
2520 props.tc.total != props.tc.jited)
2521 fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have JITed owner!\n",
2522 props.tc.jited, props.tc.total);
2523 if (!ctx->cfg.jit_enabled &&
2524 props.tc.jited)
2525 fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have JITed owner!\n",
2526 props.tc.jited, props.tc.total);
2527 }
2528 return fd;
2529 }
2530 out:
2531 ctx->sec_done[i] = true;
2532 ctx->sec_done[idx] = true;
2533 break;
2534 }
2535
2536 return fd;
2537 }
2538
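/* Entry point for loading one named program section: relocate and stash
 * .text first if pseudo calls are possible, prefer the relocation path
 * whenever map or call data exists, and fall back to a direct load
 * otherwise. sseen distinguishes "section not found" from "found but
 * failed to load" for the error message below.
 */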
2539 static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2540 {
2541 bool lderr = false, sseen = false;
2542 struct bpf_elf_prog prog;
2543 int ret = -1;
2544
2545 if (bpf_has_call_data(ctx)) {
2546 ret = bpf_fetch_prog_relo(ctx, ".text", &lderr, NULL,
2547 &ctx->prog_text);
2548 if (ret < 0)
2549 return ret;
2550 }
2551
2552 if (bpf_has_map_data(ctx) || bpf_has_call_data(ctx))
2553 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen, &prog);
2554 if (ret < 0 && !lderr)
2555 ret = bpf_fetch_prog(ctx, section, &sseen);
2556 if (ret < 0 && !sseen)
2557 fprintf(stderr, "Program section '%s' not found in ELF file!\n",
2558 section);
2559 return ret;
2560 }
2561
2562 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2563 {
2564 int i;
2565
2566 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2567 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2568 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2569 return i;
2570 return -1;
2571 }
2572
2573 struct bpf_jited_aux {
2574 int prog_fd;
2575 int map_fd;
2576 struct bpf_prog_data prog;
2577 struct bpf_map_ext map;
2578 };
2579
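/* Recover a program's type and JIT state from procfs. The kernel exposes
 * them as "prog_type:" and "prog_jited:" lines in
 * /proc/<pid>/fdinfo/<prog-fd>, e.g. (illustrative only, the exact set of
 * fields varies by kernel version):
 *
 *	pos:	0
 *	flags:	02000002
 *	prog_type:	3
 *	prog_jited:	1
 */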
2580 static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2581 {
2582 char file[PATH_MAX], buff[4096];
2583 unsigned int val;
2584 FILE *fp;
2585
2586 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
2587 memset(prog, 0, sizeof(*prog));
2588
2589 fp = fopen(file, "r");
2590 if (!fp) {
2591 fprintf(stderr, "No procfs support?!\n");
2592 return -EIO;
2593 }
2594
2595 while (fgets(buff, sizeof(buff), fp)) {
2596 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2597 prog->type = val;
2598 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2599 prog->jited = val;
2600 }
2601
2602 fclose(fp);
2603 return 0;
2604 }
2605
2606 static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2607 {
2608 struct bpf_elf_map tmp;
2609 int ret;
2610
2611 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2612 if (!ret)
2613 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2614
2615 return ret;
2616 }
2617
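/* Populate tail-call jump tables. Program sections named "<id>/<key>"
 * (e.g. __section("0/5") in the source, following the convention used by
 * the tc examples) are loaded and their prog fd is written into slot
 * <key> of the prog array whose legacy map id is <id>. If the update
 * fails, the fdinfo of map owner and program is compared to explain why
 * the kernel refused it (owner prog type or JIT state mismatch).
 */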
2618 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
2619 {
2620 struct bpf_elf_sec_data data;
2621 uint32_t map_id, key_id;
2622 int fd, i, ret, idx;
2623
2624 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2625 if (ctx->sec_done[i])
2626 continue;
2627
2628 ret = bpf_fill_section_data(ctx, i, &data);
2629 if (ret < 0)
2630 continue;
2631
2632 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
2633 if (ret != 2)
2634 continue;
2635
2636 idx = bpf_find_map_by_id(ctx, map_id);
2637 if (idx < 0)
2638 continue;
2639
2640 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
2641 if (fd < 0)
2642 return -EIO;
2643
2644 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
2645 &fd, BPF_ANY);
2646 if (ret < 0) {
2647 struct bpf_jited_aux aux = {};
2648
2649 ret = -errno;
2650 if (errno == E2BIG) {
2651 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
2652 key_id, map_id);
2653 return ret;
2654 }
2655
2656 aux.map_fd = ctx->map_fds[idx];
2657 aux.prog_fd = fd;
2658
2659 if (bpf_tail_call_get_aux(&aux))
2660 return ret;
2661 if (!aux.map.owner.type)
2662 return ret;
2663
2664 if (aux.prog.type != aux.map.owner.type)
2665 fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
2666 aux.map.owner.type, aux.prog.type);
2667 if (aux.prog.jited != aux.map.owner.jited)
2668 fprintf(stderr, "Tail call map %s jited, but prog %s!\n",
2669 aux.map.owner.jited ? "is" : "not",
2670 aux.prog.jited ? "is" : "not");
2671 return ret;
2672 }
2673
2674 ctx->sec_done[i] = true;
2675 }
2676
2677 return 0;
2678 }
2679
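/* Remember the object file's (st_dev, st_ino) identity; it travels along
 * with exported map fds so a receiver can verify they belong to the
 * object it expects.
 */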
2680 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2681 {
2682 struct stat st;
2683 int ret;
2684
2685 memset(&ctx->stat, 0, sizeof(ctx->stat));
2686
2687 ret = fstat(ctx->obj_fd, &st);
2688 if (ret < 0) {
2689 fprintf(stderr, "Stat of elf file failed: %s\n",
2690 strerror(errno));
2691 return;
2692 }
2693
2694 ctx->stat.st_dev = st.st_dev;
2695 ctx->stat.st_ino = st.st_ino;
2696 }
2697
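/* Parse one line of the pinning database. The expected format is
 * "<pinning-id> <subpath>", with blank lines and '#' comments skipped,
 * e.g. (a hypothetical entry; ids 0-2 are reserved):
 *
 *	# id	subpath
 *	3	custom/foo
 *
 * Returns 1 for a parsed entry, 0 on EOF and -1 for a malformed line,
 * with the offending text copied back into path for the error message.
 */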
2698 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2699 {
2700 char buff[PATH_MAX];
2701
2702 while (fgets(buff, sizeof(buff), fp)) {
2703 char *ptr = buff;
2704
2705 while (*ptr == ' ' || *ptr == '\t')
2706 ptr++;
2707
2708 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2709 continue;
2710
2711 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2712 sscanf(ptr, "%i %s #", id, path) != 2) {
2713 strcpy(path, ptr);
2714 return -1;
2715 }
2716
2717 return 1;
2718 }
2719
2720 return 0;
2721 }
2722
2723 static bool bpf_pinning_reserved(uint32_t pinning)
2724 {
2725 switch (pinning) {
2726 case PIN_NONE:
2727 case PIN_OBJECT_NS:
2728 case PIN_GLOBAL_NS:
2729 return true;
2730 default:
2731 return false;
2732 }
2733 }
2734
2735 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2736 {
2737 struct bpf_hash_entry *entry;
2738 char subpath[PATH_MAX] = {};
2739 uint32_t pinning;
2740 FILE *fp;
2741 int ret;
2742
2743 fp = fopen(db_file, "r");
2744 if (!fp)
2745 return;
2746
2747 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2748 if (ret == -1) {
2749 fprintf(stderr, "Database %s is corrupted at: %s\n",
2750 db_file, subpath);
2751 fclose(fp);
2752 return;
2753 }
2754
2755 if (bpf_pinning_reserved(pinning)) {
2756 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2757 db_file, pinning);
2758 continue;
2759 }
2760
2761 entry = malloc(sizeof(*entry));
2762 if (!entry) {
2763 fprintf(stderr, "No memory left for db entry!\n");
2764 continue;
2765 }
2766
2767 entry->pinning = pinning;
2768 entry->subpath = strdup(subpath);
2769 if (!entry->subpath) {
2770 fprintf(stderr, "No memory left for db entry!\n");
2771 free(entry);
2772 continue;
2773 }
2774
2775 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2776 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2777 }
2778
2779 fclose(fp);
2780 }
2781
2782 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2783 {
2784 struct bpf_hash_entry *entry;
2785 int i;
2786
2787 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2788 while ((entry = ctx->ht[i]) != NULL) {
2789 ctx->ht[i] = entry->next;
2790 free((char *)entry->subpath);
2791 free(entry);
2792 }
2793 }
2794 }
2795
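/* Accept only relocatable eBPF objects (ET_REL, machine EM_BPF, or
 * EM_NONE for objects built with older LLVM releases predating the
 * official EM_BPF assignment) whose byte order matches the host.
 * htons(1) == 1 is the usual runtime big-endian test; a cross-endian
 * object would yield byte-swapped instructions, so it is refused.
 */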
2796 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2797 {
2798 if (ctx->elf_hdr.e_type != ET_REL ||
2799 (ctx->elf_hdr.e_machine != EM_NONE &&
2800 ctx->elf_hdr.e_machine != EM_BPF) ||
2801 ctx->elf_hdr.e_version != EV_CURRENT) {
2802 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2803 return -EINVAL;
2804 }
2805
2806 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2807 default:
2808 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2809 return -EINVAL;
2810 case ELFDATA2LSB:
2811 if (htons(1) == 1) {
2812 fprintf(stderr,
2813 "We are big endian, eBPF object is little endian!\n");
2814 return -EIO;
2815 }
2816 break;
2817 case ELFDATA2MSB:
2818 if (htons(1) != 1) {
2819 fprintf(stderr,
2820 "We are little endian, eBPF object is big endian!\n");
2821 return -EIO;
2822 }
2823 break;
2824 }
2825
2826 return 0;
2827 }
2828
2829 static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2830 {
2831 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2832 int fd;
2833
2834 fd = open(path_jit, O_RDONLY);
2835 if (fd >= 0) {
2836 char tmp[16] = {};
2837
2838 if (read(fd, tmp, sizeof(tmp) - 1) > 0)
2839 ctx->cfg.jit_enabled = atoi(tmp);
2840 close(fd);
2841 }
2842 }
2843
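/* Set up the per-object load context: probe the JIT sysctl, hash the
 * object file for the object pinning namespace (noafalg is set when the
 * kernel lacks AF_ALG support for hashing), open the file via libelf,
 * validate the ELF header, and allocate the per-section done markers
 * plus, in verbose mode, the verifier log buffer.
 */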
2844 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2845 enum bpf_prog_type type, __u32 ifindex,
2846 bool verbose)
2847 {
2848 uint8_t tmp[20];
2849 int ret;
2850
2851 if (elf_version(EV_CURRENT) == EV_NONE)
2852 return -EINVAL;
2853
2854 bpf_init_env();
2855
2856 memset(ctx, 0, sizeof(*ctx));
2857 bpf_get_cfg(ctx);
2858
2859 ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
2860 if (ret)
2861 ctx->noafalg = true;
2862 else
2863 hexstring_n2a(tmp, sizeof(tmp), ctx->obj_uid,
2864 sizeof(ctx->obj_uid));
2865
2866 ctx->verbose = verbose;
2867 ctx->type = type;
2868 ctx->ifindex = ifindex;
2869
2870 ctx->obj_fd = open(pathname, O_RDONLY);
2871 if (ctx->obj_fd < 0)
2872 return ctx->obj_fd;
2873
2874 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2875 if (!ctx->elf_fd) {
2876 ret = -EINVAL;
2877 goto out_fd;
2878 }
2879
2880 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2881 ret = -EINVAL;
2882 goto out_fd;
2883 }
2884
2885 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2886 &ctx->elf_hdr) {
2887 ret = -EIO;
2888 goto out_elf;
2889 }
2890
2891 ret = bpf_elf_check_ehdr(ctx);
2892 if (ret < 0)
2893 goto out_elf;
2894
2895 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2896 sizeof(*(ctx->sec_done)));
2897 if (!ctx->sec_done) {
2898 ret = -ENOMEM;
2899 goto out_elf;
2900 }
2901
2902 if (ctx->verbose && bpf_log_realloc(ctx)) {
2903 ret = -ENOMEM;
2904 goto out_free;
2905 }
2906
2907 bpf_save_finfo(ctx);
2908 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2909
2910 return 0;
2911 out_free:
2912 free(ctx->sec_done);
2913 out_elf:
2914 elf_end(ctx->elf_fd);
2915 out_fd:
2916 close(ctx->obj_fd);
2917 return ret;
2918 }
2919
2920 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2921 {
2922 int i, count = 0;
2923
2924 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2925 if (!ctx->map_fds[i])
2926 break;
2927 count++;
2928 }
2929
2930 return count;
2931 }
2932
2933 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2934 {
2935 int i;
2936
2937 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2938 if (ctx->map_fds[i])
2939 close(ctx->map_fds[i]);
2940 }
2941
2942 if (ctx->btf_fd)
2943 close(ctx->btf_fd);
2944 free(ctx->btf.types);
2945 }
2946
2947 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2948 {
2949 if (failure)
2950 bpf_maps_teardown(ctx);
2951
2952 bpf_hash_destroy(ctx);
2953
2954 free(ctx->prog_text.insns);
2955 free(ctx->sec_done);
2956 free(ctx->log);
2957
2958 elf_end(ctx->elf_fd);
2959 close(ctx->obj_fd);
2960 }
2961
2962 static struct bpf_elf_ctx __ctx;
2963
2964 static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
2965 const char *section, __u32 ifindex, bool verbose)
2966 {
2967 struct bpf_elf_ctx *ctx = &__ctx;
2968 int fd = 0, ret;
2969
2970 ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
2971 if (ret < 0) {
2972 fprintf(stderr, "Cannot initialize ELF context!\n");
2973 return ret;
2974 }
2975
2976 ret = bpf_fetch_ancillary(ctx, strcmp(section, ".text"));
2977 if (ret < 0) {
2978 fprintf(stderr, "Error fetching ELF ancillary data!\n");
2979 goto out;
2980 }
2981
2982 fd = bpf_fetch_prog_sec(ctx, section);
2983 if (fd < 0) {
2984 fprintf(stderr, "Error fetching program/map!\n");
2985 ret = fd;
2986 goto out;
2987 }
2988
2989 ret = bpf_fill_prog_arrays(ctx);
2990 if (ret < 0)
2991 fprintf(stderr, "Error filling program arrays!\n");
2992 out:
2993 bpf_elf_ctx_destroy(ctx, ret < 0);
2994 if (ret < 0) {
2995 if (fd)
2996 close(fd);
2997 return ret;
2998 }
2999
3000 return fd;
3001 }
3002
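/* Export map fds over a datagram AF_UNIX socket so that another process
 * can adopt them. The fds travel as SCM_RIGHTS control messages, at most
 * BPF_SCM_MAX_FDS per datagram, while the payload carries the matching
 * bpf_elf_map entries plus the object's name and stat identity for
 * validation on the receiving side.
 */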
3003 static int
3004 bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
3005 const struct bpf_map_data *aux, unsigned int entries)
3006 {
3007 struct bpf_map_set_msg msg = {
3008 .aux.uds_ver = BPF_SCM_AUX_VER,
3009 .aux.num_ent = entries,
3010 };
3011 int *cmsg_buf, min_fd;
3012 char *amsg_buf;
3013 int i;
3014
3015 strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
3016 memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));
3017
3018 cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
3019 amsg_buf = (char *)msg.aux.ent;
3020
3021 for (i = 0; i < entries; i += min_fd) {
3022 int ret;
3023
3024 min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
3025 bpf_map_set_init_single(&msg, min_fd);
3026
3027 memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
3028 memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);
3029
3030 ret = sendmsg(fd, &msg.hdr, 0);
3031 if (ret <= 0)
3032 return ret ? : -1;
3033 }
3034
3035 return 0;
3036 }
3037
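/* Receiver counterpart: the aux header of the first datagram announces
 * how many entries to expect (needed), so the loop keeps reading until
 * all announced fds have arrived, checking for SCM_RIGHTS, control data
 * truncation and a matching uds protocol version on every message.
 */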
3038 static int
3039 bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
3040 unsigned int entries)
3041 {
3042 struct bpf_map_set_msg msg;
3043 int *cmsg_buf, min_fd;
3044 char *amsg_buf, *mmsg_buf;
3045 unsigned int needed = 1;
3046 int i;
3047
3048 cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
3049 amsg_buf = (char *)msg.aux.ent;
3050 mmsg_buf = (char *)&msg.aux;
3051
3052 for (i = 0; i < min(entries, needed); i += min_fd) {
3053 struct cmsghdr *cmsg;
3054 int ret;
3055
3056 min_fd = min(entries, entries - i);
3057 bpf_map_set_init_single(&msg, min_fd);
3058
3059 ret = recvmsg(fd, &msg.hdr, 0);
3060 if (ret <= 0)
3061 return ret ? : -1;
3062
3063 cmsg = CMSG_FIRSTHDR(&msg.hdr);
3064 if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
3065 return -EINVAL;
3066 if (msg.hdr.msg_flags & MSG_CTRUNC)
3067 return -EIO;
3068 if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
3069 return -ENOSYS;
3070
3071 min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
3072 if (min_fd > entries || min_fd <= 0)
3073 return -EINVAL;
3074
3075 memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
3076 memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
3077 memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));
3078
3079 needed = aux->num_ent;
3080 }
3081
3082 return 0;
3083 }
3084
3085 int bpf_send_map_fds(const char *path, const char *obj)
3086 {
3087 struct bpf_elf_ctx *ctx = &__ctx;
3088 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3089 struct bpf_map_data bpf_aux = {
3090 .fds = ctx->map_fds,
3091 .ent = ctx->maps,
3092 .st = &ctx->stat,
3093 .obj = obj,
3094 };
3095 int fd, ret;
3096
3097 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3098 if (fd < 0) {
3099 fprintf(stderr, "Cannot open socket: %s\n",
3100 strerror(errno));
3101 return -1;
3102 }
3103
3104 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3105
3106 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
3107 if (ret < 0) {
3108 fprintf(stderr, "Cannot connect to %s: %s\n",
3109 path, strerror(errno));
close(fd); /* do not leak the socket on error */
3110 return -1;
3111 }
3112
3113 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
3114 bpf_maps_count(ctx));
3115 if (ret < 0)
3116 fprintf(stderr, "Cannot send fds to %s: %s\n",
3117 path, strerror(errno));
3118
3119 bpf_maps_teardown(ctx);
3120 close(fd);
3121 return ret;
3122 }
3123
3124 int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
3125 unsigned int entries)
3126 {
3127 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3128 int fd, ret;
3129
3130 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3131 if (fd < 0) {
3132 fprintf(stderr, "Cannot open socket: %s\n",
3133 strerror(errno));
3134 return -1;
3135 }
3136
3137 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3138
3139 ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
3140 if (ret < 0) {
3141 fprintf(stderr, "Cannot bind to socket: %s\n",
3142 strerror(errno));
close(fd); /* do not leak the socket on error */
3143 return -1;
3144 }
3145
3146 ret = bpf_map_set_recv(fd, fds, aux, entries);
3147 if (ret < 0)
3148 fprintf(stderr, "Cannot recv fds from %s: %s\n",
3149 path, strerror(errno));
3150
3151 unlink(addr.sun_path);
3152 close(fd);
3153 return ret;
3154 }
3155
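/* Typical pairing of the two entry points above (a minimal sketch; the
 * socket path and error handling are placeholders, not from this file):
 *
 *	// exporting side, after the object has been loaded:
 *	if (bpf_send_map_fds("/tmp/bpf-agent", "cls_foo.o") < 0)
 *		// handle error
 *
 *	// agent side:
 *	int fds[BPF_SCM_MAX_FDS];
 *	struct bpf_map_aux aux = {};
 *
 *	if (bpf_recv_map_fds("/tmp/bpf-agent", fds, &aux, BPF_SCM_MAX_FDS) < 0)
 *		// handle error
 */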
3156 #ifdef HAVE_LIBBPF
3157 /* The following functions are wrappers around libbpf code so that it
3158  * stays compatible with the legacy format. All of them are therefore
3159  * prefixed with iproute2_.
3160  */
3161 int iproute2_bpf_elf_ctx_init(struct bpf_cfg_in *cfg)
3162 {
3163 struct bpf_elf_ctx *ctx = &__ctx;
3164
3165 return bpf_elf_ctx_init(ctx, cfg->object, cfg->type, cfg->ifindex, cfg->verbose);
3166 }
3167
3168 int iproute2_bpf_fetch_ancillary(void)
3169 {
3170 struct bpf_elf_ctx *ctx = &__ctx;
3171 struct bpf_elf_sec_data data;
3172 int i, ret = 0;
3173
3174 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
3175 ret = bpf_fill_section_data(ctx, i, &data);
3176 if (ret < 0)
3177 continue;
3178
3179 if (data.sec_hdr.sh_type == SHT_PROGBITS &&
3180 !strcmp(data.sec_name, ELF_SECTION_MAPS))
3181 ret = bpf_fetch_maps_begin(ctx, i, &data);
3182 else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
3183 !strcmp(data.sec_name, ".symtab"))
3184 ret = bpf_fetch_symtab(ctx, i, &data);
3185 else if (data.sec_hdr.sh_type == SHT_STRTAB &&
3186 !strcmp(data.sec_name, ".strtab"))
3187 ret = bpf_fetch_strtab(ctx, i, &data);
3188 if (ret < 0) {
3189 fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
3190 i);
3191 return ret;
3192 }
3193 }
3194
3195 if (bpf_has_map_data(ctx)) {
3196 ret = bpf_fetch_maps_end(ctx);
3197 if (ret < 0) {
3198 fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
3199 return ret;
3200 }
3201 }
3202
3203 return ret;
3204 }
3205
3206 int iproute2_get_root_path(char *root_path, size_t len)
3207 {
3208 struct bpf_elf_ctx *ctx = &__ctx;
3209 int ret = 0;
3210
3211 snprintf(root_path, len, "%s/%s",
3212 bpf_get_work_dir(ctx->type), BPF_DIR_GLOBALS);
3213
3214 ret = mkdir(root_path, S_IRWXU);
3215 if (ret && errno != EEXIST) {
3216 fprintf(stderr, "mkdir %s failed: %s\n", root_path, strerror(errno));
3217 return ret;
3218 }
3219
3220 return 0;
3221 }
3222
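/* Map a libbpf map name onto the legacy pinning scheme: look the name up
 * among the parsed legacy maps, create the object-namespace or custom pin
 * directory if needed, and hand the resulting pathname back so libbpf can
 * reuse or create the pinned map there.
 */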
3223 bool iproute2_is_pin_map(const char *libbpf_map_name, char *pathname)
3224 {
3225 struct bpf_elf_ctx *ctx = &__ctx;
3226 const char *map_name, *tmp;
3227 unsigned int pinning;
3228 int i, ret = 0;
3229
3230 for (i = 0; i < ctx->map_num; i++) {
3231 if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
3232 ctx->noafalg) {
3233 fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
3234 return false;
3235 }
3236
3237 map_name = bpf_map_fetch_name(ctx, i);
3238 if (!map_name) {
3239 return false;
3240 }
3241
3242 if (strcmp(libbpf_map_name, map_name))
3243 continue;
3244
3245 pinning = ctx->maps[i].pinning;
3246
3247 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
3248 return false;
3249
3250 if (pinning == PIN_OBJECT_NS)
3251 ret = bpf_make_obj_path(ctx);
3252 else if ((tmp = bpf_custom_pinning(ctx, pinning)))
3253 ret = bpf_make_custom_path(ctx, tmp);
3254 if (ret < 0)
3255 return false;
3256
3257 bpf_make_pathname(pathname, PATH_MAX, map_name, ctx, pinning);
3258
3259 return true;
3260 }
3261
3262 return false;
3263 }
3264
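/* Decide whether the legacy description marks a libbpf map as the inner
 * map of a map-in-map: the matching entry must carry an id and a valid
 * inner_idx while not referencing an inner map itself, and some map of a
 * map-in-map type must point at that id via inner_id; the latter is
 * reported as the outer map.
 */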
3265 bool iproute2_is_map_in_map(const char *libbpf_map_name, struct bpf_elf_map *imap,
3266 struct bpf_elf_map *omap, char *omap_name)
3267 {
3268 struct bpf_elf_ctx *ctx = &__ctx;
3269 const char *inner_map_name, *outer_map_name;
3270 int i, j;
3271
3272 for (i = 0; i < ctx->map_num; i++) {
3273 inner_map_name = bpf_map_fetch_name(ctx, i);
3274 if (!inner_map_name) {
3275 return false;
3276 }
3277
3278 if (strcmp(libbpf_map_name, inner_map_name))
3279 continue;
3280
3281 if (!ctx->maps[i].id ||
3282 ctx->maps[i].inner_id ||
3283 ctx->maps[i].inner_idx == -1)
3284 continue;
3285
3286 *imap = ctx->maps[i];
3287
3288 for (j = 0; j < ctx->map_num; j++) {
3289 if (!bpf_is_map_in_map_type(&ctx->maps[j]))
3290 continue;
3291 if (ctx->maps[j].inner_id != ctx->maps[i].id)
3292 continue;
3293
3294 *omap = ctx->maps[j];
3295 outer_map_name = bpf_map_fetch_name(ctx, j);
3296 memcpy(omap_name, outer_map_name, strlen(outer_map_name) + 1);
3297
3298 return true;
3299 }
3300 }
3301
3302 return false;
3303 }
3304
3305 int iproute2_find_map_name_by_id(unsigned int map_id, char *name)
3306 {
3307 struct bpf_elf_ctx *ctx = &__ctx;
3308 const char *map_name;
3309 int i, idx = -1;
3310
3311 for (i = 0; i < ctx->map_num; i++) {
3312 if (ctx->maps[i].id == map_id &&
3313 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY) {
3314 idx = i;
3315 break;
3316 }
3317 }
3318
3319 if (idx < 0)
3320 return -1;
3321
3322 map_name = bpf_map_fetch_name(ctx, idx);
3323 if (!map_name)
3324 return -1;
3325
3326 memcpy(name, map_name, strlen(map_name) + 1);
3327 return 0;
3328 }
3329 #endif /* HAVE_LIBBPF */
3330 #endif /* HAVE_ELF */