1 /*
2 * bpf.c BPF common code
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Daniel Borkmann <daniel@iogearbox.net>
10 * Jiri Pirko <jiri@resnulli.us>
11 * Alexei Starovoitov <ast@kernel.org>
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <string.h>
18 #include <stdbool.h>
19 #include <stdint.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <stdarg.h>
23 #include <limits.h>
24 #include <assert.h>
25
26 #ifdef HAVE_ELF
27 #include <libelf.h>
28 #include <gelf.h>
29 #endif
30
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/un.h>
34 #include <sys/vfs.h>
35 #include <sys/mount.h>
36 #include <sys/syscall.h>
37 #include <sys/sendfile.h>
38 #include <sys/resource.h>
39
40 #include <arpa/inet.h>
41
42 #include "utils.h"
43 #include "json_print.h"
44
45 #include "bpf_util.h"
46 #include "bpf_elf.h"
47 #include "bpf_scm.h"
48
49 struct bpf_prog_meta {
50 const char *type;
51 const char *subdir;
52 const char *section;
53 bool may_uds_export;
54 };
55
56 static const enum bpf_prog_type __bpf_types[] = {
57 BPF_PROG_TYPE_SCHED_CLS,
58 BPF_PROG_TYPE_SCHED_ACT,
59 BPF_PROG_TYPE_XDP,
60 BPF_PROG_TYPE_LWT_IN,
61 BPF_PROG_TYPE_LWT_OUT,
62 BPF_PROG_TYPE_LWT_XMIT,
63 };
64
65 static const struct bpf_prog_meta __bpf_prog_meta[] = {
66 [BPF_PROG_TYPE_SCHED_CLS] = {
67 .type = "cls",
68 .subdir = "tc",
69 .section = ELF_SECTION_CLASSIFIER,
70 .may_uds_export = true,
71 },
72 [BPF_PROG_TYPE_SCHED_ACT] = {
73 .type = "act",
74 .subdir = "tc",
75 .section = ELF_SECTION_ACTION,
76 .may_uds_export = true,
77 },
78 [BPF_PROG_TYPE_XDP] = {
79 .type = "xdp",
80 .subdir = "xdp",
81 .section = ELF_SECTION_PROG,
82 },
83 [BPF_PROG_TYPE_LWT_IN] = {
84 .type = "lwt_in",
85 .subdir = "ip",
86 .section = ELF_SECTION_PROG,
87 },
88 [BPF_PROG_TYPE_LWT_OUT] = {
89 .type = "lwt_out",
90 .subdir = "ip",
91 .section = ELF_SECTION_PROG,
92 },
93 [BPF_PROG_TYPE_LWT_XMIT] = {
94 .type = "lwt_xmit",
95 .subdir = "ip",
96 .section = ELF_SECTION_PROG,
97 },
98 [BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
99 .type = "lwt_seg6local",
100 .subdir = "ip",
101 .section = ELF_SECTION_PROG,
102 },
103 };
104
105 static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
106 {
107 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
108 __bpf_prog_meta[type].subdir);
109 return __bpf_prog_meta[type].subdir;
110 }
111
112 const char *bpf_prog_to_default_section(enum bpf_prog_type type)
113 {
114 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
115 __bpf_prog_meta[type].section);
116 return __bpf_prog_meta[type].section;
117 }
118
119 #ifdef HAVE_ELF
120 static int bpf_obj_open(const char *path, enum bpf_prog_type type,
121 const char *sec, __u32 ifindex, bool verbose);
122 #else
123 static int bpf_obj_open(const char *path, enum bpf_prog_type type,
124 const char *sec, __u32 ifindex, bool verbose)
125 {
126 fprintf(stderr, "No ELF library support compiled in.\n");
127 errno = ENOSYS;
128 return -1;
129 }
130 #endif
131
132 static inline __u64 bpf_ptr_to_u64(const void *ptr)
133 {
134 return (__u64)(unsigned long)ptr;
135 }
136
137 static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
138 {
139 #ifdef __NR_bpf
140 return syscall(__NR_bpf, cmd, attr, size);
141 #else
142 fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
143 errno = ENOSYS;
144 return -1;
145 #endif
146 }
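
/* Every BPF-related helper in this file funnels through the wrapper
 * above. A minimal sketch of the calling convention, using UAPI
 * attribute names from union bpf_attr (illustrative values):
 *
 *   union bpf_attr attr = {};
 *
 *   attr.map_type    = BPF_MAP_TYPE_HASH;
 *   attr.key_size    = sizeof(__u32);
 *   attr.value_size  = sizeof(__u64);
 *   attr.max_entries = 1024;
 *
 *   int fd = bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
 *   // fd < 0 on failure with errno set (e.g. EPERM without privileges)
 */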
147
148 static int bpf_map_update(int fd, const void *key, const void *value,
149 uint64_t flags)
150 {
151 union bpf_attr attr = {};
152
153 attr.map_fd = fd;
154 attr.key = bpf_ptr_to_u64(key);
155 attr.value = bpf_ptr_to_u64(value);
156 attr.flags = flags;
157
158 return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
159 }
160
161 static int bpf_prog_fd_by_id(uint32_t id)
162 {
163 union bpf_attr attr = {};
164
165 attr.prog_id = id;
166
167 return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
168 }
169
170 static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
171 uint32_t *info_len)
172 {
173 union bpf_attr attr = {};
174 int ret;
175
176 attr.info.bpf_fd = fd;
177 attr.info.info = bpf_ptr_to_u64(info);
178 attr.info.info_len = *info_len;
179
180 *info_len = 0;
181 ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
182 if (!ret)
183 *info_len = attr.info.info_len;
184
185 return ret;
186 }
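
/* Typical ID-to-info lookup combining the two helpers above, much as
 * bpf_dump_prog_info() does below (sketch):
 *
 *   struct bpf_prog_info info = {};
 *   uint32_t len = sizeof(info);
 *   int fd = bpf_prog_fd_by_id(id);
 *
 *   if (fd >= 0 && !bpf_prog_info_by_fd(fd, &info, &len) && len)
 *           printf("type %u jited %u\n", info.type,
 *                  !!info.jited_prog_len);
 */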
187
188 int bpf_dump_prog_info(FILE *f, uint32_t id)
189 {
190 struct bpf_prog_info info = {};
191 uint32_t len = sizeof(info);
192 int fd, ret, dump_ok = 0;
193 SPRINT_BUF(tmp);
194
195 open_json_object("prog");
196 print_uint(PRINT_ANY, "id", "id %u ", id);
197
198 fd = bpf_prog_fd_by_id(id);
199 if (fd < 0)
200 goto out;
201
202 ret = bpf_prog_info_by_fd(fd, &info, &len);
203 if (!ret && len) {
204 int jited = !!info.jited_prog_len;
205
206 print_string(PRINT_ANY, "tag", "tag %s ",
207 hexstring_n2a(info.tag, sizeof(info.tag),
208 tmp, sizeof(tmp)));
209 print_uint(PRINT_JSON, "jited", NULL, jited);
210 if (jited && !is_json_context())
211 fprintf(f, "jited ");
212 dump_ok = 1;
213 }
214
215 close(fd);
216 out:
217 close_json_object();
218 return dump_ok;
219 }
220
221 static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
222 char **bpf_string, bool *need_release,
223 const char separator)
224 {
225 char sp;
226
227 if (from_file) {
228 size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
229 char *tmp_string, *pos, c_prev = ' ';
230 FILE *fp;
231 int c;
232
233 tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
234 tmp_string = pos = calloc(1, tmp_len);
235 if (tmp_string == NULL)
236 return -ENOMEM;
237
238 fp = fopen(arg, "r");
239 if (fp == NULL) {
240 perror("Cannot fopen");
241 free(tmp_string);
242 return -ENOENT;
243 }
244
245 while ((c = fgetc(fp)) != EOF) {
246 switch (c) {
247 case '\n':
248 if (c_prev != ',')
249 *(pos++) = ',';
250 c_prev = ',';
251 break;
252 case ' ':
253 case '\t':
254 if (c_prev != ' ')
255 *(pos++) = c;
256 c_prev = ' ';
257 break;
258 default:
259 *(pos++) = c;
260 c_prev = c;
261 }
262 if (pos - tmp_string == tmp_len)
263 break;
264 }
265
266 if (!feof(fp)) {
267 free(tmp_string);
268 fclose(fp);
269 return -E2BIG;
270 }
271
272 fclose(fp);
273 *pos = 0;
274
275 *need_release = true;
276 *bpf_string = tmp_string;
277 } else {
278 *need_release = false;
279 *bpf_string = arg;
280 }
281
282 if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
283 sp != separator) {
284 if (*need_release)
285 free(*bpf_string);
286 return -EINVAL;
287 }
288
289 return 0;
290 }
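
/* The accepted string is the classic cBPF opcode dump: a leading
 * instruction count, then "code jt jf k" per instruction, separated by
 * the given separator (',' inline; in file mode newlines are converted
 * to ',' above). For example, a filter matching only ARP, as emitted
 * by "tcpdump -ddd arp" (illustrative):
 *
 *   4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0
 */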
291
292 static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
293 bool from_file)
294 {
295 char *bpf_string, *token, separator = ',';
296 int ret = 0, i = 0;
297 bool need_release;
298 __u16 bpf_len = 0;
299
300 if (argc < 1)
301 return -EINVAL;
302 if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
303 &need_release, separator))
304 return -EINVAL;
305 if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
306 ret = -EINVAL;
307 goto out;
308 }
309
310 token = bpf_string;
311 while ((token = strchr(token, separator)) && (++token)[0]) {
312 if (i >= bpf_len) {
313 fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
314 ret = -EINVAL;
315 goto out;
316 }
317
318 if (sscanf(token, "%hu %hhu %hhu %u,",
319 &bpf_ops[i].code, &bpf_ops[i].jt,
320 &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
321 fprintf(stderr, "Error at instruction %d!\n", i);
322 ret = -EINVAL;
323 goto out;
324 }
325
326 i++;
327 }
328
329 if (i != bpf_len) {
330 fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
331 ret = -EINVAL;
332 goto out;
333 }
334 ret = bpf_len;
335 out:
336 if (need_release)
337 free(bpf_string);
338
339 return ret;
340 }
341
342 void bpf_print_ops(struct rtattr *bpf_ops, __u16 len)
343 {
344 struct sock_filter *ops = RTA_DATA(bpf_ops);
345 int i;
346
347 if (len == 0)
348 return;
349
350 open_json_object("bytecode");
351 print_uint(PRINT_ANY, "length", "bytecode '%u,", len);
352 open_json_array(PRINT_JSON, "insns");
353
354 for (i = 0; i < len; i++) {
355 open_json_object(NULL);
356 print_hu(PRINT_ANY, "code", "%hu ", ops[i].code);
357 print_hhu(PRINT_ANY, "jt", "%hhu ", ops[i].jt);
358 print_hhu(PRINT_ANY, "jf", "%hhu ", ops[i].jf);
359 if (i == len - 1)
360 print_uint(PRINT_ANY, "k", "%u'", ops[i].k);
361 else
362 print_uint(PRINT_ANY, "k", "%u,", ops[i].k);
363 close_json_object();
364 }
365
366 close_json_array(PRINT_JSON, NULL);
367 close_json_object();
368 }
369
370 static void bpf_map_pin_report(const struct bpf_elf_map *pin,
371 const struct bpf_elf_map *obj)
372 {
373 fprintf(stderr, "Map specification differs from pinned file!\n");
374
375 if (obj->type != pin->type)
376 fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
377 obj->type, pin->type);
378 if (obj->size_key != pin->size_key)
379 fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
380 obj->size_key, pin->size_key);
381 if (obj->size_value != pin->size_value)
382 fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
383 obj->size_value, pin->size_value);
384 if (obj->max_elem != pin->max_elem)
385 fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
386 obj->max_elem, pin->max_elem);
387 if (obj->flags != pin->flags)
388 fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
389 obj->flags, pin->flags);
390
391 fprintf(stderr, "\n");
392 }
393
394 struct bpf_prog_data {
395 unsigned int type;
396 unsigned int jited;
397 };
398
399 struct bpf_map_ext {
400 struct bpf_prog_data owner;
401 unsigned int btf_id_key;
402 unsigned int btf_id_val;
403 };
404
405 static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
406 struct bpf_map_ext *ext)
407 {
408 unsigned int val, owner_type = 0, owner_jited = 0;
409 char file[PATH_MAX], buff[4096];
410 FILE *fp;
411
412 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
413 memset(map, 0, sizeof(*map));
414
415 fp = fopen(file, "r");
416 if (!fp) {
417 fprintf(stderr, "No procfs support?!\n");
418 return -EIO;
419 }
420
421 while (fgets(buff, sizeof(buff), fp)) {
422 if (sscanf(buff, "map_type:\t%u", &val) == 1)
423 map->type = val;
424 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
425 map->size_key = val;
426 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
427 map->size_value = val;
428 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
429 map->max_elem = val;
430 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
431 map->flags = val;
432 else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
433 owner_type = val;
434 else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
435 owner_jited = val;
436 }
437
438 fclose(fp);
439 if (ext) {
440 memset(ext, 0, sizeof(*ext));
441 ext->owner.type = owner_type;
442 ext->owner.jited = owner_jited;
443 }
444
445 return 0;
446 }
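
/* The parsed fdinfo for a map fd looks roughly like this (the exact
 * field set depends on the kernel version; the values are made up):
 *
 *   map_type:        1
 *   key_size:        4
 *   value_size:      8
 *   max_entries:     1024
 *   map_flags:       0x0
 *   owner_prog_type: 3
 *   owner_jited:     1
 */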
447
448 static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
449 struct bpf_map_ext *ext, int length,
450 enum bpf_prog_type type)
451 {
452 struct bpf_elf_map tmp, zero = {};
453 int ret;
454
455 ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
456 if (ret < 0)
457 return ret;
458
459 /* The decision to reject this is ultimately on the kernel side, but
460 * at least give the user a chance to know what's wrong.
461 */
462 if (ext->owner.type && ext->owner.type != type)
463 fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
464 type, ext->owner.type);
465
466 if (!memcmp(&tmp, map, length)) {
467 return 0;
468 } else {
469 /* If the kernel doesn't have eBPF-related fdinfo, we cannot do much,
470 * so just accept it. We know we do have an eBPF fd and in this case,
471 * everything is 0. It is guaranteed that no such map exists since
472 * map type 0 is the unloadable BPF_MAP_TYPE_UNSPEC.
473 */
474 if (!memcmp(&tmp, &zero, length))
475 return 0;
476
477 bpf_map_pin_report(&tmp, map);
478 return -EINVAL;
479 }
480 }
481
482 static int bpf_mnt_fs(const char *target)
483 {
484 bool bind_done = false;
485
486 while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
487 if (errno != EINVAL || bind_done) {
488 fprintf(stderr, "mount --make-private %s failed: %s\n",
489 target, strerror(errno));
490 return -1;
491 }
492
493 if (mount(target, target, "none", MS_BIND, NULL)) {
494 fprintf(stderr, "mount --bind %s %s failed: %s\n",
495 target, target, strerror(errno));
496 return -1;
497 }
498
499 bind_done = true;
500 }
501
502 if (mount("bpf", target, "bpf", 0, "mode=0700")) {
503 fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
504 target, strerror(errno));
505 return -1;
506 }
507
508 return 0;
509 }
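
/* Roughly the shell equivalent of the function above; the bind mount
 * is the fallback for kernels that refuse MS_PRIVATE on a path that
 * is not yet a mount point (sketch):
 *
 *   mount --make-rprivate /sys/fs/bpf 2>/dev/null || {
 *           mount --bind /sys/fs/bpf /sys/fs/bpf
 *           mount --make-rprivate /sys/fs/bpf
 *   }
 *   mount -t bpf bpf /sys/fs/bpf -o mode=0700
 */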
510
511 static int bpf_mnt_check_target(const char *target)
512 {
513 struct stat sb = {};
514 int ret;
515
516 ret = stat(target, &sb);
517 if (ret) {
518 ret = mkdir(target, S_IRWXU);
519 if (ret) {
520 fprintf(stderr, "mkdir %s failed: %s\n", target,
521 strerror(errno));
522 return ret;
523 }
524 }
525
526 return 0;
527 }
528
529 static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
530 {
531 struct statfs st_fs;
532
533 if (statfs(mnt, &st_fs) < 0)
534 return -ENOENT;
535 if ((unsigned long)st_fs.f_type != magic)
536 return -ENOENT;
537
538 return 0;
539 }
540
541 static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
542 int len, const char *mntpt)
543 {
544 int ret;
545
546 ret = bpf_valid_mntpt(mntpt, magic);
547 if (!ret) {
548 strlcpy(mnt, mntpt, len);
549 return mnt;
550 }
551
552 return NULL;
553 }
554
555 static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
556 char *mnt, int len,
557 const char * const *known_mnts)
558 {
559 const char * const *ptr;
560 char type[100];
561 FILE *fp;
562
563 if (known_mnts) {
564 ptr = known_mnts;
565 while (*ptr) {
566 if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
567 return mnt;
568 ptr++;
569 }
570 }
571
572 if (len != PATH_MAX)
573 return NULL;
574
575 fp = fopen("/proc/mounts", "r");
576 if (fp == NULL)
577 return NULL;
578
579 while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
580 mnt, type) == 2) {
581 if (strcmp(type, fstype) == 0)
582 break;
583 }
584
585 fclose(fp);
586 if (strcmp(type, fstype) != 0)
587 return NULL;
588
589 return mnt;
590 }
591
592 int bpf_trace_pipe(void)
593 {
594 char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
595 static const char * const tracefs_known_mnts[] = {
596 TRACE_DIR_MNT,
597 "/sys/kernel/debug/tracing",
598 "/tracing",
599 "/trace",
600 0,
601 };
602 int fd_in, fd_out = STDERR_FILENO;
603 char tpipe[PATH_MAX];
604 const char *mnt;
605
606 mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
607 sizeof(tracefs_mnt), tracefs_known_mnts);
608 if (!mnt) {
609 fprintf(stderr, "tracefs not mounted?\n");
610 return -1;
611 }
612
613 snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);
614
615 fd_in = open(tpipe, O_RDONLY);
616 if (fd_in < 0)
617 return -1;
618
619 fprintf(stderr, "Running! Hang up with ^C!\n\n");
620 while (1) {
621 static char buff[4096];
622 ssize_t ret;
623
624 ret = read(fd_in, buff, sizeof(buff));
625 if (ret > 0 && write(fd_out, buff, ret) == ret)
626 continue;
627 break;
628 }
629
630 close(fd_in);
631 return -1;
632 }
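
/* This backs debugging front ends such as "tc exec bpf dbg": it
 * simply streams <tracefs>/trace_pipe, i.e. bpf_trace_printk()
 * output, to stderr until interrupted.
 */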
633
634 static int bpf_gen_global(const char *bpf_sub_dir)
635 {
636 char bpf_glo_dir[PATH_MAX];
637 int ret;
638
639 snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
640 bpf_sub_dir, BPF_DIR_GLOBALS);
641
642 ret = mkdir(bpf_glo_dir, S_IRWXU);
643 if (ret && errno != EEXIST) {
644 fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
645 strerror(errno));
646 return ret;
647 }
648
649 return 0;
650 }
651
652 static int bpf_gen_master(const char *base, const char *name)
653 {
654 char bpf_sub_dir[PATH_MAX + NAME_MAX + 1];
655 int ret;
656
657 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);
658
659 ret = mkdir(bpf_sub_dir, S_IRWXU);
660 if (ret && errno != EEXIST) {
661 fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
662 strerror(errno));
663 return ret;
664 }
665
666 return bpf_gen_global(bpf_sub_dir);
667 }
668
669 static int bpf_slave_via_bind_mnt(const char *full_name,
670 const char *full_link)
671 {
672 int ret;
673
674 ret = mkdir(full_name, S_IRWXU);
675 if (ret) {
676 assert(errno != EEXIST);
677 fprintf(stderr, "mkdir %s failed: %s\n", full_name,
678 strerror(errno));
679 return ret;
680 }
681
682 ret = mount(full_link, full_name, "none", MS_BIND, NULL);
683 if (ret) {
684 rmdir(full_name);
685 fprintf(stderr, "mount --bind %s %s failed: %s\n",
686 full_link, full_name, strerror(errno));
687 }
688
689 return ret;
690 }
691
692 static int bpf_gen_slave(const char *base, const char *name,
693 const char *link)
694 {
695 char bpf_lnk_dir[PATH_MAX + NAME_MAX + 1];
696 char bpf_sub_dir[PATH_MAX + NAME_MAX];
697 struct stat sb = {};
698 int ret;
699
700 snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
701 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);
702
703 ret = symlink(bpf_lnk_dir, bpf_sub_dir);
704 if (ret) {
705 if (errno != EEXIST) {
706 if (errno != EPERM) {
707 fprintf(stderr, "symlink %s failed: %s\n",
708 bpf_sub_dir, strerror(errno));
709 return ret;
710 }
711
712 return bpf_slave_via_bind_mnt(bpf_sub_dir,
713 bpf_lnk_dir);
714 }
715
716 ret = lstat(bpf_sub_dir, &sb);
717 if (ret) {
718 fprintf(stderr, "lstat %s failed: %s\n",
719 bpf_sub_dir, strerror(errno));
720 return ret;
721 }
722
723 if ((sb.st_mode & S_IFMT) != S_IFLNK)
724 return bpf_gen_global(bpf_sub_dir);
725 }
726
727 return 0;
728 }
729
730 static int bpf_gen_hierarchy(const char *base)
731 {
732 int ret, i;
733
734 ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
735 for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
736 ret = bpf_gen_slave(base,
737 bpf_prog_to_subdir(__bpf_types[i]),
738 bpf_prog_to_subdir(__bpf_types[0]));
739 return ret;
740 }
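
/* With the default mount point, the generated hierarchy looks like
 * this: the first subsystem ("tc") is the master directory and the
 * others are symlinked (or bind-mounted as fallback) onto it:
 *
 *   /sys/fs/bpf/tc/globals/
 *   /sys/fs/bpf/xdp -> /sys/fs/bpf/tc/
 *   /sys/fs/bpf/ip  -> /sys/fs/bpf/tc/
 */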
741
742 static const char *bpf_get_work_dir(enum bpf_prog_type type)
743 {
744 static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
745 static char bpf_wrk_dir[PATH_MAX];
746 static const char *mnt;
747 static bool bpf_mnt_cached;
748 const char *mnt_env = getenv(BPF_ENV_MNT);
749 static const char * const bpf_known_mnts[] = {
750 BPF_DIR_MNT,
751 "/bpf",
752 0,
753 };
754 int ret;
755
756 if (bpf_mnt_cached) {
757 const char *out = mnt;
758
759 if (out && type) {
760 snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
761 out, bpf_prog_to_subdir(type));
762 out = bpf_tmp;
763 }
764 return out;
765 }
766
767 if (mnt_env)
768 mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
769 sizeof(bpf_tmp), mnt_env);
770 else
771 mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
772 sizeof(bpf_tmp), bpf_known_mnts);
773 if (!mnt) {
774 mnt = mnt_env ? : BPF_DIR_MNT;
775 ret = bpf_mnt_check_target(mnt);
776 if (!ret)
777 ret = bpf_mnt_fs(mnt);
778 if (ret) {
779 mnt = NULL;
780 goto out;
781 }
782 }
783
784 snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);
785
786 ret = bpf_gen_hierarchy(bpf_wrk_dir);
787 if (ret) {
788 mnt = NULL;
789 goto out;
790 }
791
792 mnt = bpf_wrk_dir;
793 out:
794 bpf_mnt_cached = true;
795 return mnt;
796 }
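
/* The mount point is resolved once and cached. It can be pinned up
 * front via the BPF_ENV_MNT environment variable (see the headers
 * included above for the variable's actual name); given a concrete
 * program type, the per-subsystem subdir is appended, e.g. "<mnt>/tc/"
 * for BPF_PROG_TYPE_SCHED_CLS.
 */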
797
798 static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
799 {
800 union bpf_attr attr = {};
801 char tmp[PATH_MAX];
802
803 if (strlen(pathname) > 2 && pathname[0] == 'm' &&
804 pathname[1] == ':' && bpf_get_work_dir(type)) {
805 snprintf(tmp, sizeof(tmp), "%s/%s",
806 bpf_get_work_dir(type), pathname + 2);
807 pathname = tmp;
808 }
809
810 attr.pathname = bpf_ptr_to_u64(pathname);
811
812 return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
813 }
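
/* Pathnames may be given relative to the bpf working directory using
 * the "m:" shorthand, e.g. "m:globals/jmp_map" expands to
 * "<mnt>/<subdir>/globals/jmp_map" for the given program type.
 */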
814
815 static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
816 {
817 int prog_fd = bpf_obj_get(pathname, type);
818
819 if (prog_fd < 0)
820 fprintf(stderr, "Couldn't retrieve pinned program '%s': %s\n",
821 pathname, strerror(errno));
822 return prog_fd;
823 }
824
825 static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
826 {
827 const char *file, *section, *uds_name;
828 bool verbose = false;
829 int i, ret, argc;
830 char **argv;
831
832 argv = cfg->argv;
833 argc = cfg->argc;
834
835 if (opt_tbl[CBPF_BYTECODE] &&
836 (matches(*argv, "bytecode") == 0 ||
837 strcmp(*argv, "bc") == 0)) {
838 cfg->mode = CBPF_BYTECODE;
839 } else if (opt_tbl[CBPF_FILE] &&
840 (matches(*argv, "bytecode-file") == 0 ||
841 strcmp(*argv, "bcf") == 0)) {
842 cfg->mode = CBPF_FILE;
843 } else if (opt_tbl[EBPF_OBJECT] &&
844 (matches(*argv, "object-file") == 0 ||
845 strcmp(*argv, "obj") == 0)) {
846 cfg->mode = EBPF_OBJECT;
847 } else if (opt_tbl[EBPF_PINNED] &&
848 (matches(*argv, "object-pinned") == 0 ||
849 matches(*argv, "pinned") == 0 ||
850 matches(*argv, "fd") == 0)) {
851 cfg->mode = EBPF_PINNED;
852 } else {
853 fprintf(stderr, "What mode is \"%s\"?\n", *argv);
854 return -1;
855 }
856
857 NEXT_ARG();
858 file = section = uds_name = NULL;
859 if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
860 file = *argv;
861 NEXT_ARG_FWD();
862
863 if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
864 if (argc > 0 && matches(*argv, "type") == 0) {
865 NEXT_ARG();
866 for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
867 i++) {
868 if (!__bpf_prog_meta[i].type)
869 continue;
870 if (!matches(*argv,
871 __bpf_prog_meta[i].type)) {
872 cfg->type = i;
873 break;
874 }
875 }
876
877 if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
878 fprintf(stderr, "What type is \"%s\"?\n",
879 *argv);
880 return -1;
881 }
882 NEXT_ARG_FWD();
883 } else {
884 cfg->type = BPF_PROG_TYPE_SCHED_CLS;
885 }
886 }
887
888 section = bpf_prog_to_default_section(cfg->type);
889 if (argc > 0 && matches(*argv, "section") == 0) {
890 NEXT_ARG();
891 section = *argv;
892 NEXT_ARG_FWD();
893 }
894
895 if (__bpf_prog_meta[cfg->type].may_uds_export) {
896 uds_name = getenv(BPF_ENV_UDS);
897 if (argc > 0 && !uds_name &&
898 matches(*argv, "export") == 0) {
899 NEXT_ARG();
900 uds_name = *argv;
901 NEXT_ARG_FWD();
902 }
903 }
904
905 if (argc > 0 && matches(*argv, "verbose") == 0) {
906 verbose = true;
907 NEXT_ARG_FWD();
908 }
909
910 PREV_ARG();
911 }
912
913 if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
914 ret = bpf_ops_parse(argc, argv, cfg->opcodes,
915 cfg->mode == CBPF_FILE);
916 cfg->n_opcodes = ret;
917 } else if (cfg->mode == EBPF_OBJECT) {
918 ret = 0; /* program will be loaded by load stage */
919 } else if (cfg->mode == EBPF_PINNED) {
920 ret = bpf_obj_pinned(file, cfg->type);
921 cfg->prog_fd = ret;
922 } else {
923 return -1;
924 }
925
926 cfg->object = file;
927 cfg->section = section;
928 cfg->uds = uds_name;
929 cfg->argc = argc;
930 cfg->argv = argv;
931 cfg->verbose = verbose;
932
933 return ret;
934 }
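
/* Illustrative front-end invocations that end up in this parser; the
 * keywords may be abbreviated since matches() does prefix matching:
 *
 *   tc filter add dev eth0 ingress bpf bytecode \
 *       '4,40 0 0 12,21 0 1 2054,6 0 0 262144,6 0 0 0'
 *   tc filter add dev eth0 ingress bpf obj prog.o sec my_cls verbose
 *   tc filter add dev eth0 ingress bpf pinned /sys/fs/bpf/tc/globals/p
 */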
935
936 static int bpf_do_load(struct bpf_cfg_in *cfg)
937 {
938 if (cfg->mode == EBPF_OBJECT) {
939 cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
940 cfg->section, cfg->ifindex,
941 cfg->verbose);
942 return cfg->prog_fd;
943 }
944 return 0;
945 }
946
947 int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
948 void *nl)
949 {
950 char annotation[256];
951 int ret;
952
953 ret = bpf_do_load(cfg);
954 if (ret < 0)
955 return ret;
956
957 if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
958 ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
959 if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
960 snprintf(annotation, sizeof(annotation), "%s:[%s]",
961 basename(cfg->object), cfg->mode == EBPF_PINNED ?
962 "*fsobj" : cfg->section);
963 ops->ebpf_cb(nl, cfg->prog_fd, annotation);
964 }
965
966 return 0;
967 }
968
969 int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
970 {
971 bool opt_tbl[BPF_MODE_MAX] = {};
972
973 if (ops->cbpf_cb) {
974 opt_tbl[CBPF_BYTECODE] = true;
975 opt_tbl[CBPF_FILE] = true;
976 }
977
978 if (ops->ebpf_cb) {
979 opt_tbl[EBPF_OBJECT] = true;
980 opt_tbl[EBPF_PINNED] = true;
981 }
982
983 return bpf_do_parse(cfg, opt_tbl);
984 }
985
986 int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
987 const struct bpf_cfg_ops *ops, void *nl)
988 {
989 int ret;
990
991 ret = bpf_parse_common(cfg, ops);
992 if (ret < 0)
993 return ret;
994
995 return bpf_load_common(cfg, ops, nl);
996 }
997
998 int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
999 {
1000 const bool opt_tbl[BPF_MODE_MAX] = {
1001 [EBPF_OBJECT] = true,
1002 [EBPF_PINNED] = true,
1003 };
1004 const struct bpf_elf_map test = {
1005 .type = BPF_MAP_TYPE_PROG_ARRAY,
1006 .size_key = sizeof(int),
1007 .size_value = sizeof(int),
1008 };
1009 struct bpf_cfg_in cfg = {
1010 .type = BPF_PROG_TYPE_UNSPEC,
1011 .argc = argc,
1012 .argv = argv,
1013 };
1014 struct bpf_map_ext ext = {};
1015 int ret, prog_fd, map_fd;
1016 uint32_t map_key;
1017
1018 ret = bpf_do_parse(&cfg, opt_tbl);
1019 if (ret < 0)
1020 return ret;
1021
1022 ret = bpf_do_load(&cfg);
1023 if (ret < 0)
1024 return ret;
1025
1026 prog_fd = cfg.prog_fd;
1027
1028 if (key) {
1029 map_key = *key;
1030 } else {
1031 ret = sscanf(cfg.section, "%*i/%i", &map_key);
1032 if (ret != 1) {
1033 fprintf(stderr, "Couldn't infer map key from section name! Please provide 'key' argument!\n");
1034 ret = -EINVAL;
1035 goto out_prog;
1036 }
1037 }
1038
1039 map_fd = bpf_obj_get(map_path, cfg.type);
1040 if (map_fd < 0) {
1041 fprintf(stderr, "Couldn't retrieve pinned map '%s': %s\n",
1042 map_path, strerror(errno));
1043 ret = map_fd;
1044 goto out_prog;
1045 }
1046
1047 ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
1048 offsetof(struct bpf_elf_map, max_elem),
1049 cfg.type);
1050 if (ret < 0) {
1051 fprintf(stderr, "Map '%s' self-check failed!\n", map_path);
1052 goto out_map;
1053 }
1054
1055 ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
1056 if (ret < 0)
1057 fprintf(stderr, "Map update failed: %s\n", strerror(errno));
1058 out_map:
1059 close(map_fd);
1060 out_prog:
1061 close(prog_fd);
1062 return ret;
1063 }
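
/* Used by tc's "exec bpf graft" front end to wire a program into a
 * pinned BPF_MAP_TYPE_PROG_ARRAY, e.g. (illustrative):
 *
 *   tc exec bpf graft m:globals/jmp_map key 0 obj prog.o sec 0/0
 *
 * When no explicit key is given, it is inferred from a section name
 * of the form "<x>/<key>".
 */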
1064
1065 int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
1066 {
1067 union bpf_attr attr = {};
1068
1069 attr.target_fd = target_fd;
1070 attr.attach_bpf_fd = prog_fd;
1071 attr.attach_type = type;
1072
1073 return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
1074 }
1075
1076 int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
1077 {
1078 union bpf_attr attr = {};
1079
1080 attr.target_fd = target_fd;
1081 attr.attach_type = type;
1082
1083 return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
1084 }
1085
1086 static int bpf_prog_load_dev(enum bpf_prog_type type,
1087 const struct bpf_insn *insns, size_t size_insns,
1088 const char *license, __u32 ifindex,
1089 char *log, size_t size_log)
1090 {
1091 union bpf_attr attr = {};
1092
1093 attr.prog_type = type;
1094 attr.insns = bpf_ptr_to_u64(insns);
1095 attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
1096 attr.license = bpf_ptr_to_u64(license);
1097 attr.prog_ifindex = ifindex;
1098
1099 if (size_log > 0) {
1100 attr.log_buf = bpf_ptr_to_u64(log);
1101 attr.log_size = size_log;
1102 attr.log_level = 1;
1103 }
1104
1105 return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
1106 }
1107
1108 int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
1109 size_t size_insns, const char *license, char *log,
1110 size_t size_log)
1111 {
1112 return bpf_prog_load_dev(type, insns, size_insns, license, 0,
1113 log, size_log);
1114 }
1115
1116 #ifdef HAVE_ELF
1117 struct bpf_elf_prog {
1118 enum bpf_prog_type type;
1119 struct bpf_insn *insns;
1120 unsigned int insns_num;
1121 size_t size;
1122 const char *license;
1123 };
1124
1125 struct bpf_hash_entry {
1126 unsigned int pinning;
1127 const char *subpath;
1128 struct bpf_hash_entry *next;
1129 };
1130
1131 struct bpf_config {
1132 unsigned int jit_enabled;
1133 };
1134
1135 struct bpf_btf {
1136 const struct btf_header *hdr;
1137 const void *raw;
1138 const char *strings;
1139 const struct btf_type **types;
1140 int types_num;
1141 };
1142
1143 struct bpf_elf_ctx {
1144 struct bpf_config cfg;
1145 Elf *elf_fd;
1146 GElf_Ehdr elf_hdr;
1147 Elf_Data *sym_tab;
1148 Elf_Data *str_tab;
1149 Elf_Data *btf_data;
1150 char obj_uid[64];
1151 int obj_fd;
1152 int btf_fd;
1153 int map_fds[ELF_MAX_MAPS];
1154 struct bpf_elf_map maps[ELF_MAX_MAPS];
1155 struct bpf_map_ext maps_ext[ELF_MAX_MAPS];
1156 struct bpf_elf_prog prog_text;
1157 struct bpf_btf btf;
1158 int sym_num;
1159 int map_num;
1160 int map_len;
1161 bool *sec_done;
1162 int sec_maps;
1163 int sec_text;
1164 int sec_btf;
1165 char license[ELF_MAX_LICENSE_LEN];
1166 enum bpf_prog_type type;
1167 __u32 ifindex;
1168 bool verbose;
1169 bool noafalg;
1170 struct bpf_elf_st stat;
1171 struct bpf_hash_entry *ht[256];
1172 char *log;
1173 size_t log_size;
1174 };
1175
1176 struct bpf_elf_sec_data {
1177 GElf_Shdr sec_hdr;
1178 Elf_Data *sec_data;
1179 const char *sec_name;
1180 };
1181
1182 struct bpf_map_data {
1183 int *fds;
1184 const char *obj;
1185 struct bpf_elf_st *st;
1186 struct bpf_elf_map *ent;
1187 };
1188
1189 static bool bpf_log_has_data(struct bpf_elf_ctx *ctx)
1190 {
1191 return ctx->log && ctx->log[0];
1192 }
1193
1194 static __check_format_string(2, 3) void
1195 bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
1196 {
1197 va_list vl;
1198
1199 va_start(vl, format);
1200 vfprintf(stderr, format, vl);
1201 va_end(vl);
1202
1203 if (bpf_log_has_data(ctx)) {
1204 if (ctx->verbose) {
1205 fprintf(stderr, "%s\n", ctx->log);
1206 } else {
1207 unsigned int off = 0, len = strlen(ctx->log);
1208
1209 if (len > BPF_MAX_LOG) {
1210 off = len - BPF_MAX_LOG;
1211 fprintf(stderr, "Skipped %u bytes, use 'verb' option for the full verbose log.\n[...]\n",
1212 off);
1213 }
1214 fprintf(stderr, "%s\n", ctx->log + off);
1215 }
1216
1217 memset(ctx->log, 0, ctx->log_size);
1218 }
1219 }
1220
1221 static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
1222 {
1223 const size_t log_max = UINT_MAX >> 8;
1224 size_t log_size = ctx->log_size;
1225 char *ptr;
1226
1227 if (!ctx->log) {
1228 log_size = 65536;
1229 } else if (log_size < log_max) {
1230 log_size <<= 1;
1231 if (log_size > log_max)
1232 log_size = log_max;
1233 } else {
1234 return -EINVAL;
1235 }
1236
1237 ptr = realloc(ctx->log, log_size);
1238 if (!ptr)
1239 return -ENOMEM;
1240
1241 ptr[0] = 0;
1242 ctx->log = ptr;
1243 ctx->log_size = log_size;
1244
1245 return 0;
1246 }
1247
1248 static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
1249 uint32_t size_value, uint32_t max_elem,
1250 uint32_t flags, int inner_fd, int btf_fd,
1251 uint32_t ifindex, uint32_t btf_id_key,
1252 uint32_t btf_id_val)
1253 {
1254 union bpf_attr attr = {};
1255
1256 attr.map_type = type;
1257 attr.key_size = size_key;
1258 attr.value_size = inner_fd ? sizeof(int) : size_value;
1259 attr.max_entries = max_elem;
1260 attr.map_flags = flags;
1261 attr.inner_map_fd = inner_fd;
1262 attr.map_ifindex = ifindex;
1263 attr.btf_fd = btf_fd;
1264 attr.btf_key_type_id = btf_id_key;
1265 attr.btf_value_type_id = btf_id_val;
1266
1267 return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
1268 }
1269
1270 static int bpf_btf_load(void *btf, size_t size_btf,
1271 char *log, size_t size_log)
1272 {
1273 union bpf_attr attr = {};
1274
1275 attr.btf = bpf_ptr_to_u64(btf);
1276 attr.btf_size = size_btf;
1277
1278 if (size_log > 0) {
1279 attr.btf_log_buf = bpf_ptr_to_u64(log);
1280 attr.btf_log_size = size_log;
1281 attr.btf_log_level = 1;
1282 }
1283
1284 return bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
1285 }
1286
1287 static int bpf_obj_pin(int fd, const char *pathname)
1288 {
1289 union bpf_attr attr = {};
1290
1291 attr.pathname = bpf_ptr_to_u64(pathname);
1292 attr.bpf_fd = fd;
1293
1294 return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
1295 }
1296
1297 static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
1298 {
1299 struct sockaddr_alg alg = {
1300 .salg_family = AF_ALG,
1301 .salg_type = "hash",
1302 .salg_name = "sha1",
1303 };
1304 int ret, cfd, ofd, ffd;
1305 struct stat stbuff;
1306 ssize_t size;
1307
1308 if (!object || len != 20)
1309 return -EINVAL;
1310
1311 cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
1312 if (cfd < 0)
1313 return cfd;
1314
1315 ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
1316 if (ret < 0)
1317 goto out_cfd;
1318
1319 ofd = accept(cfd, NULL, 0);
1320 if (ofd < 0) {
1321 ret = ofd;
1322 goto out_cfd;
1323 }
1324
1325 ffd = open(object, O_RDONLY);
1326 if (ffd < 0) {
1327 fprintf(stderr, "Error opening object %s: %s\n",
1328 object, strerror(errno));
1329 ret = ffd;
1330 goto out_ofd;
1331 }
1332
1333 ret = fstat(ffd, &stbuff);
1334 if (ret < 0) {
1335 fprintf(stderr, "Error doing fstat: %s\n",
1336 strerror(errno));
1337 goto out_ffd;
1338 }
1339
1340 size = sendfile(ofd, ffd, NULL, stbuff.st_size);
1341 if (size != stbuff.st_size) {
1342 fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
1343 size, stbuff.st_size, strerror(errno));
1344 ret = -1;
1345 goto out_ffd;
1346 }
1347
1348 size = read(ofd, out, len);
1349 if (size != len) {
1350 fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
1351 size, len, strerror(errno));
1352 ret = -1;
1353 } else {
1354 ret = 0;
1355 }
1356 out_ffd:
1357 close(ffd);
1358 out_ofd:
1359 close(ofd);
1360 out_cfd:
1361 close(cfd);
1362 return ret;
1363 }
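
/* Functionally this is "sha1sum <object>" done via the kernel's
 * AF_ALG interface, avoiding a user-space crypto dependency. The
 * resulting 20-byte digest forms the object's unique directory name
 * (obj_uid) used for PIN_OBJECT_NS pinning further below.
 */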
1364
1365 static void bpf_init_env(void)
1366 {
1367 struct rlimit limit = {
1368 .rlim_cur = RLIM_INFINITY,
1369 .rlim_max = RLIM_INFINITY,
1370 };
1371
1372 /* Don't bother in case we fail! */
1373 setrlimit(RLIMIT_MEMLOCK, &limit);
1374
1375 if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC))
1376 fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
1377 }
1378
1379 static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
1380 uint32_t pinning)
1381 {
1382 struct bpf_hash_entry *entry;
1383
1384 entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
1385 while (entry && entry->pinning != pinning)
1386 entry = entry->next;
1387
1388 return entry ? entry->subpath : NULL;
1389 }
1390
1391 static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
1392 uint32_t pinning)
1393 {
1394 switch (pinning) {
1395 case PIN_OBJECT_NS:
1396 case PIN_GLOBAL_NS:
1397 return false;
1398 case PIN_NONE:
1399 return true;
1400 default:
1401 return !bpf_custom_pinning(ctx, pinning);
1402 }
1403 }
1404
1405 static void bpf_make_pathname(char *pathname, size_t len, const char *name,
1406 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1407 {
1408 switch (pinning) {
1409 case PIN_OBJECT_NS:
1410 snprintf(pathname, len, "%s/%s/%s",
1411 bpf_get_work_dir(ctx->type),
1412 ctx->obj_uid, name);
1413 break;
1414 case PIN_GLOBAL_NS:
1415 snprintf(pathname, len, "%s/%s/%s",
1416 bpf_get_work_dir(ctx->type),
1417 BPF_DIR_GLOBALS, name);
1418 break;
1419 default:
1420 snprintf(pathname, len, "%s/../%s/%s",
1421 bpf_get_work_dir(ctx->type),
1422 bpf_custom_pinning(ctx, pinning), name);
1423 break;
1424 }
1425 }
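
/* Resulting paths for a map named "jmp_map" under a tc program type,
 * assuming the default mount point (illustrative uid):
 *
 *   PIN_OBJECT_NS:  /sys/fs/bpf/tc/<obj_uid>/jmp_map
 *   PIN_GLOBAL_NS:  /sys/fs/bpf/tc/globals/jmp_map
 *   custom pinning: /sys/fs/bpf/<subpath>/jmp_map
 */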
1426
1427 static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
1428 uint32_t pinning)
1429 {
1430 char pathname[PATH_MAX];
1431
1432 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1433 return 0;
1434
1435 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1436 return bpf_obj_get(pathname, ctx->type);
1437 }
1438
1439 static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
1440 {
1441 char tmp[PATH_MAX];
1442 int ret;
1443
1444 snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
1445 ctx->obj_uid);
1446
1447 ret = mkdir(tmp, S_IRWXU);
1448 if (ret && errno != EEXIST) {
1449 fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
1450 return ret;
1451 }
1452
1453 return 0;
1454 }
1455
1456 static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
1457 const char *todo)
1458 {
1459 char tmp[PATH_MAX], rem[PATH_MAX], *sub;
1460 int ret;
1461
1462 snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
1463 snprintf(rem, sizeof(rem), "%s/", todo);
1464 sub = strtok(rem, "/");
1465
1466 while (sub) {
1467 if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
1468 return -EINVAL;
1469
1470 strcat(tmp, sub);
1471 strcat(tmp, "/");
1472
1473 ret = mkdir(tmp, S_IRWXU);
1474 if (ret && errno != EEXIST) {
1475 fprintf(stderr, "mkdir %s failed: %s\n", tmp,
1476 strerror(errno));
1477 return ret;
1478 }
1479
1480 sub = strtok(NULL, "/");
1481 }
1482
1483 return 0;
1484 }
1485
1486 static int bpf_place_pinned(int fd, const char *name,
1487 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1488 {
1489 char pathname[PATH_MAX];
1490 const char *tmp;
1491 int ret = 0;
1492
1493 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1494 return 0;
1495
1496 if (pinning == PIN_OBJECT_NS)
1497 ret = bpf_make_obj_path(ctx);
1498 else if ((tmp = bpf_custom_pinning(ctx, pinning)))
1499 ret = bpf_make_custom_path(ctx, tmp);
1500 if (ret < 0)
1501 return ret;
1502
1503 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1504 return bpf_obj_pin(fd, pathname);
1505 }
1506
1507 static void bpf_prog_report(int fd, const char *section,
1508 const struct bpf_elf_prog *prog,
1509 struct bpf_elf_ctx *ctx)
1510 {
1511 unsigned int insns = prog->size / sizeof(struct bpf_insn);
1512
1513 fprintf(stderr, "\nProg section '%s' %s%s (%d)!\n", section,
1514 fd < 0 ? "rejected: " : "loaded",
1515 fd < 0 ? strerror(errno) : "",
1516 fd < 0 ? errno : fd);
1517
1518 fprintf(stderr, " - Type: %u\n", prog->type);
1519 fprintf(stderr, " - Instructions: %u (%u over limit)\n",
1520 insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
1521 fprintf(stderr, " - License: %s\n\n", prog->license);
1522
1523 bpf_dump_error(ctx, "Verifier analysis:\n\n");
1524 }
1525
1526 static int bpf_prog_attach(const char *section,
1527 const struct bpf_elf_prog *prog,
1528 struct bpf_elf_ctx *ctx)
1529 {
1530 int tries = 0, fd;
1531 retry:
1532 errno = 0;
1533 fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
1534 prog->license, ctx->ifindex,
1535 ctx->log, ctx->log_size);
1536 if (fd < 0 || ctx->verbose) {
1537 /* The verifier log is pretty chatty, sometimes so chatty
1538 * on larger programs that we could fail to dump everything
1539 * into our buffer. Still, try to give a debuggable error
1540 * log for the user, so enlarge it and re-fail.
1541 */
1542 if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
1543 if (tries++ < 10 && !bpf_log_realloc(ctx))
1544 goto retry;
1545
1546 fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
1547 ctx->log_size, tries);
1548 return fd;
1549 }
1550
1551 bpf_prog_report(fd, section, prog, ctx);
1552 }
1553
1554 return fd;
1555 }
1556
1557 static void bpf_map_report(int fd, const char *name,
1558 const struct bpf_elf_map *map,
1559 struct bpf_elf_ctx *ctx, int inner_fd)
1560 {
1561 fprintf(stderr, "Map object '%s' %s%s (%d)!\n", name,
1562 fd < 0 ? "rejected: " : "loaded",
1563 fd < 0 ? strerror(errno) : "",
1564 fd < 0 ? errno : fd);
1565
1566 fprintf(stderr, " - Type: %u\n", map->type);
1567 fprintf(stderr, " - Identifier: %u\n", map->id);
1568 fprintf(stderr, " - Pinning: %u\n", map->pinning);
1569 fprintf(stderr, " - Size key: %u\n", map->size_key);
1570 fprintf(stderr, " - Size value: %u\n",
1571 inner_fd ? (int)sizeof(int) : map->size_value);
1572 fprintf(stderr, " - Max elems: %u\n", map->max_elem);
1573 fprintf(stderr, " - Flags: %#x\n\n", map->flags);
1574 }
1575
1576 static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
1577 {
1578 int i;
1579
1580 for (i = 0; i < ctx->map_num; i++) {
1581 if (ctx->maps[i].id != id)
1582 continue;
1583 if (ctx->map_fds[i] < 0)
1584 return -EINVAL;
1585
1586 return ctx->map_fds[i];
1587 }
1588
1589 return -ENOENT;
1590 }
1591
1592 static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
1593 {
1594 struct bpf_elf_map outer_map;
1595 int ret;
1596
1597 fprintf(stderr, "Cannot insert map into map! ");
1598
1599 ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
1600 if (!ret) {
1601 if (idx >= outer_map.max_elem &&
1602 outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
1603 fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
1604 outer_map.max_elem, idx);
1605 return;
1606 }
1607 }
1608
1609 fprintf(stderr, "Different map specs used for outer and inner map?\n");
1610 }
1611
1612 static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
1613 {
1614 return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1615 map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
1616 }
1617
1618 static bool bpf_map_offload_neutral(enum bpf_map_type type)
1619 {
1620 return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
1621 }
1622
1623 static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
1624 const struct bpf_elf_map *map, struct bpf_map_ext *ext,
1625 int *have_map_in_map)
1626 {
1627 int fd, ifindex, ret, map_inner_fd = 0;
1628
1629 fd = bpf_probe_pinned(name, ctx, map->pinning);
1630 if (fd > 0) {
1631 ret = bpf_map_selfcheck_pinned(fd, map, ext,
1632 offsetof(struct bpf_elf_map,
1633 id), ctx->type);
1634 if (ret < 0) {
1635 close(fd);
1636 fprintf(stderr, "Map '%s' self-check failed!\n",
1637 name);
1638 return ret;
1639 }
1640 if (ctx->verbose)
1641 fprintf(stderr, "Map '%s' loaded as pinned!\n",
1642 name);
1643 return fd;
1644 }
1645
1646 if (have_map_in_map && bpf_is_map_in_map_type(map)) {
1647 (*have_map_in_map)++;
1648 if (map->inner_id)
1649 return 0;
1650 fprintf(stderr, "Map '%s' cannot be created since no inner map ID defined!\n",
1651 name);
1652 return -EINVAL;
1653 }
1654
1655 if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
1656 map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
1657 if (map_inner_fd < 0) {
1658 fprintf(stderr, "Map '%s' cannot be loaded. Inner map with ID %u not found!\n",
1659 name, map->inner_id);
1660 return -EINVAL;
1661 }
1662 }
1663
1664 ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
1665 errno = 0;
1666 fd = bpf_map_create(map->type, map->size_key, map->size_value,
1667 map->max_elem, map->flags, map_inner_fd, ctx->btf_fd,
1668 ifindex, ext->btf_id_key, ext->btf_id_val);
1669
1670 if (fd < 0 || ctx->verbose) {
1671 bpf_map_report(fd, name, map, ctx, map_inner_fd);
1672 if (fd < 0)
1673 return fd;
1674 }
1675
1676 ret = bpf_place_pinned(fd, name, ctx, map->pinning);
1677 if (ret < 0 && errno != EEXIST) {
1678 fprintf(stderr, "Could not pin %s map: %s\n", name,
1679 strerror(errno));
1680 close(fd);
1681 return ret;
1682 }
1683
1684 return fd;
1685 }
1686
1687 static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
1688 const GElf_Sym *sym)
1689 {
1690 return ctx->str_tab->d_buf + sym->st_name;
1691 }
1692
1693 static int bpf_btf_find(struct bpf_elf_ctx *ctx, const char *name)
1694 {
1695 const struct btf_type *type;
1696 const char *res;
1697 int id;
1698
1699 for (id = 1; id < ctx->btf.types_num; id++) {
1700 type = ctx->btf.types[id];
1701 if (type->name_off >= ctx->btf.hdr->str_len)
1702 continue;
1703 res = &ctx->btf.strings[type->name_off];
1704 if (!strcmp(res, name))
1705 return id;
1706 }
1707
1708 return -ENOENT;
1709 }
1710
1711 static int bpf_btf_find_kv(struct bpf_elf_ctx *ctx, const struct bpf_elf_map *map,
1712 const char *name, uint32_t *id_key, uint32_t *id_val)
1713 {
1714 const struct btf_member *key, *val;
1715 const struct btf_type *type;
1716 char btf_name[512];
1717 const char *res;
1718 int id;
1719
1720 snprintf(btf_name, sizeof(btf_name), "____btf_map_%s", name);
1721 id = bpf_btf_find(ctx, btf_name);
1722 if (id < 0)
1723 return id;
1724
1725 type = ctx->btf.types[id];
1726 if (BTF_INFO_KIND(type->info) != BTF_KIND_STRUCT)
1727 return -EINVAL;
1728 if (BTF_INFO_VLEN(type->info) != 2)
1729 return -EINVAL;
1730
1731 key = ((void *) type) + sizeof(*type);
1732 val = key + 1;
1733 if (!key->type || key->type >= ctx->btf.types_num ||
1734 !val->type || val->type >= ctx->btf.types_num)
1735 return -EINVAL;
1736
1737 if (key->name_off >= ctx->btf.hdr->str_len ||
1738 val->name_off >= ctx->btf.hdr->str_len)
1739 return -EINVAL;
1740
1741 res = &ctx->btf.strings[key->name_off];
1742 if (strcmp(res, "key"))
1743 return -EINVAL;
1744
1745 res = &ctx->btf.strings[val->name_off];
1746 if (strcmp(res, "value"))
1747 return -EINVAL;
1748
1749 *id_key = key->type;
1750 *id_val = val->type;
1751 return 0;
1752 }
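
/* The key/value type IDs are recovered via the "____btf_map_<name>"
 * struct convention carried by annotated BPF programs (e.g. via a
 * BPF_ANNOTATE_KV_PAIR-style macro). A sketch of what the annotation
 * expands to on the program side (not part of this file):
 *
 *   struct ____btf_map_my_map {
 *           __u32 key;
 *           struct my_value value;
 *   };
 */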
1753
1754 static void bpf_btf_annotate(struct bpf_elf_ctx *ctx, int which, const char *name)
1755 {
1756 uint32_t id_key = 0, id_val = 0;
1757
1758 if (!bpf_btf_find_kv(ctx, &ctx->maps[which], name, &id_key, &id_val)) {
1759 ctx->maps_ext[which].btf_id_key = id_key;
1760 ctx->maps_ext[which].btf_id_val = id_val;
1761 }
1762 }
1763
1764 static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
1765 {
1766 const char *name;
1767 GElf_Sym sym;
1768 int i;
1769
1770 for (i = 0; i < ctx->sym_num; i++) {
1771 int type;
1772
1773 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1774 continue;
1775
1776 type = GELF_ST_TYPE(sym.st_info);
1777 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1778 (type != STT_NOTYPE && type != STT_OBJECT) ||
1779 sym.st_shndx != ctx->sec_maps ||
1780 sym.st_value / ctx->map_len != which)
1781 continue;
1782
1783 name = bpf_str_tab_name(ctx, &sym);
1784 bpf_btf_annotate(ctx, which, name);
1785 return name;
1786 }
1787
1788 return NULL;
1789 }
1790
1791 static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
1792 {
1793 int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
1794 const char *map_name;
1795
1796 for (i = 0; i < ctx->map_num; i++) {
1797 if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
1798 ctx->noafalg) {
1799 fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
1800 return -ENOTSUP;
1801 }
1802
1803 map_name = bpf_map_fetch_name(ctx, i);
1804 if (!map_name)
1805 return -EIO;
1806
1807 fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
1808 &ctx->maps_ext[i], &have_map_in_map);
1809 if (fd < 0)
1810 return fd;
1811
1812 ctx->map_fds[i] = !fd ? -1 : fd;
1813 }
1814
1815 for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
1816 if (ctx->map_fds[i] >= 0)
1817 continue;
1818
1819 map_name = bpf_map_fetch_name(ctx, i);
1820 if (!map_name)
1821 return -EIO;
1822
1823 fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
1824 &ctx->maps_ext[i], NULL);
1825 if (fd < 0)
1826 return fd;
1827
1828 ctx->map_fds[i] = fd;
1829 }
1830
1831 for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
1832 if (!ctx->maps[i].id ||
1833 ctx->maps[i].inner_id ||
1834 ctx->maps[i].inner_idx == -1)
1835 continue;
1836
1837 inner_fd = ctx->map_fds[i];
1838 inner_idx = ctx->maps[i].inner_idx;
1839
1840 for (j = 0; j < ctx->map_num; j++) {
1841 if (!bpf_is_map_in_map_type(&ctx->maps[j]))
1842 continue;
1843 if (ctx->maps[j].inner_id != ctx->maps[i].id)
1844 continue;
1845
1846 ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
1847 &inner_fd, BPF_ANY);
1848 if (ret < 0) {
1849 bpf_report_map_in_map(ctx->map_fds[j],
1850 inner_idx);
1851 return ret;
1852 }
1853 }
1854 }
1855
1856 return 0;
1857 }
1858
1859 static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
1860 {
1861 int i, num = 0;
1862 GElf_Sym sym;
1863
1864 for (i = 0; i < ctx->sym_num; i++) {
1865 int type;
1866
1867 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1868 continue;
1869
1870 type = GELF_ST_TYPE(sym.st_info);
1871 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1872 (type != STT_NOTYPE && type != STT_OBJECT) ||
1873 sym.st_shndx != ctx->sec_maps)
1874 continue;
1875 num++;
1876 }
1877
1878 return num;
1879 }
1880
1881 static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
1882 struct bpf_elf_sec_data *data)
1883 {
1884 Elf_Data *sec_edata;
1885 GElf_Shdr sec_hdr;
1886 Elf_Scn *sec_fd;
1887 char *sec_name;
1888
1889 memset(data, 0, sizeof(*data));
1890
1891 sec_fd = elf_getscn(ctx->elf_fd, section);
1892 if (!sec_fd)
1893 return -EINVAL;
1894 if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
1895 return -EIO;
1896
1897 sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
1898 sec_hdr.sh_name);
1899 if (!sec_name || !sec_hdr.sh_size)
1900 return -ENOENT;
1901
1902 sec_edata = elf_getdata(sec_fd, NULL);
1903 if (!sec_edata || elf_getdata(sec_fd, sec_edata))
1904 return -EIO;
1905
1906 memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));
1907
1908 data->sec_name = sec_name;
1909 data->sec_data = sec_edata;
1910 return 0;
1911 }
1912
1913 struct bpf_elf_map_min {
1914 __u32 type;
1915 __u32 size_key;
1916 __u32 size_value;
1917 __u32 max_elem;
1918 };
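
/* Oldest supported layout of struct bpf_elf_map. Anything between
 * this minimum and the current struct size is accepted and fixed up
 * in bpf_fetch_maps_end() below by zero-filling the missing members.
 */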
1919
1920 static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
1921 struct bpf_elf_sec_data *data)
1922 {
1923 ctx->map_num = data->sec_data->d_size;
1924 ctx->sec_maps = section;
1925 ctx->sec_done[section] = true;
1926
1927 if (ctx->map_num > sizeof(ctx->maps)) {
1928 fprintf(stderr, "Too many BPF maps in ELF section!\n");
1929 return -ENOMEM;
1930 }
1931
1932 memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
1933 return 0;
1934 }
1935
1936 static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
1937 {
1938 GElf_Sym sym;
1939 int off, i;
1940
1941 for (off = 0; off < end; off += ctx->map_len) {
1942 /* Order doesn't need to be linear here, hence we walk
1943 * the table again.
1944 */
1945 for (i = 0; i < ctx->sym_num; i++) {
1946 int type;
1947
1948 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1949 continue;
1950
1951 type = GELF_ST_TYPE(sym.st_info);
1952 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1953 (type != STT_NOTYPE && type != STT_OBJECT) ||
1954 sym.st_shndx != ctx->sec_maps)
1955 continue;
1956 if (sym.st_value == off)
1957 break;
1958 if (i == ctx->sym_num - 1)
1959 return -1;
1960 }
1961 }
1962
1963 return off == end ? 0 : -1;
1964 }
1965
1966 static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
1967 {
1968 struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
1969 int i, sym_num = bpf_map_num_sym(ctx);
1970 __u8 *buff;
1971
1972 if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
1973 fprintf(stderr, "%u maps not supported in current map section!\n",
1974 sym_num);
1975 return -EINVAL;
1976 }
1977
1978 if (ctx->map_num % sym_num != 0 ||
1979 ctx->map_num % sizeof(__u32) != 0) {
1980 fprintf(stderr, "BPF map section size is not a multiple of the number of map symbols!\n");
1981 return -EINVAL;
1982 }
1983
1984 ctx->map_len = ctx->map_num / sym_num;
1985 if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
1986 fprintf(stderr, "Different struct bpf_elf_map in use!\n");
1987 return -EINVAL;
1988 }
1989
1990 if (ctx->map_len == sizeof(struct bpf_elf_map)) {
1991 ctx->map_num = sym_num;
1992 return 0;
1993 } else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
1994 fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
1995 return -EINVAL;
1996 } else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
1997 fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
1998 return -EINVAL;
1999 }
2000
2001 ctx->map_num = sym_num;
2002 for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
2003 i++, buff += ctx->map_len) {
2004 /* The fixup leaves the rest of the members as zero, which
2005 * is fine currently, but the option exists to set some other
2006 * default value as well when needed in the future.
2007 */
2008 memcpy(&fixup[i], buff, ctx->map_len);
2009 }
2010
2011 memcpy(ctx->maps, fixup, sizeof(fixup));
2012 if (ctx->verbose)
2013 printf("%zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
2014 sizeof(struct bpf_elf_map) - ctx->map_len);
2015 return 0;
2016 }
2017
2018 static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
2019 struct bpf_elf_sec_data *data)
2020 {
2021 if (data->sec_data->d_size > sizeof(ctx->license))
2022 return -ENOMEM;
2023
2024 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
2025 ctx->sec_done[section] = true;
2026 return 0;
2027 }
2028
2029 static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
2030 struct bpf_elf_sec_data *data)
2031 {
2032 ctx->sym_tab = data->sec_data;
2033 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
2034 ctx->sec_done[section] = true;
2035 return 0;
2036 }
2037
2038 static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
2039 struct bpf_elf_sec_data *data)
2040 {
2041 ctx->str_tab = data->sec_data;
2042 ctx->sec_done[section] = true;
2043 return 0;
2044 }
2045
2046 static int bpf_fetch_text(struct bpf_elf_ctx *ctx, int section,
2047 struct bpf_elf_sec_data *data)
2048 {
2049 ctx->sec_text = section;
2050 ctx->sec_done[section] = true;
2051 return 0;
2052 }
2053
2054 static void bpf_btf_report(int fd, struct bpf_elf_ctx *ctx)
2055 {
2056 fprintf(stderr, "\nBTF debug data section '.BTF' %s%s (%d)!\n",
2057 fd < 0 ? "rejected: " : "loaded",
2058 fd < 0 ? strerror(errno) : "",
2059 fd < 0 ? errno : fd);
2060
2061 fprintf(stderr, " - Length: %zu\n", ctx->btf_data->d_size);
2062
2063 bpf_dump_error(ctx, "Verifier analysis:\n\n");
2064 }
2065
2066 static int bpf_btf_attach(struct bpf_elf_ctx *ctx)
2067 {
2068 int tries = 0, fd;
2069 retry:
2070 errno = 0;
2071 fd = bpf_btf_load(ctx->btf_data->d_buf, ctx->btf_data->d_size,
2072 ctx->log, ctx->log_size);
2073 if (fd < 0 || ctx->verbose) {
2074 if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
2075 if (tries++ < 10 && !bpf_log_realloc(ctx))
2076 goto retry;
2077
2078 fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
2079 ctx->log_size, tries);
2080 return fd;
2081 }
2082
2083 if (bpf_log_has_data(ctx))
2084 bpf_btf_report(fd, ctx);
2085 }
2086
2087 return fd;
2088 }
2089
2090 static int bpf_fetch_btf_begin(struct bpf_elf_ctx *ctx, int section,
2091 struct bpf_elf_sec_data *data)
2092 {
2093 ctx->btf_data = data->sec_data;
2094 ctx->sec_btf = section;
2095 ctx->sec_done[section] = true;
2096 return 0;
2097 }
2098
2099 static int bpf_btf_check_header(struct bpf_elf_ctx *ctx)
2100 {
2101 const struct btf_header *hdr = ctx->btf_data->d_buf;
2102 const char *str_start, *str_end;
2103 unsigned int data_len;
2104
2105 if (hdr->magic != BTF_MAGIC) {
2106 fprintf(stderr, "Object has wrong BTF magic: %x, expected: %x!\n",
2107 hdr->magic, BTF_MAGIC);
2108 return -EINVAL;
2109 }
2110
2111 if (hdr->version != BTF_VERSION) {
2112 fprintf(stderr, "Object has wrong BTF version: %u, expected: %u!\n",
2113 hdr->version, BTF_VERSION);
2114 return -EINVAL;
2115 }
2116
2117 if (hdr->flags) {
2118 fprintf(stderr, "Object has unsupported BTF flags %x!\n",
2119 hdr->flags);
2120 return -EINVAL;
2121 }
2122
2123 data_len = ctx->btf_data->d_size - sizeof(*hdr);
2124 if (data_len < hdr->type_off ||
2125 data_len < hdr->str_off ||
2126 data_len < hdr->type_len + hdr->str_len ||
2127 hdr->type_off >= hdr->str_off ||
2128 hdr->type_off + hdr->type_len != hdr->str_off ||
2129 hdr->str_off + hdr->str_len != data_len ||
2130 (hdr->type_off & (sizeof(uint32_t) - 1))) {
2131 fprintf(stderr, "Object has malformed BTF data!\n");
2132 return -EINVAL;
2133 }
2134
2135 ctx->btf.hdr = hdr;
2136 ctx->btf.raw = hdr + 1;
2137
2138 str_start = ctx->btf.raw + hdr->str_off;
2139 str_end = str_start + hdr->str_len;
2140 if (!hdr->str_len ||
2141 hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
2142 str_start[0] || str_end[-1]) {
2143 fprintf(stderr, "Object has malformed BTF string data!\n");
2144 return -EINVAL;
2145 }
2146
2147 ctx->btf.strings = str_start;
2148 return 0;
2149 }
2150
2151 static int bpf_btf_register_type(struct bpf_elf_ctx *ctx,
2152 const struct btf_type *type)
2153 {
2154 int cur = ctx->btf.types_num, num = cur + 1;
2155 const struct btf_type **types;
2156
2157 types = realloc(ctx->btf.types, num * sizeof(type));
2158 if (!types) {
2159 free(ctx->btf.types);
2160 ctx->btf.types = NULL;
2161 ctx->btf.types_num = 0;
2162 return -ENOMEM;
2163 }
2164
2165 ctx->btf.types = types;
2166 ctx->btf.types[cur] = type;
2167 ctx->btf.types_num = num;
2168 return 0;
2169 }
2170
2171 static struct btf_type btf_type_void;
2172
2173 static int bpf_btf_prep_type_data(struct bpf_elf_ctx *ctx)
2174 {
2175 const void *type_cur = ctx->btf.raw + ctx->btf.hdr->type_off;
2176 const void *type_end = ctx->btf.raw + ctx->btf.hdr->str_off;
2177 const struct btf_type *type;
2178 uint16_t var_len;
2179 int ret, kind;
2180
2181 ret = bpf_btf_register_type(ctx, &btf_type_void);
2182 if (ret < 0)
2183 return ret;
2184
2185 while (type_cur < type_end) {
2186 type = type_cur;
2187 type_cur += sizeof(*type);
2188
2189 var_len = BTF_INFO_VLEN(type->info);
2190 kind = BTF_INFO_KIND(type->info);
2191
2192 switch (kind) {
2193 case BTF_KIND_INT:
2194 type_cur += sizeof(int);
2195 break;
2196 case BTF_KIND_ARRAY:
2197 type_cur += sizeof(struct btf_array);
2198 break;
2199 case BTF_KIND_STRUCT:
2200 case BTF_KIND_UNION:
2201 type_cur += var_len * sizeof(struct btf_member);
2202 break;
2203 case BTF_KIND_ENUM:
2204 type_cur += var_len * sizeof(struct btf_enum);
2205 break;
2206 case BTF_KIND_FUNC_PROTO:
2207 type_cur += var_len * sizeof(struct btf_param);
2208 break;
2209 case BTF_KIND_TYPEDEF:
2210 case BTF_KIND_PTR:
2211 case BTF_KIND_FWD:
2212 case BTF_KIND_VOLATILE:
2213 case BTF_KIND_CONST:
2214 case BTF_KIND_RESTRICT:
2215 case BTF_KIND_FUNC:
2216 break;
2217 default:
2218 fprintf(stderr, "Object has unknown BTF type: %u!\n", kind);
2219 return -EINVAL;
2220 }
2221
2222 ret = bpf_btf_register_type(ctx, type);
2223 if (ret < 0)
2224 return ret;
2225 }
2226
2227 return 0;
2228 }
2229
2230 static int bpf_btf_prep_data(struct bpf_elf_ctx *ctx)
2231 {
2232 int ret = bpf_btf_check_header(ctx);
2233
2234 if (!ret)
2235 return bpf_btf_prep_type_data(ctx);
2236 return ret;
2237 }
2238
2239 static void bpf_fetch_btf_end(struct bpf_elf_ctx *ctx)
2240 {
2241 int fd = bpf_btf_attach(ctx);
2242
2243 if (fd < 0)
2244 return;
2245 ctx->btf_fd = fd;
2246 if (bpf_btf_prep_data(ctx) < 0) {
2247 close(ctx->btf_fd);
2248 ctx->btf_fd = 0;
2249 }
2250 }
2251
2252 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
2253 {
2254 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
2255 }
2256
2257 static bool bpf_has_btf_data(const struct bpf_elf_ctx *ctx)
2258 {
2259 return ctx->sec_btf;
2260 }
2261
2262 static bool bpf_has_call_data(const struct bpf_elf_ctx *ctx)
2263 {
2264 return ctx->sec_text;
2265 }
2266
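/* First pass over all ELF sections: collect everything the program
 * sections depend on (maps, license, .text for bpf-to-bpf calls,
 * symbol and string tables, raw BTF) and create the map fds before
 * any program is loaded.
 */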
2267 static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx, bool check_text_sec)
2268 {
2269 struct bpf_elf_sec_data data;
2270 int i, ret = -1;
2271
2272 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2273 ret = bpf_fill_section_data(ctx, i, &data);
2274 if (ret < 0)
2275 continue;
2276
2277 if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2278 !strcmp(data.sec_name, ELF_SECTION_MAPS))
2279 ret = bpf_fetch_maps_begin(ctx, i, &data);
2280 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2281 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
2282 ret = bpf_fetch_license(ctx, i, &data);
2283 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2284 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2285 !strcmp(data.sec_name, ".text") &&
2286 check_text_sec)
2287 ret = bpf_fetch_text(ctx, i, &data);
2288 else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
2289 !strcmp(data.sec_name, ".symtab"))
2290 ret = bpf_fetch_symtab(ctx, i, &data);
2291 else if (data.sec_hdr.sh_type == SHT_STRTAB &&
2292 !strcmp(data.sec_name, ".strtab"))
2293 ret = bpf_fetch_strtab(ctx, i, &data);
2294 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2295 !strcmp(data.sec_name, ".BTF"))
2296 ret = bpf_fetch_btf_begin(ctx, i, &data);
2297 if (ret < 0) {
2298 fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
2299 i);
2300 return ret;
2301 }
2302 }
2303
2304 if (bpf_has_btf_data(ctx))
2305 bpf_fetch_btf_end(ctx);
2306 if (bpf_has_map_data(ctx)) {
2307 ret = bpf_fetch_maps_end(ctx);
2308 if (ret < 0) {
2309 fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
2310 return ret;
2311 }
2312
2313 ret = bpf_maps_attach_all(ctx);
2314 if (ret < 0) {
2315 fprintf(stderr, "Error loading maps into kernel!\n");
2316 return ret;
2317 }
2318 }
2319
2320 return ret;
2321 }
2322
2323 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
2324 bool *sseen)
2325 {
2326 struct bpf_elf_sec_data data;
2327 struct bpf_elf_prog prog;
2328 int ret, i, fd = -1;
2329
2330 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2331 if (ctx->sec_done[i])
2332 continue;
2333
2334 ret = bpf_fill_section_data(ctx, i, &data);
2335 if (ret < 0 ||
2336 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
2337 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2338 !strcmp(data.sec_name, section)))
2339 continue;
2340
2341 *sseen = true;
2342
2343 memset(&prog, 0, sizeof(prog));
2344 prog.type = ctx->type;
2345 prog.license = ctx->license;
2346 prog.size = data.sec_data->d_size;
2347 prog.insns_num = prog.size / sizeof(struct bpf_insn);
2348 prog.insns = data.sec_data->d_buf;
2349
2350 fd = bpf_prog_attach(section, &prog, ctx);
2351 if (fd < 0)
2352 return fd;
2353
2354 ctx->sec_done[i] = true;
2355 break;
2356 }
2357
2358 return fd;
2359 }
2360
2361 struct bpf_relo_props {
2362 struct bpf_tail_call {
2363 unsigned int total;
2364 unsigned int jited;
2365 } tc;
2366 int main_num;
2367 };
2368
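/* Resolve a map relocation: the ld64 (BPF_LD | BPF_IMM | BPF_DW)
 * instruction at the relocation offset gets its immediate rewritten to
 * the map fd, with src_reg marked as BPF_PSEUDO_MAP_FD so the verifier
 * interprets the immediate as an fd. Tail-call (prog array) map usage
 * is counted along the way for later JIT mismatch diagnostics.
 */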
2369 static int bpf_apply_relo_map(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2370 GElf_Rel *relo, GElf_Sym *sym,
2371 struct bpf_relo_props *props)
2372 {
2373 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2374 unsigned int map_idx = sym->st_value / ctx->map_len;
2375
2376 if (insn_off >= prog->insns_num)
2377 return -EINVAL;
2378 if (prog->insns[insn_off].code != (BPF_LD | BPF_IMM | BPF_DW)) {
2379 fprintf(stderr, "ELF contains relo data for non-ld64 instruction at offset %u! Compiler bug?!\n",
2380 insn_off);
2381 return -EINVAL;
2382 }
2383
2384 if (map_idx >= ARRAY_SIZE(ctx->map_fds))
2385 return -EINVAL;
2386 if (!ctx->map_fds[map_idx])
2387 return -EINVAL;
2388 if (ctx->maps[map_idx].type == BPF_MAP_TYPE_PROG_ARRAY) {
2389 props->tc.total++;
2390 if (ctx->maps_ext[map_idx].owner.jited ||
2391 (ctx->maps_ext[map_idx].owner.type == 0 &&
2392 ctx->cfg.jit_enabled))
2393 props->tc.jited++;
2394 }
2395
2396 prog->insns[insn_off].src_reg = BPF_PSEUDO_MAP_FD;
2397 prog->insns[insn_off].imm = ctx->map_fds[map_idx];
2398 return 0;
2399 }
2400
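/* Resolve a bpf-to-bpf call relocation. On the first call relocation
 * the .text subprograms are appended once behind the main program
 * (props->main_num records where); the call immediate is then fixed up
 * by the distance between the call site and the appended .text code.
 */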
2401 static int bpf_apply_relo_call(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2402 GElf_Rel *relo, GElf_Sym *sym,
2403 struct bpf_relo_props *props)
2404 {
2405 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2406 struct bpf_elf_prog *prog_text = &ctx->prog_text;
2407
2408 if (insn_off >= prog->insns_num)
2409 return -EINVAL;
2410 if (prog->insns[insn_off].code != (BPF_JMP | BPF_CALL) ||
2411 prog->insns[insn_off].src_reg != BPF_PSEUDO_CALL) {
2412 fprintf(stderr, "ELF contains relo data for non-call instruction at offset %u! Compiler bug?!\n",
2413 insn_off);
2414 return -EINVAL;
2415 }
2416
2417 if (!props->main_num) {
2418 struct bpf_insn *insns = realloc(prog->insns,
2419 prog->size + prog_text->size);
2420 if (!insns)
2421 return -ENOMEM;
2422
2423 memcpy(insns + prog->insns_num, prog_text->insns,
2424 prog_text->size);
2425 props->main_num = prog->insns_num;
2426 prog->insns = insns;
2427 prog->insns_num += prog_text->insns_num;
2428 prog->size += prog_text->size;
2429 }
2430
2431 prog->insns[insn_off].imm += props->main_num - insn_off;
2432 return 0;
2433 }
2434
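/* Apply all relocation entries of one SHT_REL section to the program:
 * each entry is dispatched by the section its symbol lives in, either
 * the maps section or .text for subprogram calls.
 */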
2435 static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
2436 struct bpf_elf_sec_data *data_relo,
2437 struct bpf_elf_prog *prog,
2438 struct bpf_relo_props *props)
2439 {
2440 GElf_Shdr *rhdr = &data_relo->sec_hdr;
2441 int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
2442
2443 for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
2444 GElf_Rel relo;
2445 GElf_Sym sym;
2446 int ret = -EIO;
2447
2448 if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
2449 return -EIO;
2450 if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
2451 return -EIO;
2452
2453 if (sym.st_shndx == ctx->sec_maps)
2454 ret = bpf_apply_relo_map(ctx, prog, &relo, &sym, props);
2455 else if (sym.st_shndx == ctx->sec_text)
2456 ret = bpf_apply_relo_call(ctx, prog, &relo, &sym, props);
2457 else
2458 fprintf(stderr, "ELF contains non-{map,call} related relo data in entry %u pointing to section %u! Compiler bug?!\n",
2459 relo_ent, sym.st_shndx);
2460 if (ret < 0)
2461 return ret;
2462 }
2463
2464 return 0;
2465 }
2466
2467 static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
2468 bool *lderr, bool *sseen, struct bpf_elf_prog *prog)
2469 {
2470 struct bpf_elf_sec_data data_relo, data_insn;
2471 int ret, idx, i, fd = -1;
2472
2473 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2474 struct bpf_relo_props props = {};
2475
2476 ret = bpf_fill_section_data(ctx, i, &data_relo);
2477 if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
2478 continue;
2479
2480 idx = data_relo.sec_hdr.sh_info;
2481
2482 ret = bpf_fill_section_data(ctx, idx, &data_insn);
2483 if (ret < 0 ||
2484 !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
2485 (data_insn.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2486 !strcmp(data_insn.sec_name, section)))
2487 continue;
2488 if (sseen)
2489 *sseen = true;
2490
2491 memset(prog, 0, sizeof(*prog));
2492 prog->type = ctx->type;
2493 prog->license = ctx->license;
2494 prog->size = data_insn.sec_data->d_size;
2495 prog->insns_num = prog->size / sizeof(struct bpf_insn);
2496 prog->insns = malloc(prog->size);
2497 if (!prog->insns) {
2498 *lderr = true;
2499 return -ENOMEM;
2500 }
2501
2502 memcpy(prog->insns, data_insn.sec_data->d_buf, prog->size);
2503
2504 ret = bpf_apply_relo_data(ctx, &data_relo, prog, &props);
2505 if (ret < 0) {
2506 *lderr = true;
2507 if (ctx->sec_text != idx)
2508 free(prog->insns);
2509 return ret;
2510 }
2511 if (ctx->sec_text == idx) {
2512 fd = 0;
2513 goto out;
2514 }
2515
2516 fd = bpf_prog_attach(section, prog, ctx);
2517 free(prog->insns);
2518 if (fd < 0) {
2519 *lderr = true;
2520 if (props.tc.total) {
2521 if (ctx->cfg.jit_enabled &&
2522 props.tc.total != props.tc.jited)
2523 fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have JITed owner!\n",
2524 props.tc.jited, props.tc.total);
2525 if (!ctx->cfg.jit_enabled &&
2526 props.tc.jited)
2527 fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have JITed owner!\n",
2528 props.tc.jited, props.tc.total);
2529 }
2530 return fd;
2531 }
2532 out:
2533 ctx->sec_done[i] = true;
2534 ctx->sec_done[idx] = true;
2535 break;
2536 }
2537
2538 return fd;
2539 }
2540
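/* Load one named program section. If the object has bpf-to-bpf calls,
 * .text is prepared first so its instructions can be appended during
 * relocation. Sections with relocation data are tried first; a plain
 * section without any relo data is loaded as a fallback.
 */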
2541 static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2542 {
2543 bool lderr = false, sseen = false;
2544 struct bpf_elf_prog prog;
2545 int ret = -1;
2546
2547 if (bpf_has_call_data(ctx)) {
2548 ret = bpf_fetch_prog_relo(ctx, ".text", &lderr, NULL,
2549 &ctx->prog_text);
2550 if (ret < 0)
2551 return ret;
2552 }
2553
2554 if (bpf_has_map_data(ctx) || bpf_has_call_data(ctx))
2555 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen, &prog);
2556 if (ret < 0 && !lderr)
2557 ret = bpf_fetch_prog(ctx, section, &sseen);
2558 if (ret < 0 && !sseen)
2559 fprintf(stderr, "Program section '%s' not found in ELF file!\n",
2560 section);
2561 return ret;
2562 }
2563
2564 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2565 {
2566 int i;
2567
2568 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2569 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2570 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2571 return i;
2572 return -1;
2573 }
2574
2575 struct bpf_jited_aux {
2576 int prog_fd;
2577 int map_fd;
2578 struct bpf_prog_data prog;
2579 struct bpf_map_ext map;
2580 };
2581
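/* Recover program type and JIT state for a program fd by scraping
 * /proc/<pid>/fdinfo/<fd>, which the kernel populates for BPF fds.
 */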
2582 static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2583 {
2584 char file[PATH_MAX], buff[4096];
2585 unsigned int val;
2586 FILE *fp;
2587
2588 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
2589 memset(prog, 0, sizeof(*prog));
2590
2591 fp = fopen(file, "r");
2592 if (!fp) {
2593 fprintf(stderr, "No procfs support?!\n");
2594 return -EIO;
2595 }
2596
2597 while (fgets(buff, sizeof(buff), fp)) {
2598 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2599 prog->type = val;
2600 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2601 prog->jited = val;
2602 }
2603
2604 fclose(fp);
2605 return 0;
2606 }
2607
2608 static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2609 {
2610 struct bpf_elf_map tmp;
2611 int ret;
2612
2613 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2614 if (!ret)
2615 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2616
2617 return ret;
2618 }
2619
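/* Populate tail-call maps: ELF sections named "<map>/<key>" are loaded
 * as separate programs and their fds written into slot <key> of the
 * prog array map with id <map>. If the update fails, fdinfo of both
 * map and program is compared to hint at why the kernel rejected it
 * (program type or JIT state not matching the map owner).
 */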
2620 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
2621 {
2622 struct bpf_elf_sec_data data;
2623 uint32_t map_id, key_id;
2624 int fd, i, ret, idx;
2625
2626 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2627 if (ctx->sec_done[i])
2628 continue;
2629
2630 ret = bpf_fill_section_data(ctx, i, &data);
2631 if (ret < 0)
2632 continue;
2633
2634 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
2635 if (ret != 2)
2636 continue;
2637
2638 idx = bpf_find_map_by_id(ctx, map_id);
2639 if (idx < 0)
2640 continue;
2641
2642 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
2643 if (fd < 0)
2644 return -EIO;
2645
2646 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
2647 &fd, BPF_ANY);
2648 if (ret < 0) {
2649 struct bpf_jited_aux aux = {};
2650
2651 ret = -errno;
2652 if (errno == E2BIG) {
2653 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
2654 key_id, map_id);
2655 return ret;
2656 }
2657
2658 aux.map_fd = ctx->map_fds[idx];
2659 aux.prog_fd = fd;
2660
2661 if (bpf_tail_call_get_aux(&aux))
2662 return ret;
2663 if (!aux.map.owner.type)
2664 return ret;
2665
2666 if (aux.prog.type != aux.map.owner.type)
2667 fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
2668 aux.map.owner.type, aux.prog.type);
2669 if (aux.prog.jited != aux.map.owner.jited)
2670 fprintf(stderr, "Tail call map %s jited, but prog %s!\n",
2671 aux.map.owner.jited ? "is" : "not",
2672 aux.prog.jited ? "is" : "not");
2673 return ret;
2674 }
2675
2676 ctx->sec_done[i] = true;
2677 }
2678
2679 return 0;
2680 }
2681
2682 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2683 {
2684 struct stat st;
2685 int ret;
2686
2687 memset(&ctx->stat, 0, sizeof(ctx->stat));
2688
2689 ret = fstat(ctx->obj_fd, &st);
2690 if (ret < 0) {
2691 fprintf(stderr, "Stat of elf file failed: %s\n",
2692 strerror(errno));
2693 return;
2694 }
2695
2696 ctx->stat.st_dev = st.st_dev;
2697 ctx->stat.st_ino = st.st_ino;
2698 }
2699
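/* Parse one line of the pinning database. Expected per-line format is
 * "<id> <subpath>", optionally followed by a '#' comment; blank lines
 * and lines starting with '#' are skipped. Returns 1 for a parsed
 * entry, 0 on EOF and -1 on a malformed line.
 */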
2700 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2701 {
2702 char buff[PATH_MAX];
2703
2704 while (fgets(buff, sizeof(buff), fp)) {
2705 char *ptr = buff;
2706
2707 while (*ptr == ' ' || *ptr == '\t')
2708 ptr++;
2709
2710 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2711 continue;
2712
2713 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2714 sscanf(ptr, "%i %s #", id, path) != 2) {
2715 strcpy(path, ptr);
2716 return -1;
2717 }
2718
2719 return 1;
2720 }
2721
2722 return 0;
2723 }
2724
2725 static bool bpf_pinning_reserved(uint32_t pinning)
2726 {
2727 switch (pinning) {
2728 case PIN_NONE:
2729 case PIN_OBJECT_NS:
2730 case PIN_GLOBAL_NS:
2731 return true;
2732 default:
2733 return false;
2734 }
2735 }
2736
2737 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2738 {
2739 struct bpf_hash_entry *entry;
2740 char subpath[PATH_MAX] = {};
2741 uint32_t pinning;
2742 FILE *fp;
2743 int ret;
2744
2745 fp = fopen(db_file, "r");
2746 if (!fp)
2747 return;
2748
2749 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2750 if (ret == -1) {
2751 fprintf(stderr, "Database %s is corrupted at: %s\n",
2752 db_file, subpath);
2753 fclose(fp);
2754 return;
2755 }
2756
2757 if (bpf_pinning_reserved(pinning)) {
2758 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2759 db_file, pinning);
2760 continue;
2761 }
2762
2763 entry = malloc(sizeof(*entry));
2764 if (!entry) {
2765 fprintf(stderr, "No memory left for db entry!\n");
2766 continue;
2767 }
2768
2769 entry->pinning = pinning;
2770 entry->subpath = strdup(subpath);
2771 if (!entry->subpath) {
2772 fprintf(stderr, "No memory left for db entry!\n");
2773 free(entry);
2774 continue;
2775 }
2776
2777 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2778 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2779 }
2780
2781 fclose(fp);
2782 }
2783
2784 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2785 {
2786 struct bpf_hash_entry *entry;
2787 int i;
2788
2789 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2790 while ((entry = ctx->ht[i]) != NULL) {
2791 ctx->ht[i] = entry->next;
2792 free((char *)entry->subpath);
2793 free(entry);
2794 }
2795 }
2796 }
2797
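/* Sanity-check the ELF header: the object must be a relocatable file
 * for EM_BPF (or EM_NONE for older LLVM), and its endianness must
 * match the host since instructions are patched in place.
 */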
2798 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2799 {
2800 if (ctx->elf_hdr.e_type != ET_REL ||
2801 (ctx->elf_hdr.e_machine != EM_NONE &&
2802 ctx->elf_hdr.e_machine != EM_BPF) ||
2803 ctx->elf_hdr.e_version != EV_CURRENT) {
2804 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2805 return -EINVAL;
2806 }
2807
2808 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2809 default:
2810 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2811 return -EINVAL;
2812 case ELFDATA2LSB:
2813 if (htons(1) == 1) {
2814 fprintf(stderr,
2815 "We are big endian, eBPF object is little endian!\n");
2816 return -EIO;
2817 }
2818 break;
2819 case ELFDATA2MSB:
2820 if (htons(1) != 1) {
2821 fprintf(stderr,
2822 "We are little endian, eBPF object is big endian!\n");
2823 return -EIO;
2824 }
2825 break;
2826 }
2827
2828 return 0;
2829 }
2830
2831 static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2832 {
2833 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2834 int fd;
2835
2836 fd = open(path_jit, O_RDONLY);
2837 if (fd >= 0) {
2838 char tmp[16] = {};
2839
2840 if (read(fd, tmp, sizeof(tmp)) > 0)
2841 ctx->cfg.jit_enabled = atoi(tmp);
2842 close(fd);
2843 }
2844 }
2845
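/* Set up the per-object loader context: hash the object file (the hex
 * digest serves as its uid for map sharing; noafalg is set if hashing
 * is unavailable), open and validate the ELF handle, allocate the
 * per-section done markers and the optional verifier log buffer, then
 * record the file identity and read the pinning database.
 */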
2846 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2847 enum bpf_prog_type type, __u32 ifindex,
2848 bool verbose)
2849 {
2850 uint8_t tmp[20];
2851 int ret;
2852
2853 if (elf_version(EV_CURRENT) == EV_NONE)
2854 return -EINVAL;
2855
2856 bpf_init_env();
2857
2858 memset(ctx, 0, sizeof(*ctx));
2859 bpf_get_cfg(ctx);
2860
2861 ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
2862 if (ret)
2863 ctx->noafalg = true;
2864 else
2865 hexstring_n2a(tmp, sizeof(tmp), ctx->obj_uid,
2866 sizeof(ctx->obj_uid));
2867
2868 ctx->verbose = verbose;
2869 ctx->type = type;
2870 ctx->ifindex = ifindex;
2871
2872 ctx->obj_fd = open(pathname, O_RDONLY);
2873 if (ctx->obj_fd < 0)
2874 return ctx->obj_fd;
2875
2876 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2877 if (!ctx->elf_fd) {
2878 ret = -EINVAL;
2879 goto out_fd;
2880 }
2881
2882 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2883 ret = -EINVAL;
2884 goto out_fd;
2885 }
2886
2887 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2888 &ctx->elf_hdr) {
2889 ret = -EIO;
2890 goto out_elf;
2891 }
2892
2893 ret = bpf_elf_check_ehdr(ctx);
2894 if (ret < 0)
2895 goto out_elf;
2896
2897 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2898 sizeof(*(ctx->sec_done)));
2899 if (!ctx->sec_done) {
2900 ret = -ENOMEM;
2901 goto out_elf;
2902 }
2903
2904 if (ctx->verbose && bpf_log_realloc(ctx)) {
2905 ret = -ENOMEM;
2906 goto out_free;
2907 }
2908
2909 bpf_save_finfo(ctx);
2910 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2911
2912 return 0;
2913 out_free:
2914 free(ctx->sec_done);
2915 out_elf:
2916 elf_end(ctx->elf_fd);
2917 out_fd:
2918 close(ctx->obj_fd);
2919 return ret;
2920 }
2921
2922 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2923 {
2924 int i, count = 0;
2925
2926 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2927 if (!ctx->map_fds[i])
2928 break;
2929 count++;
2930 }
2931
2932 return count;
2933 }
2934
2935 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2936 {
2937 int i;
2938
2939 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2940 if (ctx->map_fds[i])
2941 close(ctx->map_fds[i]);
2942 }
2943
2944 if (ctx->btf_fd)
2945 close(ctx->btf_fd);
2946 free(ctx->btf.types);
2947 }
2948
2949 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2950 {
2951 if (failure)
2952 bpf_maps_teardown(ctx);
2953
2954 bpf_hash_destroy(ctx);
2955
2956 free(ctx->prog_text.insns);
2957 free(ctx->sec_done);
2958 free(ctx->log);
2959
2960 elf_end(ctx->elf_fd);
2961 close(ctx->obj_fd);
2962 }
2963
2964 static struct bpf_elf_ctx __ctx;
2965
2966 static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
2967 const char *section, __u32 ifindex, bool verbose)
2968 {
2969 struct bpf_elf_ctx *ctx = &__ctx;
2970 int fd = 0, ret;
2971
2972 ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
2973 if (ret < 0) {
2974 fprintf(stderr, "Cannot initialize ELF context!\n");
2975 return ret;
2976 }
2977
2978 ret = bpf_fetch_ancillary(ctx, strcmp(section, ".text"));
2979 if (ret < 0) {
2980 fprintf(stderr, "Error fetching ELF ancillary data!\n");
2981 goto out;
2982 }
2983
2984 fd = bpf_fetch_prog_sec(ctx, section);
2985 if (fd < 0) {
2986 fprintf(stderr, "Error fetching program/map!\n");
2987 ret = fd;
2988 goto out;
2989 }
2990
2991 ret = bpf_fill_prog_arrays(ctx);
2992 if (ret < 0)
2993 fprintf(stderr, "Error filling program arrays!\n");
2994 out:
2995 bpf_elf_ctx_destroy(ctx, ret < 0);
2996 if (ret < 0) {
2997 if (fd)
2998 close(fd);
2999 return ret;
3000 }
3001
3002 return fd;
3003 }
3004
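/* Push all map fds plus their metadata to a peer over a Unix domain
 * socket. The fds travel as SCM_RIGHTS control messages, at most
 * BPF_SCM_MAX_FDS per sendmsg() call, so large map sets are sent in
 * several batches.
 */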
3005 static int
3006 bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
3007 const struct bpf_map_data *aux, unsigned int entries)
3008 {
3009 struct bpf_map_set_msg msg = {
3010 .aux.uds_ver = BPF_SCM_AUX_VER,
3011 .aux.num_ent = entries,
3012 };
3013 int *cmsg_buf, min_fd;
3014 char *amsg_buf;
3015 int i;
3016
3017 strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
3018 memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));
3019
3020 cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
3021 amsg_buf = (char *)msg.aux.ent;
3022
3023 for (i = 0; i < entries; i += min_fd) {
3024 int ret;
3025
3026 min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
3027 bpf_map_set_init_single(&msg, min_fd);
3028
3029 memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
3030 memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);
3031
3032 ret = sendmsg(fd, &msg.hdr, 0);
3033 if (ret <= 0)
3034 return ret ? : -1;
3035 }
3036
3037 return 0;
3038 }
3039
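/* Counterpart to bpf_map_set_send(): receive fd batches until the
 * number of entries announced in the aux header of the first message
 * has been collected, validating the uds version and that the control
 * data was not truncated.
 */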
3040 static int
3041 bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
3042 unsigned int entries)
3043 {
3044 struct bpf_map_set_msg msg;
3045 int *cmsg_buf, min_fd;
3046 char *amsg_buf, *mmsg_buf;
3047 unsigned int needed = 1;
3048 int i;
3049
3050 cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
3051 amsg_buf = (char *)msg.aux.ent;
3052 mmsg_buf = (char *)&msg.aux;
3053
3054 for (i = 0; i < min(entries, needed); i += min_fd) {
3055 struct cmsghdr *cmsg;
3056 int ret;
3057
3058 min_fd = min(entries, entries - i);
3059 bpf_map_set_init_single(&msg, min_fd);
3060
3061 ret = recvmsg(fd, &msg.hdr, 0);
3062 if (ret <= 0)
3063 return ret ? : -1;
3064
3065 cmsg = CMSG_FIRSTHDR(&msg.hdr);
3066 if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
3067 return -EINVAL;
3068 if (msg.hdr.msg_flags & MSG_CTRUNC)
3069 return -EIO;
3070 if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
3071 return -ENOSYS;
3072
3073 min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
3074 if (min_fd > entries || min_fd <= 0)
3075 return -EINVAL;
3076
3077 memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
3078 memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
3079 memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));
3080
3081 needed = aux->num_ent;
3082 }
3083
3084 return 0;
3085 }
3086
3087 int bpf_send_map_fds(const char *path, const char *obj)
3088 {
3089 struct bpf_elf_ctx *ctx = &__ctx;
3090 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3091 struct bpf_map_data bpf_aux = {
3092 .fds = ctx->map_fds,
3093 .ent = ctx->maps,
3094 .st = &ctx->stat,
3095 .obj = obj,
3096 };
3097 int fd, ret;
3098
3099 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3100 if (fd < 0) {
3101 fprintf(stderr, "Cannot open socket: %s\n",
3102 strerror(errno));
3103 return -1;
3104 }
3105
3106 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3107
3108 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
3109 if (ret < 0) {
3110 fprintf(stderr, "Cannot connect to %s: %s\n",
3111 path, strerror(errno));
close(fd);
3112 return -1;
3113 }
3114
3115 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
3116 bpf_maps_count(ctx));
3117 if (ret < 0)
3118 fprintf(stderr, "Cannot send fds to %s: %s\n",
3119 path, strerror(errno));
3120
3121 bpf_maps_teardown(ctx);
3122 close(fd);
3123 return ret;
3124 }
3125
3126 int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
3127 unsigned int entries)
3128 {
3129 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3130 int fd, ret;
3131
3132 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3133 if (fd < 0) {
3134 fprintf(stderr, "Cannot open socket: %s\n",
3135 strerror(errno));
3136 return -1;
3137 }
3138
3139 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3140
3141 ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
3142 if (ret < 0) {
3143 fprintf(stderr, "Cannot bind to socket: %s\n",
3144 strerror(errno));
close(fd);
3145 return -1;
3146 }
3147
3148 ret = bpf_map_set_recv(fd, fds, aux, entries);
3149 if (ret < 0)
3150 fprintf(stderr, "Cannot recv fds from %s: %s\n",
3151 path, strerror(errno));
3152
3153 unlink(addr.sun_path);
3154 close(fd);
3155 return ret;
3156 }
3157 #endif /* HAVE_ELF */