/*
 * bpf.c	BPF common code
 *
 *		This program is free software; you can distribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Daniel Borkmann <daniel@iogearbox.net>
 *		Jiri Pirko <jiri@resnulli.us>
 *		Alexei Starovoitov <ast@kernel.org>
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <limits.h>
#include <assert.h>

#ifdef HAVE_ELF
#include <libelf.h>
#include <gelf.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/sendfile.h>
#include <sys/resource.h>

#include <arpa/inet.h>

#include "utils.h"
#include "json_print.h"

#include "bpf_util.h"
#include "bpf_elf.h"
#include "bpf_scm.h"

struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};

static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};

static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type = "cls",
		.subdir = "tc",
		.section = ELF_SECTION_CLASSIFIER,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type = "act",
		.subdir = "tc",
		.section = ELF_SECTION_ACTION,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type = "xdp",
		.subdir = "xdp",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type = "lwt_in",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type = "lwt_out",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type = "lwt_xmit",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
		.type = "lwt_seg6local",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
};

static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].subdir);
	return __bpf_prog_meta[type].subdir;
}

const char *bpf_prog_to_default_section(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].section);
	return __bpf_prog_meta[type].section;
}

#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose);
#else
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif

static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

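/* Thin wrapper around the bpf(2) syscall; every operation below goes
 * through here. The union bpf_attr carries the per-command arguments
 * and is zeroed by each caller so that unused fields don't trip the
 * kernel's attribute checks.
 */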
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}

static int bpf_map_update(int fd, const void *key, const void *value,
			  uint64_t flags)
{
	union bpf_attr attr = {};

	attr.map_fd = fd;
	attr.key = bpf_ptr_to_u64(key);
	attr.value = bpf_ptr_to_u64(value);
	attr.flags = flags;

	return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int bpf_prog_fd_by_id(uint32_t id)
{
	union bpf_attr attr = {};

	attr.prog_id = id;

	return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
			       uint32_t *info_len)
{
	union bpf_attr attr = {};
	int ret;

	attr.info.bpf_fd = fd;
	attr.info.info = bpf_ptr_to_u64(info);
	attr.info.info_len = *info_len;

	*info_len = 0;
	ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!ret)
		*info_len = attr.info.info_len;

	return ret;
}

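/* Print "id <n> tag <tag> [jited]" for the program with the given id,
 * in both plain and JSON context. Returns 1 only if the full program
 * info could be fetched via BPF_OBJ_GET_INFO_BY_FD.
 */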
int bpf_dump_prog_info(FILE *f, uint32_t id)
{
	struct bpf_prog_info info = {};
	uint32_t len = sizeof(info);
	int fd, ret, dump_ok = 0;
	SPRINT_BUF(tmp);

	open_json_object("prog");
	print_uint(PRINT_ANY, "id", "id %u ", id);

	fd = bpf_prog_fd_by_id(id);
	if (fd < 0)
		goto out;

	ret = bpf_prog_info_by_fd(fd, &info, &len);
	if (!ret && len) {
		int jited = !!info.jited_prog_len;

		print_string(PRINT_ANY, "tag", "tag %s ",
			     hexstring_n2a(info.tag, sizeof(info.tag),
					   tmp, sizeof(tmp)));
		print_uint(PRINT_JSON, "jited", NULL, jited);
		if (jited && !is_json_context())
			fprintf(f, "jited ");
		dump_ok = 1;
	}

	close(fd);
out:
	close_json_object();
	return dump_ok;
}

static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *pos, c_prev = ' ';
		FILE *fp;
		int c;

		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = pos = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		while ((c = fgetc(fp)) != EOF) {
			switch (c) {
			case '\n':
				if (c_prev != ',')
					*(pos++) = ',';
				c_prev = ',';
				break;
			case ' ':
			case '\t':
				if (c_prev != ' ')
					*(pos++) = c;
				c_prev = ' ';
				break;
			default:
				*(pos++) = c;
				c_prev = c;
			}
			if (pos - tmp_string == tmp_len)
				break;
		}

		if (!feof(fp)) {
			free(tmp_string);
			fclose(fp);
			return -E2BIG;
		}

		fclose(fp);
		*pos = 0;

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}

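/* The classic BPF text format parsed below is a leading instruction
 * count followed by comma-separated "code jt jf k" quadruples, as
 * emitted by bpf_asm and similar tools. For instance, the following
 * encodes "ldh [12]; jeq #0x800,L1,L2; L1: ret #0xffff; L2: ret #0",
 * i.e. accept IPv4 only:
 *
 *	4,40 0 0 12,21 0 1 2048,6 0 0 65535,6 0 0 0
 *
 * The same string may come inline ("bytecode") or from a file
 * ("bytecode-file"), in which case bpf_parse_string() above folds
 * newlines into commas first.
 */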
static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}

void bpf_print_ops(struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (len == 0)
		return;

	open_json_object("bytecode");
	print_uint(PRINT_ANY, "length", "bytecode \'%u,", len);
	open_json_array(PRINT_JSON, "insns");

	for (i = 0; i < len; i++) {
		open_json_object(NULL);
		print_hu(PRINT_ANY, "code", "%hu ", ops[i].code);
		print_hhu(PRINT_ANY, "jt", "%hhu ", ops[i].jt);
		print_hhu(PRINT_ANY, "jf", "%hhu ", ops[i].jf);
		if (i == len - 1)
			print_uint(PRINT_ANY, "k", "%u\'", ops[i].k);
		else
			print_uint(PRINT_ANY, "k", "%u,", ops[i].k);
		close_json_object();
	}

	close_json_array(PRINT_JSON, NULL);
	close_json_object();
}

static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}

struct bpf_prog_data {
	unsigned int type;
	unsigned int jited;
};

struct bpf_map_ext {
	struct bpf_prog_data owner;
	unsigned int btf_id_key;
	unsigned int btf_id_val;
};

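/* Reconstruct map attributes from /proc/<pid>/fdinfo/<fd>. On kernels
 * with BPF fdinfo support the file contains lines along these lines
 * (sketch):
 *
 *	map_type:	1
 *	key_size:	4
 *	value_size:	8
 *	max_entries:	256
 *	map_flags:	0x0
 *
 * owner_prog_type/owner_jited are only emitted for prog array maps
 * that already have an owning program type.
 */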
static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
					  struct bpf_map_ext *ext)
{
	unsigned int val, owner_type = 0, owner_jited = 0;
	char *file = NULL;
	char buff[4096];
	FILE *fp;
	int ret;

	ret = asprintf(&file, "/proc/%d/fdinfo/%d", getpid(), fd);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		free(file);
		return ret;
	}
	memset(map, 0, sizeof(*map));

	fp = fopen(file, "r");
	free(file);
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			map->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			map->size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			map->size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			map->max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			map->flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
		else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
			owner_jited = val;
	}

	fclose(fp);
	if (ext) {
		memset(ext, 0, sizeof(*ext));
		ext->owner.type = owner_type;
		ext->owner.jited = owner_jited;
	}

	return 0;
}

static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    struct bpf_map_ext *ext, int length,
				    enum bpf_prog_type type)
{
	struct bpf_elf_map tmp, zero = {};
	int ret;

	ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
	if (ret < 0)
		return ret;

	/* The decision to reject this is on kernel side eventually, but
	 * at least give the user a chance to know what's wrong.
	 */
	if (ext->owner.type && ext->owner.type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, ext->owner.type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
		 * so just accept it. We know we do have an eBPF fd and in this
		 * case, everything is 0. It is guaranteed that no such map exists
		 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}

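/* Mount a bpf fs instance at @target. The loop handles mount points
 * living on a shared mount (e.g. when systemd marks / as shared): if
 * making the target private fails with EINVAL, bind mount the target
 * onto itself once so there is an actual mount point to remount as
 * private, then retry.
 */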
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}

static int bpf_mnt_check_target(const char *target)
{
	struct stat sb = {};
	int ret;

	ret = stat(target, &sb);
	if (ret) {
		ret = mkdir(target, S_IRWXU);
		if (ret) {
			fprintf(stderr, "mkdir %s failed: %s\n", target,
				strerror(errno));
			return ret;
		}
	}

	return 0;
}

static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0)
		return -ENOENT;
	if ((unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}

static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
					 int len, const char *mntpt)
{
	int ret;

	ret = bpf_valid_mntpt(mntpt, magic);
	if (!ret) {
		strlcpy(mnt, mntpt, len);
		return mnt;
	}

	return NULL;
}

static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
				  char *mnt, int len,
				  const char * const *known_mnts)
{
	const char * const *ptr;
	char type[100];
	FILE *fp;

	if (known_mnts) {
		ptr = known_mnts;
		while (*ptr) {
			if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
				return mnt;
			ptr++;
		}
	}

	if (len != PATH_MAX)
		return NULL;

	fp = fopen("/proc/mounts", "r");
	if (fp == NULL)
		return NULL;

	while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
		      mnt, type) == 2) {
		if (strcmp(type, fstype) == 0)
			break;
	}

	fclose(fp);
	if (strcmp(type, fstype) != 0)
		return NULL;

	return mnt;
}

int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	int fd_in, fd_out = STDERR_FILENO;
	char *tpipe = NULL;
	const char *mnt;
	int ret;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	ret = asprintf(&tpipe, "%s/trace_pipe", mnt);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		free(tpipe);
		return ret;
	}

	fd_in = open(tpipe, O_RDONLY);
	free(tpipe);
	if (fd_in < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd_in, buff, sizeof(buff));
		if (ret > 0 && write(fd_out, buff, ret) == ret)
			continue;
		break;
	}

	close(fd_in);
	return -1;
}

static int bpf_gen_global(const char *bpf_sub_dir)
{
	char *bpf_glo_dir = NULL;
	int ret;

	ret = asprintf(&bpf_glo_dir, "%s/%s/", bpf_sub_dir, BPF_DIR_GLOBALS);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	ret = mkdir(bpf_glo_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
			strerror(errno));
		goto out;
	}

	ret = 0;
out:
	free(bpf_glo_dir);
	return ret;
}

static int bpf_gen_master(const char *base, const char *name)
{
	char *bpf_sub_dir = NULL;
	int ret;

	ret = asprintf(&bpf_sub_dir, "%s%s/", base, name);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	ret = mkdir(bpf_sub_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
			strerror(errno));
		goto out;
	}

	ret = bpf_gen_global(bpf_sub_dir);
out:
	free(bpf_sub_dir);
	return ret;
}

static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int ret;

	ret = mkdir(full_name, S_IRWXU);
	if (ret) {
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return ret;
	}

	ret = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (ret) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return ret;
}

static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char *bpf_lnk_dir = NULL;
	char *bpf_sub_dir = NULL;
	struct stat sb = {};
	int ret;

	ret = asprintf(&bpf_lnk_dir, "%s%s/", base, link);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	ret = asprintf(&bpf_sub_dir, "%s%s", base, name);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				goto out;
			}

			ret = bpf_slave_via_bind_mnt(bpf_sub_dir, bpf_lnk_dir);
			goto out;
		}

		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			goto out;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK) {
			ret = bpf_gen_global(bpf_sub_dir);
			goto out;
		}
	}

out:
	free(bpf_lnk_dir);
	free(bpf_sub_dir);
	return ret;
}

static int bpf_gen_hierarchy(const char *base)
{
	int ret, i;

	ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
	for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
		ret = bpf_gen_slave(base,
				    bpf_prog_to_subdir(__bpf_types[i]),
				    bpf_prog_to_subdir(__bpf_types[0]));
	return ret;
}

static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char *bpf_wrk_dir;
	static const char *mnt;
	static bool bpf_mnt_cached;
	const char *mnt_env = getenv(BPF_ENV_MNT);
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	if (mnt_env)
		mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
					    sizeof(bpf_tmp), mnt_env);
	else
		mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
				     sizeof(bpf_tmp), bpf_known_mnts);
	if (!mnt) {
		mnt = mnt_env ? : BPF_DIR_MNT;
		ret = bpf_mnt_check_target(mnt);
		if (!ret)
			ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	ret = asprintf(&bpf_wrk_dir, "%s/", mnt);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		free(bpf_wrk_dir);
		goto out;
	}

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	bpf_mnt_cached = true;
	return mnt;
}

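/* With the hierarchy generated above, a fresh default mount ends up
 * as (assuming BPF_DIR_MNT of /sys/fs/bpf):
 *
 *	/sys/fs/bpf/tc/
 *	/sys/fs/bpf/tc/globals/
 *	/sys/fs/bpf/xdp -> /sys/fs/bpf/tc/
 *	/sys/fs/bpf/ip  -> /sys/fs/bpf/tc/
 *
 * so pinned objects can be shared between subsystems. The "m:" prefix
 * handled below resolves a pathname relative to the per-type subdir,
 * e.g. "m:globals/foo" becomes /sys/fs/bpf/tc/globals/foo for tc
 * program types.
 */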
static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
{
	union bpf_attr attr = {};
	char tmp[PATH_MAX];

	if (strlen(pathname) > 2 && pathname[0] == 'm' &&
	    pathname[1] == ':' && bpf_get_work_dir(type)) {
		snprintf(tmp, sizeof(tmp), "%s/%s",
			 bpf_get_work_dir(type), pathname + 2);
		pathname = tmp;
	}

	attr.pathname = bpf_ptr_to_u64(pathname);

	return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
{
	int prog_fd = bpf_obj_get(pathname, type);

	if (prog_fd < 0)
		fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
			pathname, strerror(errno));
	return prog_fd;
}

static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		cfg->mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		cfg->mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		cfg->mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		cfg->mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						cfg->type = i;
						break;
					}
				}

				if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				cfg->type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		section = bpf_prog_to_default_section(cfg->type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		if (__bpf_prog_meta[cfg->type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
		ret = bpf_ops_parse(argc, argv, cfg->opcodes,
				    cfg->mode == CBPF_FILE);
		cfg->n_opcodes = ret;
	} else if (cfg->mode == EBPF_OBJECT) {
		ret = 0; /* program will be loaded by load stage */
	} else if (cfg->mode == EBPF_PINNED) {
		ret = bpf_obj_pinned(file, cfg->type);
		cfg->prog_fd = ret;
	} else {
		return -1;
	}

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;
	cfg->verbose = verbose;

	return ret;
}

static int bpf_do_load(struct bpf_cfg_in *cfg)
{
	if (cfg->mode == EBPF_OBJECT) {
		cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
					    cfg->section, cfg->ifindex,
					    cfg->verbose);
		return cfg->prog_fd;
	}
	return 0;
}

int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
		    void *nl)
{
	char annotation[256];
	int ret;

	ret = bpf_do_load(cfg);
	if (ret < 0)
		return ret;

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
		ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), cfg->mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, cfg->prog_fd, annotation);
	}

	return 0;
}

int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
{
	bool opt_tbl[BPF_MODE_MAX] = {};

	if (ops->cbpf_cb) {
		opt_tbl[CBPF_BYTECODE] = true;
		opt_tbl[CBPF_FILE] = true;
	}

	if (ops->ebpf_cb) {
		opt_tbl[EBPF_OBJECT] = true;
		opt_tbl[EBPF_PINNED] = true;
	}

	return bpf_do_parse(cfg, opt_tbl);
}

int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
			      const struct bpf_cfg_ops *ops, void *nl)
{
	int ret;

	ret = bpf_parse_common(cfg, ops);
	if (ret < 0)
		return ret;

	return bpf_load_common(cfg, ops, nl);
}

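/* Load (or fetch) a program and graft it into the pinned PROG_ARRAY
 * at @map_path, keyed either by @key or, if NULL, by the "x/y" form
 * of the section name where y is taken as the map slot. A typical
 * invocation from tc looks roughly like this (sketch):
 *
 *	tc exec bpf graft m:globals/jmp obj tail_calls.o sec 0/0
 */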
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.type = BPF_PROG_TYPE_UNSPEC,
		.argc = argc,
		.argv = argv,
	};
	struct bpf_map_ext ext = {};
	int ret, prog_fd, map_fd;
	uint32_t map_key;

	ret = bpf_do_parse(&cfg, opt_tbl);
	if (ret < 0)
		return ret;

	ret = bpf_do_load(&cfg);
	if (ret < 0)
		return ret;

	prog_fd = cfg.prog_fd;

	if (key) {
		map_key = *key;
	} else {
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, cfg.type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
				       offsetof(struct bpf_elf_map, max_elem),
				       cfg.type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}

int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

static int bpf_prog_load_dev(enum bpf_prog_type type,
			     const struct bpf_insn *insns, size_t size_insns,
			     const char *license, __u32 ifindex,
			     char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.prog_type = type;
	attr.insns = bpf_ptr_to_u64(insns);
	attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
	attr.license = bpf_ptr_to_u64(license);
	attr.prog_ifindex = ifindex;

	if (size_log > 0) {
		attr.log_buf = bpf_ptr_to_u64(log);
		attr.log_size = size_log;
		attr.log_level = 1;
	}

	return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
		  size_t size_insns, const char *license, char *log,
		  size_t size_log)
{
	return bpf_prog_load_dev(type, insns, size_insns, license, 0,
				 log, size_log);
}
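
/* Minimal usage sketch for bpf_prog_load() (editor's illustration, not
 * called by iproute2 itself): load the two-instruction program
 * "r0 = 0; exit", i.e. a classifier that returns 0 for every packet.
 * The opcode macros and BPF_REG_0 come in via linux/bpf.h.
 */
static int __attribute__((unused))
bpf_prog_load_example(void)
{
	static const struct bpf_insn insns[] = {
		{ .code = BPF_ALU64 | BPF_MOV | BPF_K,	/* r0 = 0 */
		  .dst_reg = BPF_REG_0, .imm = 0 },
		{ .code = BPF_JMP | BPF_EXIT },		/* return r0 */
	};
	char log[4096];

	return bpf_prog_load(BPF_PROG_TYPE_SCHED_CLS, insns, sizeof(insns),
			     "GPL", log, sizeof(log));
}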

#ifdef HAVE_ELF
struct bpf_elf_prog {
	enum bpf_prog_type type;
	struct bpf_insn *insns;
	unsigned int insns_num;
	size_t size;
	const char *license;
};

struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

struct bpf_config {
	unsigned int jit_enabled;
};

struct bpf_btf {
	const struct btf_header *hdr;
	const void *raw;
	const char *strings;
	const struct btf_type **types;
	int types_num;
};

struct bpf_elf_ctx {
	struct bpf_config cfg;
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	Elf_Data *btf_data;
	char obj_uid[64];
	int obj_fd;
	int btf_fd;
	int map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	struct bpf_map_ext maps_ext[ELF_MAX_MAPS];
	struct bpf_elf_prog prog_text;
	struct bpf_btf btf;
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;
	int sec_maps;
	int sec_text;
	int sec_btf;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	__u32 ifindex;
	bool verbose;
	bool noafalg;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];
	char *log;
	size_t log_size;
};

struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};

static bool bpf_log_has_data(struct bpf_elf_ctx *ctx)
{
	return ctx->log && ctx->log[0];
}

static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (bpf_log_has_data(ctx)) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}

static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
{
	const size_t log_max = UINT_MAX >> 8;
	size_t log_size = ctx->log_size;
	char *ptr;

	if (!ctx->log) {
		log_size = 65536;
	} else if (log_size < log_max) {
		log_size <<= 1;
		if (log_size > log_max)
			log_size = log_max;
	} else {
		return -EINVAL;
	}

	ptr = realloc(ctx->log, log_size);
	if (!ptr)
		return -ENOMEM;

	ptr[0] = 0;
	ctx->log = ptr;
	ctx->log_size = log_size;

	return 0;
}

static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
			  uint32_t size_value, uint32_t max_elem,
			  uint32_t flags, int inner_fd, int btf_fd,
			  uint32_t ifindex, uint32_t btf_id_key,
			  uint32_t btf_id_val)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = size_key;
	attr.value_size = inner_fd ? sizeof(int) : size_value;
	attr.max_entries = max_elem;
	attr.map_flags = flags;
	attr.inner_map_fd = inner_fd;
	attr.map_ifindex = ifindex;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = btf_id_key;
	attr.btf_value_type_id = btf_id_val;

	return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

static int bpf_btf_load(void *btf, size_t size_btf,
			char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.btf = bpf_ptr_to_u64(btf);
	attr.btf_size = size_btf;

	if (size_log > 0) {
		attr.btf_log_buf = bpf_ptr_to_u64(log);
		attr.btf_log_size = size_log;
		attr.btf_log_level = 1;
	}

	return bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
}

static int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr = {};

	attr.pathname = bpf_ptr_to_u64(pathname);
	attr.bpf_fd = fd;

	return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

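/* Hash the object file via the kernel crypto API (AF_ALG, sha1),
 * splicing the file into the algorithm socket with sendfile();
 * conceptually the same as running sha1sum over the object. The
 * digest is later used as the object uid for PIN_OBJECT_NS pin
 * directories.
 */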
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0)
		return cfd;

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0)
		goto out_cfd;

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}

static void bpf_init_env(void)
{
	struct rlimit limit = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	/* Don't bother in case we fail! */
	setrlimit(RLIMIT_MEMLOCK, &limit);

	if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC))
		fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
}

static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
				      uint32_t pinning)
{
	struct bpf_hash_entry *entry;

	entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
	while (entry && entry->pinning != pinning)
		entry = entry->next;

	return entry ? entry->subpath : NULL;
}

static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
			   uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
	case PIN_GLOBAL_NS:
		return false;
	case PIN_NONE:
		return true;
	default:
		return !bpf_custom_pinning(ctx, pinning);
	}
}

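/* Resulting pin paths per pinning mode, assuming a /sys/fs/bpf mount
 * and a tc program type:
 *
 *	PIN_OBJECT_NS:	/sys/fs/bpf/tc/<obj-sha1>/<name>
 *	PIN_GLOBAL_NS:	/sys/fs/bpf/tc/globals/<name>
 *	custom:		/sys/fs/bpf/tc/../<subpath>/<name>
 */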
static void bpf_make_pathname(char *pathname, size_t len, const char *name,
			      const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 ctx->obj_uid, name);
		break;
	case PIN_GLOBAL_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 BPF_DIR_GLOBALS, name);
		break;
	default:
		snprintf(pathname, len, "%s/../%s/%s",
			 bpf_get_work_dir(ctx->type),
			 bpf_custom_pinning(ctx, pinning), name);
		break;
	}
}

static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
			    uint32_t pinning)
{
	char pathname[PATH_MAX];

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_get(pathname, ctx->type);
}

static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
{
	char *tmp = NULL;
	int ret;

	ret = asprintf(&tmp, "%s/%s", bpf_get_work_dir(ctx->type), ctx->obj_uid);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	ret = mkdir(tmp, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
		goto out;
	}

	ret = 0;
out:
	free(tmp);
	return ret;
}

static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
				const char *todo)
{
	char *tmp = NULL;
	char *rem = NULL;
	char *sub;
	int ret;

	ret = asprintf(&tmp, "%s/../", bpf_get_work_dir(ctx->type));
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	ret = asprintf(&rem, "%s/", todo);
	if (ret < 0) {
		fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
		goto out;
	}

	sub = strtok(rem, "/");
	while (sub) {
		char *next;

		if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX) {
			ret = -EINVAL;
			goto out;
		}

		/* Grow the path with asprintf() rather than strcat()ing
		 * into the exactly-sized previous allocation, which would
		 * overflow it.
		 */
		ret = asprintf(&next, "%s%s/", tmp, sub);
		if (ret < 0) {
			fprintf(stderr, "asprintf failed: %s\n",
				strerror(errno));
			goto out;
		}
		free(tmp);
		tmp = next;

		ret = mkdir(tmp, S_IRWXU);
		if (ret && errno != EEXIST) {
			fprintf(stderr, "mkdir %s failed: %s\n", tmp,
				strerror(errno));
			goto out;
		}

		sub = strtok(NULL, "/");
	}

	ret = 0;
out:
	free(rem);
	free(tmp);
	return ret;
}

static int bpf_place_pinned(int fd, const char *name,
			    const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	char pathname[PATH_MAX];
	const char *tmp;
	int ret = 0;

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	if (pinning == PIN_OBJECT_NS)
		ret = bpf_make_obj_path(ctx);
	else if ((tmp = bpf_custom_pinning(ctx, pinning)))
		ret = bpf_make_custom_path(ctx, tmp);
	if (ret < 0)
		return ret;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_pin(fd, pathname);
}

static void bpf_prog_report(int fd, const char *section,
			    const struct bpf_elf_prog *prog,
			    struct bpf_elf_ctx *ctx)
{
	unsigned int insns = prog->size / sizeof(struct bpf_insn);

	fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", prog->type);
	fprintf(stderr, " - Instructions: %u (%u over limit)\n",
		insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
	fprintf(stderr, " - License: %s\n\n", prog->license);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
			       prog->license, ctx->ifindex,
			       ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}

static void bpf_map_report(int fd, const char *name,
			   const struct bpf_elf_map *map,
			   struct bpf_elf_ctx *ctx, int inner_fd)
{
	fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", map->type);
	fprintf(stderr, " - Identifier: %u\n", map->id);
	fprintf(stderr, " - Pinning: %u\n", map->pinning);
	fprintf(stderr, " - Size key: %u\n", map->size_key);
	fprintf(stderr, " - Size value: %u\n",
		inner_fd ? (int)sizeof(int) : map->size_value);
	fprintf(stderr, " - Max elems: %u\n", map->max_elem);
	fprintf(stderr, " - Flags: %#x\n\n", map->flags);
}

static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
{
	int i;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].id != id)
			continue;
		if (ctx->map_fds[i] < 0)
			return -EINVAL;

		return ctx->map_fds[i];
	}

	return -ENOENT;
}

static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
{
	struct bpf_elf_map outer_map;
	int ret;

	fprintf(stderr, "Cannot insert map into map! ");

	ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
	if (!ret) {
		if (idx >= outer_map.max_elem &&
		    outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
			fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
				outer_map.max_elem, idx);
			return;
		}
	}

	fprintf(stderr, "Different map specs used for outer and inner map?\n");
}

static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
{
	return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
}

static bool bpf_map_offload_neutral(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
			  const struct bpf_elf_map *map, struct bpf_map_ext *ext,
			  int *have_map_in_map)
{
	int fd, ifindex, ret, map_inner_fd = 0;
	bool retried = false;

probe:
	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map, ext,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map \'%s\' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map \'%s\' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd, ctx->btf_fd,
			    ifindex, ext->btf_id_key, ext->btf_id_val);

	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0) {
		close(fd);
		if (!retried && errno == EEXIST) {
			retried = true;
			goto probe;
		}
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		return ret;
	}

	return fd;
}

static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
				    const GElf_Sym *sym)
{
	return ctx->str_tab->d_buf + sym->st_name;
}

static int bpf_btf_find(struct bpf_elf_ctx *ctx, const char *name)
{
	const struct btf_type *type;
	const char *res;
	int id;

	for (id = 1; id < ctx->btf.types_num; id++) {
		type = ctx->btf.types[id];
		if (type->name_off >= ctx->btf.hdr->str_len)
			continue;
		res = &ctx->btf.strings[type->name_off];
		if (!strcmp(res, name))
			return id;
	}

	return -ENOENT;
}

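/* Key/value BTF type ids are looked up through the struct naming
 * convention "____btf_map_<map>" that the BPF_ANNOTATE_KV_PAIR()
 * macro emits on the program side, roughly (sketch):
 *
 *	struct ____btf_map_foo {
 *		struct key_type key;
 *		struct val_type value;
 *	};
 */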
static int bpf_btf_find_kv(struct bpf_elf_ctx *ctx, const struct bpf_elf_map *map,
			   const char *name, uint32_t *id_key, uint32_t *id_val)
{
	const struct btf_member *key, *val;
	const struct btf_type *type;
	char btf_name[512];
	const char *res;
	int id;

	snprintf(btf_name, sizeof(btf_name), "____btf_map_%s", name);
	id = bpf_btf_find(ctx, btf_name);
	if (id < 0)
		return id;

	type = ctx->btf.types[id];
	if (BTF_INFO_KIND(type->info) != BTF_KIND_STRUCT)
		return -EINVAL;
	if (BTF_INFO_VLEN(type->info) != 2)
		return -EINVAL;

	key = ((void *) type) + sizeof(*type);
	val = key + 1;
	if (!key->type || key->type >= ctx->btf.types_num ||
	    !val->type || val->type >= ctx->btf.types_num)
		return -EINVAL;

	if (key->name_off >= ctx->btf.hdr->str_len ||
	    val->name_off >= ctx->btf.hdr->str_len)
		return -EINVAL;

	res = &ctx->btf.strings[key->name_off];
	if (strcmp(res, "key"))
		return -EINVAL;

	res = &ctx->btf.strings[val->name_off];
	if (strcmp(res, "value"))
		return -EINVAL;

	*id_key = key->type;
	*id_val = val->type;
	return 0;
}

static void bpf_btf_annotate(struct bpf_elf_ctx *ctx, int which, const char *name)
{
	uint32_t id_key = 0, id_val = 0;

	if (!bpf_btf_find_kv(ctx, &ctx->maps[which], name, &id_key, &id_val)) {
		ctx->maps_ext[which].btf_id_key = id_key;
		ctx->maps_ext[which].btf_id_val = id_val;
	}
}

static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
{
	const char *name;
	GElf_Sym sym;
	int i;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps ||
		    sym.st_value / ctx->map_len != which)
			continue;

		name = bpf_str_tab_name(ctx, &sym);
		bpf_btf_annotate(ctx, which, name);
		return name;
	}

	return NULL;
}

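/* Map setup runs in up to three passes: 1) create or fetch all maps,
 * deferring map-in-map outers that reference an inner map by id,
 * 2) create the deferred outer maps now that their inner map fds
 * exist, and 3) plug inner map fds into the outer maps at the
 * requested inner_idx slots.
 */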
static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
		    ctx->noafalg) {
			fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
			return -ENOTSUP;
		}

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], &have_map_in_map);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_idx);
				return ret;
			}
		}
	}

	return 0;
}

static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
{
	int i, num = 0;
	GElf_Sym sym;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps)
			continue;
		num++;
	}

	return num;
}

static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	sec_edata = elf_getdata(sec_fd, NULL);
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}

struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};

static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
				struct bpf_elf_sec_data *data)
{
	ctx->map_num = data->sec_data->d_size;
	ctx->sec_maps = section;
	ctx->sec_done[section] = true;

	if (ctx->map_num > sizeof(ctx->maps)) {
		fprintf(stderr, "Too many BPF maps in ELF section!\n");
		return -ENOMEM;
	}

	memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
	return 0;
}

static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			int type;

			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;

			type = GELF_ST_TYPE(sym.st_info);
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    (type != STT_NOTYPE && type != STT_OBJECT) ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}

static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%u maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
		fprintf(stderr, "Map section size is not a multiple of the number of BPF map symbols!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* The fixup leaves the rest of the members as zero, which
		 * is fine currently, but options exist to set some other
		 * default value as well when needed in the future.
		 */
		memcpy(&fixup[i], buff, ctx->map_len);
	}

	memcpy(ctx->maps, fixup, sizeof(fixup));
	if (ctx->verbose)
		printf("%zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
		       sizeof(struct bpf_elf_map) - ctx->map_len);
	return 0;
}

2092 static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
2093 struct bpf_elf_sec_data *data)
2094 {
2095 if (data->sec_data->d_size > sizeof(ctx->license))
2096 return -ENOMEM;
2097
2098 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
2099 ctx->sec_done[section] = true;
2100 return 0;
2101 }
2102
2103 static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
2104 struct bpf_elf_sec_data *data)
2105 {
2106 ctx->sym_tab = data->sec_data;
2107 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
2108 ctx->sec_done[section] = true;
2109 return 0;
2110 }
2111
2112 static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
2113 struct bpf_elf_sec_data *data)
2114 {
2115 ctx->str_tab = data->sec_data;
2116 ctx->sec_done[section] = true;
2117 return 0;
2118 }
2119
2120 static int bpf_fetch_text(struct bpf_elf_ctx *ctx, int section,
2121 struct bpf_elf_sec_data *data)
2122 {
2123 ctx->sec_text = section;
2124 ctx->sec_done[section] = true;
2125 return 0;
2126 }
2127
2128 static void bpf_btf_report(int fd, struct bpf_elf_ctx *ctx)
2129 {
2130 fprintf(stderr, "\nBTF debug data section \'.BTF\' %s%s (%d)!\n",
2131 fd < 0 ? "rejected: " : "loaded",
2132 fd < 0 ? strerror(errno) : "",
2133 fd < 0 ? errno : fd);
2134
2135 fprintf(stderr, " - Length: %zu\n", ctx->btf_data->d_size);
2136
2137 bpf_dump_error(ctx, "Verifier analysis:\n\n");
2138 }
2139
2140 static int bpf_btf_attach(struct bpf_elf_ctx *ctx)
2141 {
2142 int tries = 0, fd;
2143 retry:
2144 errno = 0;
2145 fd = bpf_btf_load(ctx->btf_data->d_buf, ctx->btf_data->d_size,
2146 ctx->log, ctx->log_size);
2147 if (fd < 0 || ctx->verbose) {
2148 if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
2149 if (tries++ < 10 && !bpf_log_realloc(ctx))
2150 goto retry;
2151
2152 fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
2153 ctx->log_size, tries);
2154 return fd;
2155 }
2156
2157 if (bpf_log_has_data(ctx))
2158 bpf_btf_report(fd, ctx);
2159 }
2160
2161 return fd;
2162 }
2163
2164 static int bpf_fetch_btf_begin(struct bpf_elf_ctx *ctx, int section,
2165 struct bpf_elf_sec_data *data)
2166 {
2167 ctx->btf_data = data->sec_data;
2168 ctx->sec_btf = section;
2169 ctx->sec_done[section] = true;
2170 return 0;
2171 }
2172
2173 static int bpf_btf_check_header(struct bpf_elf_ctx *ctx)
2174 {
2175 const struct btf_header *hdr = ctx->btf_data->d_buf;
2176 const char *str_start, *str_end;
2177 unsigned int data_len;
2178
2179 if (hdr->magic != BTF_MAGIC) {
2180 fprintf(stderr, "Object has wrong BTF magic: %x, expected: %x!\n",
2181 hdr->magic, BTF_MAGIC);
2182 return -EINVAL;
2183 }
2184
2185 if (hdr->version != BTF_VERSION) {
2186 fprintf(stderr, "Object has wrong BTF version: %u, expected: %u!\n",
2187 hdr->version, BTF_VERSION);
2188 return -EINVAL;
2189 }
2190
2191 if (hdr->flags) {
2192 fprintf(stderr, "Object has unsupported BTF flags %x!\n",
2193 hdr->flags);
2194 return -EINVAL;
2195 }
2196
2197 data_len = ctx->btf_data->d_size - sizeof(*hdr);
2198 if (data_len < hdr->type_off ||
2199 data_len < hdr->str_off ||
2200 data_len < hdr->type_len + hdr->str_len ||
2201 hdr->type_off >= hdr->str_off ||
2202 hdr->type_off + hdr->type_len != hdr->str_off ||
2203 hdr->str_off + hdr->str_len != data_len ||
2204 (hdr->type_off & (sizeof(uint32_t) - 1))) {
2205 fprintf(stderr, "Object has malformed BTF data!\n");
2206 return -EINVAL;
2207 }
2208
2209 ctx->btf.hdr = hdr;
2210 ctx->btf.raw = hdr + 1;
2211
2212 str_start = ctx->btf.raw + hdr->str_off;
2213 str_end = str_start + hdr->str_len;
2214 if (!hdr->str_len ||
2215 hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
2216 str_start[0] || str_end[-1]) {
2217 fprintf(stderr, "Object has malformed BTF string data!\n");
2218 return -EINVAL;
2219 }
2220
2221 ctx->btf.strings = str_start;
2222 return 0;
2223 }
2224
2225 static int bpf_btf_register_type(struct bpf_elf_ctx *ctx,
2226 const struct btf_type *type)
2227 {
2228 int cur = ctx->btf.types_num, num = cur + 1;
2229 const struct btf_type **types;
2230
2231 types = realloc(ctx->btf.types, num * sizeof(type));
2232 if (!types) {
2233 free(ctx->btf.types);
2234 ctx->btf.types = NULL;
2235 ctx->btf.types_num = 0;
2236 return -ENOMEM;
2237 }
2238
2239 ctx->btf.types = types;
2240 ctx->btf.types[cur] = type;
2241 ctx->btf.types_num = num;
2242 return 0;
2243 }
2244
2245 static struct btf_type btf_type_void;
2246
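/*
 * Walk the BTF type data and record a pointer to each type record.
 * Records are variable length: each struct btf_type may be followed
 * by kind-specific trailing data, sized as handled in the switch:
 *
 *	BTF_KIND_INT            one extra u32 of encoding info
 *	BTF_KIND_ARRAY          struct btf_array
 *	BTF_KIND_STRUCT/UNION   vlen * struct btf_member
 *	BTF_KIND_ENUM           vlen * struct btf_enum
 *	BTF_KIND_FUNC_PROTO     vlen * struct btf_param
 *
 * Type id 0 is reserved for void, hence btf_type_void is registered
 * first so BTF ids map 1:1 onto ctx->btf.types[] indices.
 */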
2247 static int bpf_btf_prep_type_data(struct bpf_elf_ctx *ctx)
2248 {
2249 const void *type_cur = ctx->btf.raw + ctx->btf.hdr->type_off;
2250 const void *type_end = ctx->btf.raw + ctx->btf.hdr->str_off;
2251 const struct btf_type *type;
2252 uint16_t var_len;
2253 int ret, kind;
2254
2255 ret = bpf_btf_register_type(ctx, &btf_type_void);
2256 if (ret < 0)
2257 return ret;
2258
2259 while (type_cur < type_end) {
2260 type = type_cur;
2261 type_cur += sizeof(*type);
2262
2263 var_len = BTF_INFO_VLEN(type->info);
2264 kind = BTF_INFO_KIND(type->info);
2265
2266 switch (kind) {
2267 case BTF_KIND_INT:
2268 type_cur += sizeof(int);
2269 break;
2270 case BTF_KIND_ARRAY:
2271 type_cur += sizeof(struct btf_array);
2272 break;
2273 case BTF_KIND_STRUCT:
2274 case BTF_KIND_UNION:
2275 type_cur += var_len * sizeof(struct btf_member);
2276 break;
2277 case BTF_KIND_ENUM:
2278 type_cur += var_len * sizeof(struct btf_enum);
2279 break;
2280 case BTF_KIND_FUNC_PROTO:
2281 type_cur += var_len * sizeof(struct btf_param);
2282 break;
2283 case BTF_KIND_TYPEDEF:
2284 case BTF_KIND_PTR:
2285 case BTF_KIND_FWD:
2286 case BTF_KIND_VOLATILE:
2287 case BTF_KIND_CONST:
2288 case BTF_KIND_RESTRICT:
2289 case BTF_KIND_FUNC:
2290 break;
2291 default:
2292 fprintf(stderr, "Object has unknown BTF type: %u!\n", kind);
2293 return -EINVAL;
2294 }
2295
2296 ret = bpf_btf_register_type(ctx, type);
2297 if (ret < 0)
2298 return ret;
2299 }
2300
2301 return 0;
2302 }
2303
2304 static int bpf_btf_prep_data(struct bpf_elf_ctx *ctx)
2305 {
2306 int ret = bpf_btf_check_header(ctx);
2307
2308 if (!ret)
2309 return bpf_btf_prep_type_data(ctx);
2310 return ret;
2311 }
2312
2313 static void bpf_fetch_btf_end(struct bpf_elf_ctx *ctx)
2314 {
2315 int fd = bpf_btf_attach(ctx);
2316
2317 if (fd < 0)
2318 return;
2319 ctx->btf_fd = fd;
2320 if (bpf_btf_prep_data(ctx) < 0) {
2321 close(ctx->btf_fd);
2322 ctx->btf_fd = 0;
2323 }
2324 }
2325
2326 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
2327 {
2328 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
2329 }
2330
2331 static bool bpf_has_btf_data(const struct bpf_elf_ctx *ctx)
2332 {
2333 return ctx->sec_btf;
2334 }
2335
2336 static bool bpf_has_call_data(const struct bpf_elf_ctx *ctx)
2337 {
2338 return ctx->sec_text;
2339 }
2340
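/*
 * First pass over the object: collect everything that program loading
 * will later depend on, i.e. the maps section, license, .text for
 * bpf-to-bpf calls, .symtab/.strtab for relocations and the optional
 * .BTF debug data. Maps (and BTF, if present) are pushed into the
 * kernel right away. For a typical object built along the lines of
 *
 *	clang -O2 -target bpf -c prog.c -o prog.o
 *
 * this covers every section except the program code itself, which
 * bpf_fetch_prog_sec() takes care of afterwards.
 */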
2341 static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx, bool check_text_sec)
2342 {
2343 struct bpf_elf_sec_data data;
2344 int i, ret = -1;
2345
2346 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2347 ret = bpf_fill_section_data(ctx, i, &data);
2348 if (ret < 0)
2349 continue;
2350
2351 if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2352 !strcmp(data.sec_name, ELF_SECTION_MAPS))
2353 ret = bpf_fetch_maps_begin(ctx, i, &data);
2354 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2355 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
2356 ret = bpf_fetch_license(ctx, i, &data);
2357 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2358 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2359 !strcmp(data.sec_name, ".text") &&
2360 check_text_sec)
2361 ret = bpf_fetch_text(ctx, i, &data);
2362 else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
2363 !strcmp(data.sec_name, ".symtab"))
2364 ret = bpf_fetch_symtab(ctx, i, &data);
2365 else if (data.sec_hdr.sh_type == SHT_STRTAB &&
2366 !strcmp(data.sec_name, ".strtab"))
2367 ret = bpf_fetch_strtab(ctx, i, &data);
2368 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2369 !strcmp(data.sec_name, ".BTF"))
2370 ret = bpf_fetch_btf_begin(ctx, i, &data);
2371 if (ret < 0) {
2372 fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
2373 i);
2374 return ret;
2375 }
2376 }
2377
2378 if (bpf_has_btf_data(ctx))
2379 bpf_fetch_btf_end(ctx);
2380 if (bpf_has_map_data(ctx)) {
2381 ret = bpf_fetch_maps_end(ctx);
2382 if (ret < 0) {
2383 fprintf(stderr, "Error fixing up map structure; incompatible struct bpf_elf_map used?\n");
2384 return ret;
2385 }
2386
2387 ret = bpf_maps_attach_all(ctx);
2388 if (ret < 0) {
2389 fprintf(stderr, "Error loading maps into kernel!\n");
2390 return ret;
2391 }
2392 }
2393
2394 return ret;
2395 }
2396
2397 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
2398 bool *sseen)
2399 {
2400 struct bpf_elf_sec_data data;
2401 struct bpf_elf_prog prog;
2402 int ret, i, fd = -1;
2403
2404 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2405 if (ctx->sec_done[i])
2406 continue;
2407
2408 ret = bpf_fill_section_data(ctx, i, &data);
2409 if (ret < 0 ||
2410 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
2411 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2412 !strcmp(data.sec_name, section)))
2413 continue;
2414
2415 *sseen = true;
2416
2417 memset(&prog, 0, sizeof(prog));
2418 prog.type = ctx->type;
2419 prog.license = ctx->license;
2420 prog.size = data.sec_data->d_size;
2421 prog.insns_num = prog.size / sizeof(struct bpf_insn);
2422 prog.insns = data.sec_data->d_buf;
2423
2424 fd = bpf_prog_attach(section, &prog, ctx);
2425 if (fd < 0)
2426 return fd;
2427
2428 ctx->sec_done[i] = true;
2429 break;
2430 }
2431
2432 return fd;
2433 }
2434
2435 struct bpf_relo_props {
2436 struct bpf_tail_call {
2437 unsigned int total;
2438 unsigned int jited;
2439 } tc;
2440 int main_num;
2441 };
2442
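/*
 * Map relocation: for each map reference the compiler emits a 16-byte
 * ld64 (BPF_LD | BPF_IMM | BPF_DW) instruction, with the map's offset
 * into the maps section recorded in the symbol. st_value divided by
 * the map record length gives the map slot, and the instruction is
 * patched in place to carry the fd of the map created earlier:
 *
 *	before:	ld64 rX, <offset in maps section>   (src_reg == 0)
 *	after:	ld64 rX, <map fd>                   (src_reg == BPF_PSEUDO_MAP_FD)
 *
 * BPF_PSEUDO_MAP_FD tells the verifier to translate the fd into the
 * real map address at load time. Tail call (PROG_ARRAY) references
 * are additionally counted for the JIT sanity warnings.
 */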
2443 static int bpf_apply_relo_map(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2444 GElf_Rel *relo, GElf_Sym *sym,
2445 struct bpf_relo_props *props)
2446 {
2447 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2448 unsigned int map_idx = sym->st_value / ctx->map_len;
2449
2450 if (insn_off >= prog->insns_num)
2451 return -EINVAL;
2452 if (prog->insns[insn_off].code != (BPF_LD | BPF_IMM | BPF_DW)) {
2453 fprintf(stderr, "ELF contains relo data for non-ld64 instruction at offset %u! Compiler bug?!\n",
2454 insn_off);
2455 return -EINVAL;
2456 }
2457
2458 if (map_idx >= ARRAY_SIZE(ctx->map_fds))
2459 return -EINVAL;
2460 if (!ctx->map_fds[map_idx])
2461 return -EINVAL;
2462 if (ctx->maps[map_idx].type == BPF_MAP_TYPE_PROG_ARRAY) {
2463 props->tc.total++;
2464 if (ctx->maps_ext[map_idx].owner.jited ||
2465 (ctx->maps_ext[map_idx].owner.type == 0 &&
2466 ctx->cfg.jit_enabled))
2467 props->tc.jited++;
2468 }
2469
2470 prog->insns[insn_off].src_reg = BPF_PSEUDO_MAP_FD;
2471 prog->insns[insn_off].imm = ctx->map_fds[map_idx];
2472 return 0;
2473 }
2474
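/*
 * Call relocation for bpf-to-bpf calls: on the first call relocation
 * seen, the whole .text section is appended once behind the program's
 * own instructions and its start index is kept in props->main_num.
 * Each BPF_PSEUDO_CALL imm is then rebased so that, with the kernel
 * resolving the call target relative to the call site, it lands in
 * the appended .text copy. For example, a program of 8 insns with a
 * call at insn 3 gets .text appended at index 8, so imm is adjusted
 * by main_num - insn_off = 8 - 3 = 5.
 */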
2475 static int bpf_apply_relo_call(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2476 GElf_Rel *relo, GElf_Sym *sym,
2477 struct bpf_relo_props *props)
2478 {
2479 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2480 struct bpf_elf_prog *prog_text = &ctx->prog_text;
2481
2482 if (insn_off >= prog->insns_num)
2483 return -EINVAL;
2484 if (prog->insns[insn_off].code != (BPF_JMP | BPF_CALL) &&
2485 prog->insns[insn_off].src_reg != BPF_PSEUDO_CALL) {
2486 fprintf(stderr, "ELF contains relo data for non-call instruction at offset %u! Compiler bug?!\n",
2487 insn_off);
2488 return -EINVAL;
2489 }
2490
2491 if (!props->main_num) {
2492 struct bpf_insn *insns = realloc(prog->insns,
2493 prog->size + prog_text->size);
2494 if (!insns)
2495 return -ENOMEM;
2496
2497 memcpy(insns + prog->insns_num, prog_text->insns,
2498 prog_text->size);
2499 props->main_num = prog->insns_num;
2500 prog->insns = insns;
2501 prog->insns_num += prog_text->insns_num;
2502 prog->size += prog_text->size;
2503 }
2504
2505 prog->insns[insn_off].imm += props->main_num - insn_off;
2506 return 0;
2507 }
2508
2509 static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
2510 struct bpf_elf_sec_data *data_relo,
2511 struct bpf_elf_prog *prog,
2512 struct bpf_relo_props *props)
2513 {
2514 GElf_Shdr *rhdr = &data_relo->sec_hdr;
2515 int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
2516
2517 for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
2518 GElf_Rel relo;
2519 GElf_Sym sym;
2520 int ret = -EIO;
2521
2522 if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
2523 return -EIO;
2524 if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
2525 return -EIO;
2526
2527 if (sym.st_shndx == ctx->sec_maps)
2528 ret = bpf_apply_relo_map(ctx, prog, &relo, &sym, props);
2529 else if (sym.st_shndx == ctx->sec_text)
2530 ret = bpf_apply_relo_call(ctx, prog, &relo, &sym, props);
2531 else
2532 fprintf(stderr, "ELF contains non-{map,call} related relo data in entry %u pointing to section %u! Compiler bug?!\n",
2533 relo_ent, sym.st_shndx);
2534 if (ret < 0)
2535 return ret;
2536 }
2537
2538 return 0;
2539 }
2540
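/*
 * Load a program section that comes with relocation entries: find the
 * SHT_REL section whose sh_info points at the requested program
 * section, apply all map and call relocations on a private copy of
 * the instructions, and only then hand the result to the kernel.
 * The .text section itself is only relocated, never loaded standalone
 * (fd 0); it stays in ctx->prog_text for appending to its callers.
 */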
2541 static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
2542 bool *lderr, bool *sseen, struct bpf_elf_prog *prog)
2543 {
2544 struct bpf_elf_sec_data data_relo, data_insn;
2545 int ret, idx, i, fd = -1;
2546
2547 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2548 struct bpf_relo_props props = {};
2549
2550 ret = bpf_fill_section_data(ctx, i, &data_relo);
2551 if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
2552 continue;
2553
2554 idx = data_relo.sec_hdr.sh_info;
2555
2556 ret = bpf_fill_section_data(ctx, idx, &data_insn);
2557 if (ret < 0 ||
2558 !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
2559 (data_insn.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2560 !strcmp(data_insn.sec_name, section)))
2561 continue;
2562 if (sseen)
2563 *sseen = true;
2564
2565 memset(prog, 0, sizeof(*prog));
2566 prog->type = ctx->type;
2567 prog->license = ctx->license;
2568 prog->size = data_insn.sec_data->d_size;
2569 prog->insns_num = prog->size / sizeof(struct bpf_insn);
2570 prog->insns = malloc(prog->size);
2571 if (!prog->insns) {
2572 *lderr = true;
2573 return -ENOMEM;
2574 }
2575
2576 memcpy(prog->insns, data_insn.sec_data->d_buf, prog->size);
2577
2578 ret = bpf_apply_relo_data(ctx, &data_relo, prog, &props);
2579 if (ret < 0) {
2580 *lderr = true;
2581 if (ctx->sec_text != idx)
2582 free(prog->insns);
2583 return ret;
2584 }
2585 if (ctx->sec_text == idx) {
2586 fd = 0;
2587 goto out;
2588 }
2589
2590 fd = bpf_prog_attach(section, prog, ctx);
2591 free(prog->insns);
2592 if (fd < 0) {
2593 *lderr = true;
2594 if (props.tc.total) {
2595 if (ctx->cfg.jit_enabled &&
2596 props.tc.total != props.tc.jited)
2597 fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have JITed owner!\n",
2598 props.tc.jited, props.tc.total);
2599 if (!ctx->cfg.jit_enabled &&
2600 props.tc.jited)
2601 fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have JITed owner!\n",
2602 props.tc.jited, props.tc.total);
2603 }
2604 return fd;
2605 }
2606 out:
2607 ctx->sec_done[i] = true;
2608 ctx->sec_done[idx] = true;
2609 break;
2610 }
2611
2612 return fd;
2613 }
2614
2615 static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2616 {
2617 bool lderr = false, sseen = false;
2618 struct bpf_elf_prog prog;
2619 int ret = -1;
2620
2621 if (bpf_has_call_data(ctx)) {
2622 ret = bpf_fetch_prog_relo(ctx, ".text", &lderr, NULL,
2623 &ctx->prog_text);
2624 if (ret < 0)
2625 return ret;
2626 }
2627
2628 if (bpf_has_map_data(ctx) || bpf_has_call_data(ctx))
2629 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen, &prog);
2630 if (ret < 0 && !lderr)
2631 ret = bpf_fetch_prog(ctx, section, &sseen);
2632 if (ret < 0 && !sseen)
2633 fprintf(stderr, "Program section '%s' not found in ELF file!\n",
2634 section);
2635 return ret;
2636 }
2637
2638 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2639 {
2640 int i;
2641
2642 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2643 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2644 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2645 return i;
2646 return -1;
2647 }
2648
2649 struct bpf_jited_aux {
2650 int prog_fd;
2651 int map_fd;
2652 struct bpf_prog_data prog;
2653 struct bpf_map_ext map;
2654 };
2655
2656 static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2657 {
2658 char *file = NULL;
2659 char buff[4096];
2660 unsigned int val;
2661 FILE *fp;
2662 int ret;
2663
2664 ret = asprintf(&file, "/proc/%d/fdinfo/%d", getpid(), fd);
2665 if (ret < 0) {
2666 fprintf(stderr, "asprintf failed: %s\n", strerror(errno));
2667 free(file);
2668 return ret;
2669 }
2670
2671 memset(prog, 0, sizeof(*prog));
2672
2673 fp = fopen(file, "r");
2674 free(file);
2675 if (!fp) {
2676 fprintf(stderr, "No procfs support?!\n");
2677 return -EIO;
2678 }
2679
2680 while (fgets(buff, sizeof(buff), fp)) {
2681 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2682 prog->type = val;
2683 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2684 prog->jited = val;
2685 }
2686
2687 fclose(fp);
2688 return 0;
2689 }
2690
2691 static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2692 {
2693 struct bpf_elf_map tmp;
2694 int ret;
2695
2696 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2697 if (!ret)
2698 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2699
2700 return ret;
2701 }
2702
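/*
 * Wire up tail calls: every remaining section named "<map id>/<key>"
 * holds a program destined for slot <key> of the PROG_ARRAY map that
 * was declared with that id. An illustrative object-side layout,
 * assuming the usual iproute2 helpers from bpf_api.h:
 *
 *	struct bpf_elf_map __section("maps") jmp_map = {
 *		.type       = BPF_MAP_TYPE_PROG_ARRAY,
 *		.id         = 1,
 *		.size_key   = sizeof(uint32_t),
 *		.size_value = sizeof(uint32_t),
 *		.max_elem   = 8,
 *	};
 *
 *	__section("1/0") int subprog(struct __sk_buff *skb)
 *	{
 *		return TC_ACT_OK;
 *	}
 *
 * On update failure, fdinfo of map and program is compared to hint at
 * owner prog type or JIT state mismatches.
 */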
2703 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
2704 {
2705 struct bpf_elf_sec_data data;
2706 uint32_t map_id, key_id;
2707 int fd, i, ret, idx;
2708
2709 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2710 if (ctx->sec_done[i])
2711 continue;
2712
2713 ret = bpf_fill_section_data(ctx, i, &data);
2714 if (ret < 0)
2715 continue;
2716
2717 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
2718 if (ret != 2)
2719 continue;
2720
2721 idx = bpf_find_map_by_id(ctx, map_id);
2722 if (idx < 0)
2723 continue;
2724
2725 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
2726 if (fd < 0)
2727 return -EIO;
2728
2729 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
2730 &fd, BPF_ANY);
2731 if (ret < 0) {
2732 struct bpf_jited_aux aux = {};
2733
2734 ret = -errno;
2735 if (errno == E2BIG) {
2736 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
2737 key_id, map_id);
2738 return ret;
2739 }
2740
2741 aux.map_fd = ctx->map_fds[idx];
2742 aux.prog_fd = fd;
2743
2744 if (bpf_tail_call_get_aux(&aux))
2745 return ret;
2746 if (!aux.map.owner.type)
2747 return ret;
2748
2749 if (aux.prog.type != aux.map.owner.type)
2750 fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
2751 aux.map.owner.type, aux.prog.type);
2752 if (aux.prog.jited != aux.map.owner.jited)
2753 fprintf(stderr, "Tail call map %s jited, but prog %s!\n",
2754 aux.map.owner.jited ? "is" : "not",
2755 aux.prog.jited ? "is" : "not");
2756 return ret;
2757 }
2758
2759 ctx->sec_done[i] = true;
2760 }
2761
2762 return 0;
2763 }
2764
2765 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2766 {
2767 struct stat st;
2768 int ret;
2769
2770 memset(&ctx->stat, 0, sizeof(ctx->stat));
2771
2772 ret = fstat(ctx->obj_fd, &st);
2773 if (ret < 0) {
2774 fprintf(stderr, "Stat of ELF file failed: %s\n",
2775 strerror(errno));
2776 return;
2777 }
2778
2779 ctx->stat.st_dev = st.st_dev;
2780 ctx->stat.st_ino = st.st_ino;
2781 }
2782
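/*
 * Parse one entry of the pinning database (see bpf_hash_init() below),
 * a plain text file mapping custom pinning ids to bpf fs subpaths,
 * one mapping per line with '#' starting a comment:
 *
 *	# id	subpath
 *	4	sharing/cls
 *	5	sharing/act
 *
 * Returns 1 for a parsed entry, 0 on EOF, and -1 with the offending
 * line copied into path on malformed input. The ids above are just
 * examples; PIN_NONE, PIN_OBJECT_NS and PIN_GLOBAL_NS are reserved
 * and rejected by the caller.
 */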
2783 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2784 {
2785 char buff[PATH_MAX];
2786
2787 while (fgets(buff, sizeof(buff), fp)) {
2788 char *ptr = buff;
2789
2790 while (*ptr == ' ' || *ptr == '\t')
2791 ptr++;
2792
2793 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2794 continue;
2795
2796 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2797 sscanf(ptr, "%i %s #", id, path) != 2) {
2798 strcpy(path, ptr);
2799 return -1;
2800 }
2801
2802 return 1;
2803 }
2804
2805 return 0;
2806 }
2807
2808 static bool bpf_pinning_reserved(uint32_t pinning)
2809 {
2810 switch (pinning) {
2811 case PIN_NONE:
2812 case PIN_OBJECT_NS:
2813 case PIN_GLOBAL_NS:
2814 return true;
2815 default:
2816 return false;
2817 }
2818 }
2819
2820 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2821 {
2822 struct bpf_hash_entry *entry;
2823 char subpath[PATH_MAX] = {};
2824 uint32_t pinning;
2825 FILE *fp;
2826 int ret;
2827
2828 fp = fopen(db_file, "r");
2829 if (!fp)
2830 return;
2831
2832 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2833 if (ret == -1) {
2834 fprintf(stderr, "Database %s is corrupted at: %s\n",
2835 db_file, subpath);
2836 fclose(fp);
2837 return;
2838 }
2839
2840 if (bpf_pinning_reserved(pinning)) {
2841 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2842 db_file, pinning);
2843 continue;
2844 }
2845
2846 entry = malloc(sizeof(*entry));
2847 if (!entry) {
2848 fprintf(stderr, "No memory left for db entry!\n");
2849 continue;
2850 }
2851
2852 entry->pinning = pinning;
2853 entry->subpath = strdup(subpath);
2854 if (!entry->subpath) {
2855 fprintf(stderr, "No memory left for db entry!\n");
2856 free(entry);
2857 continue;
2858 }
2859
2860 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2861 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2862 }
2863
2864 fclose(fp);
2865 }
2866
2867 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2868 {
2869 struct bpf_hash_entry *entry;
2870 int i;
2871
2872 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2873 while ((entry = ctx->ht[i]) != NULL) {
2874 ctx->ht[i] = entry->next;
2875 free((char *)entry->subpath);
2876 free(entry);
2877 }
2878 }
2879 }
2880
2881 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2882 {
2883 if (ctx->elf_hdr.e_type != ET_REL ||
2884 (ctx->elf_hdr.e_machine != EM_NONE &&
2885 ctx->elf_hdr.e_machine != EM_BPF) ||
2886 ctx->elf_hdr.e_version != EV_CURRENT) {
2887 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2888 return -EINVAL;
2889 }
2890
2891 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2892 default:
2893 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2894 return -EINVAL;
2895 case ELFDATA2LSB:
2896 if (htons(1) == 1) {
2897 fprintf(stderr,
2898 "We are big endian, eBPF object is little endian!\n");
2899 return -EIO;
2900 }
2901 break;
2902 case ELFDATA2MSB:
2903 if (htons(1) != 1) {
2904 fprintf(stderr,
2905 "We are little endian, eBPF object is big endian!\n");
2906 return -EIO;
2907 }
2908 break;
2909 }
2910
2911 return 0;
2912 }
2913
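/*
 * Snapshot the JIT sysctl so the tail call checks can later warn
 * about JITed vs. non-JITed owner mismatches; equivalent to reading
 *
 *	sysctl net.core.bpf_jit_enable
 */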
2914 static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2915 {
2916 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2917 int fd;
2918
2919 fd = open(path_jit, O_RDONLY);
2920 if (fd > 0) {
2921 char tmp[16] = {};
2922
2923 if (read(fd, tmp, sizeof(tmp)) > 0)
2924 ctx->cfg.jit_enabled = atoi(tmp);
2925 close(fd);
2926 }
2927 }
2928
2929 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2930 enum bpf_prog_type type, __u32 ifindex,
2931 bool verbose)
2932 {
2933 uint8_t tmp[20];
2934 int ret;
2935
2936 if (elf_version(EV_CURRENT) == EV_NONE)
2937 return -EINVAL;
2938
2939 bpf_init_env();
2940
2941 memset(ctx, 0, sizeof(*ctx));
2942 bpf_get_cfg(ctx);
2943
2944 ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
2945 if (ret)
2946 ctx->noafalg = true;
2947 else
2948 hexstring_n2a(tmp, sizeof(tmp), ctx->obj_uid,
2949 sizeof(ctx->obj_uid));
2950
2951 ctx->verbose = verbose;
2952 ctx->type = type;
2953 ctx->ifindex = ifindex;
2954
2955 ctx->obj_fd = open(pathname, O_RDONLY);
2956 if (ctx->obj_fd < 0)
2957 return ctx->obj_fd;
2958
2959 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2960 if (!ctx->elf_fd) {
2961 ret = -EINVAL;
2962 goto out_fd;
2963 }
2964
2965 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2966 ret = -EINVAL;
2967 goto out_fd;
2968 }
2969
2970 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2971 &ctx->elf_hdr) {
2972 ret = -EIO;
2973 goto out_elf;
2974 }
2975
2976 ret = bpf_elf_check_ehdr(ctx);
2977 if (ret < 0)
2978 goto out_elf;
2979
2980 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2981 sizeof(*(ctx->sec_done)));
2982 if (!ctx->sec_done) {
2983 ret = -ENOMEM;
2984 goto out_elf;
2985 }
2986
2987 if (ctx->verbose && bpf_log_realloc(ctx)) {
2988 ret = -ENOMEM;
2989 goto out_free;
2990 }
2991
2992 bpf_save_finfo(ctx);
2993 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2994
2995 return 0;
2996 out_free:
2997 free(ctx->sec_done);
2998 out_elf:
2999 elf_end(ctx->elf_fd);
3000 out_fd:
3001 close(ctx->obj_fd);
3002 return ret;
3003 }
3004
3005 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
3006 {
3007 int i, count = 0;
3008
3009 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
3010 if (!ctx->map_fds[i])
3011 break;
3012 count++;
3013 }
3014
3015 return count;
3016 }
3017
3018 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
3019 {
3020 int i;
3021
3022 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
3023 if (ctx->map_fds[i])
3024 close(ctx->map_fds[i]);
3025 }
3026
3027 if (ctx->btf_fd)
3028 close(ctx->btf_fd);
3029 free(ctx->btf.types);
3030 }
3031
3032 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
3033 {
3034 if (failure)
3035 bpf_maps_teardown(ctx);
3036
3037 bpf_hash_destroy(ctx);
3038
3039 free(ctx->prog_text.insns);
3040 free(ctx->sec_done);
3041 free(ctx->log);
3042
3043 elf_end(ctx->elf_fd);
3044 close(ctx->obj_fd);
3045 }
3046
3047 static struct bpf_elf_ctx __ctx;
3048
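/*
 * Entry point for ELF object loading, used by the loader frontends
 * earlier in this file. The overall flow:
 *
 *	bpf_elf_ctx_init()      open object, sanity check ELF header
 *	bpf_fetch_ancillary()   maps, license, .text, symtab, BTF
 *	bpf_fetch_prog_sec()    relocate and load the wanted section
 *	bpf_fill_prog_arrays()  populate tail call slots
 *	bpf_elf_ctx_destroy()   tear down; maps too on failure
 *
 * On success the program fd is returned and the map fds stay open in
 * __ctx for a possible later export over a Unix domain socket.
 */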
3049 static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
3050 const char *section, __u32 ifindex, bool verbose)
3051 {
3052 struct bpf_elf_ctx *ctx = &__ctx;
3053 int fd = 0, ret;
3054
3055 ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
3056 if (ret < 0) {
3057 fprintf(stderr, "Cannot initialize ELF context!\n");
3058 return ret;
3059 }
3060
3061 ret = bpf_fetch_ancillary(ctx, strcmp(section, ".text"));
3062 if (ret < 0) {
3063 fprintf(stderr, "Error fetching ELF ancillary data!\n");
3064 goto out;
3065 }
3066
3067 fd = bpf_fetch_prog_sec(ctx, section);
3068 if (fd < 0) {
3069 fprintf(stderr, "Error fetching program/map!\n");
3070 ret = fd;
3071 goto out;
3072 }
3073
3074 ret = bpf_fill_prog_arrays(ctx);
3075 if (ret < 0)
3076 fprintf(stderr, "Error filling program arrays!\n");
3077 out:
3078 bpf_elf_ctx_destroy(ctx, ret < 0);
3079 if (ret < 0) {
3080 if (fd)
3081 close(fd);
3082 return ret;
3083 }
3084
3085 return fd;
3086 }
3087
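/*
 * Export map fds over a Unix domain socket via SCM_RIGHTS ancillary
 * data. At most BPF_SCM_MAX_FDS descriptors travel per message, so
 * larger sets go out in batches, each message carrying the matching
 * slice of fds in the control buffer and of map metadata in the
 * payload. bpf_map_set_recv() below mirrors this on the receiving
 * side and learns the total entry count from the first message's
 * aux header.
 */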
3088 static int
3089 bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
3090 const struct bpf_map_data *aux, unsigned int entries)
3091 {
3092 struct bpf_map_set_msg msg = {
3093 .aux.uds_ver = BPF_SCM_AUX_VER,
3094 .aux.num_ent = entries,
3095 };
3096 int *cmsg_buf, min_fd;
3097 char *amsg_buf;
3098 int i;
3099
3100 strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
3101 memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));
3102
3103 cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
3104 amsg_buf = (char *)msg.aux.ent;
3105
3106 for (i = 0; i < entries; i += min_fd) {
3107 int ret;
3108
3109 min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
3110 bpf_map_set_init_single(&msg, min_fd);
3111
3112 memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
3113 memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);
3114
3115 ret = sendmsg(fd, &msg.hdr, 0);
3116 if (ret <= 0)
3117 return ret ? : -1;
3118 }
3119
3120 return 0;
3121 }
3122
3123 static int
3124 bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
3125 unsigned int entries)
3126 {
3127 struct bpf_map_set_msg msg;
3128 int *cmsg_buf, min_fd;
3129 char *amsg_buf, *mmsg_buf;
3130 unsigned int needed = 1;
3131 int i;
3132
3133 cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
3134 amsg_buf = (char *)msg.aux.ent;
3135 mmsg_buf = (char *)&msg.aux;
3136
3137 for (i = 0; i < min(entries, needed); i += min_fd) {
3138 struct cmsghdr *cmsg;
3139 int ret;
3140
3141 min_fd = min(entries, entries - i);
3142 bpf_map_set_init_single(&msg, min_fd);
3143
3144 ret = recvmsg(fd, &msg.hdr, 0);
3145 if (ret <= 0)
3146 return ret ? : -1;
3147
3148 cmsg = CMSG_FIRSTHDR(&msg.hdr);
3149 if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
3150 return -EINVAL;
3151 if (msg.hdr.msg_flags & MSG_CTRUNC)
3152 return -EIO;
3153 if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
3154 return -ENOSYS;
3155
3156 min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
3157 if (min_fd > entries || min_fd <= 0)
3158 return -EINVAL;
3159
3160 memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
3161 memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
3162 memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));
3163
3164 needed = aux->num_ent;
3165 }
3166
3167 return 0;
3168 }
3169
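/*
 * Public entry points for sharing maps between processes over a Unix
 * domain socket, bpf_send_map_fds() being the exporter and
 * bpf_recv_map_fds() the importer. An illustrative tc workflow
 * (exact syntax per tc-bpf(8)) would be:
 *
 *	tc filter add dev eth0 ingress bpf da obj prog.o export /tmp/bpf-uds
 *	tc exec bpf import /tmp/bpf-uds run ./agent
 */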
3170 int bpf_send_map_fds(const char *path, const char *obj)
3171 {
3172 struct bpf_elf_ctx *ctx = &__ctx;
3173 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3174 struct bpf_map_data bpf_aux = {
3175 .fds = ctx->map_fds,
3176 .ent = ctx->maps,
3177 .st = &ctx->stat,
3178 .obj = obj,
3179 };
3180 int fd, ret;
3181
3182 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3183 if (fd < 0) {
3184 fprintf(stderr, "Cannot open socket: %s\n",
3185 strerror(errno));
3186 return -1;
3187 }
3188
3189 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3190
3191 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
3192 if (ret < 0) {
3193 fprintf(stderr, "Cannot connect to %s: %s\n",
3194 path, strerror(errno));
3195 return -1;
3196 }
3197
3198 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
3199 bpf_maps_count(ctx));
3200 if (ret < 0)
3201 fprintf(stderr, "Cannot send fds to %s: %s\n",
3202 path, strerror(errno));
3203
3204 bpf_maps_teardown(ctx);
3205 close(fd);
3206 return ret;
3207 }
3208
3209 int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
3210 unsigned int entries)
3211 {
3212 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3213 int fd, ret;
3214
3215 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3216 if (fd < 0) {
3217 fprintf(stderr, "Cannot open socket: %s\n",
3218 strerror(errno));
3219 return -1;
3220 }
3221
3222 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3223
3224 ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
3225 if (ret < 0) {
3226 fprintf(stderr, "Cannot bind to socket: %s\n",
3227 strerror(errno));
3228 return -1;
3229 }
3230
3231 ret = bpf_map_set_recv(fd, fds, aux, entries);
3232 if (ret < 0)
3233 fprintf(stderr, "Cannot recv fds from %s: %s\n",
3234 path, strerror(errno));
3235
3236 unlink(addr.sun_path);
3237 close(fd);
3238 return ret;
3239 }
3240 #endif /* HAVE_ELF */