/*
 * bpf.c	BPF common code
 *
 *		This program is free software; you can distribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Daniel Borkmann <daniel@iogearbox.net>
 *		Jiri Pirko <jiri@resnulli.us>
 *		Alexei Starovoitov <ast@kernel.org>
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <limits.h>
#include <assert.h>

#ifdef HAVE_ELF
#include <libelf.h>
#include <gelf.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/sendfile.h>
#include <sys/resource.h>

#include <arpa/inet.h>

#include "utils.h"
#include "json_print.h"

#include "bpf_util.h"
#include "bpf_elf.h"
#include "bpf_scm.h"

struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};

static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};

static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type		= "cls",
		.subdir		= "tc",
		.section	= ELF_SECTION_CLASSIFIER,
		.may_uds_export	= true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type		= "act",
		.subdir		= "tc",
		.section	= ELF_SECTION_ACTION,
		.may_uds_export	= true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type		= "xdp",
		.subdir		= "xdp",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type		= "lwt_in",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type		= "lwt_out",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type		= "lwt_xmit",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
		.type		= "lwt_seg6local",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
};

static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].subdir);
	return __bpf_prog_meta[type].subdir;
}

const char *bpf_prog_to_default_section(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].section);
	return __bpf_prog_meta[type].section;
}

#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose);
#else
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif

static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}

static int bpf_map_update(int fd, const void *key, const void *value,
			  uint64_t flags)
{
	union bpf_attr attr = {};

	attr.map_fd = fd;
	attr.key = bpf_ptr_to_u64(key);
	attr.value = bpf_ptr_to_u64(value);
	attr.flags = flags;

	return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int bpf_prog_fd_by_id(uint32_t id)
{
	union bpf_attr attr = {};

	attr.prog_id = id;

	return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
			       uint32_t *info_len)
{
	union bpf_attr attr = {};
	int ret;

	attr.info.bpf_fd = fd;
	attr.info.info = bpf_ptr_to_u64(info);
	attr.info.info_len = *info_len;

	*info_len = 0;
	ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!ret)
		*info_len = attr.info.info_len;

	return ret;
}

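/* Dump the id/tag/jited triple for the program with the given kernel
 * id; returns 1 only if the full program info could be retrieved.
 */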
int bpf_dump_prog_info(FILE *f, uint32_t id)
{
	struct bpf_prog_info info = {};
	uint32_t len = sizeof(info);
	int fd, ret, dump_ok = 0;
	SPRINT_BUF(tmp);

	open_json_object("prog");
	print_uint(PRINT_ANY, "id", "id %u ", id);

	fd = bpf_prog_fd_by_id(id);
	if (fd < 0)
		goto out;

	ret = bpf_prog_info_by_fd(fd, &info, &len);
	if (!ret && len) {
		int jited = !!info.jited_prog_len;

		print_string(PRINT_ANY, "tag", "tag %s ",
			     hexstring_n2a(info.tag, sizeof(info.tag),
					   tmp, sizeof(tmp)));
		print_uint(PRINT_JSON, "jited", NULL, jited);
		if (jited && !is_json_context())
			fprintf(f, "jited ");
		dump_ok = 1;
	}

	close(fd);
out:
	close_json_object();
	return dump_ok;
}

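/* Parse classic BPF from its textual 'len,code jt jf k,...' form (the
 * decimal format emitted e.g. by tcpdump -ddd), either inline or from
 * a file, normalizing whitespace and newlines into the separator.
 */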
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *pos, c_prev = ' ';
		FILE *fp;
		int c;

		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = pos = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		while ((c = fgetc(fp)) != EOF) {
			switch (c) {
			case '\n':
				if (c_prev != ',')
					*(pos++) = ',';
				c_prev = ',';
				break;
			case ' ':
			case '\t':
				if (c_prev != ' ')
					*(pos++) = c;
				c_prev = ' ';
				break;
			default:
				*(pos++) = c;
				c_prev = c;
			}
			if (pos - tmp_string == tmp_len)
				break;
		}

		if (!feof(fp)) {
			free(tmp_string);
			fclose(fp);
			return -E2BIG;
		}

		fclose(fp);
		*pos = 0;

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}

static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}

void bpf_print_ops(FILE *f, struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (len == 0)
		return;

	fprintf(f, "bytecode \'%u,", len);

	for (i = 0; i < len - 1; i++)
		fprintf(f, "%hu %hhu %hhu %u,", ops[i].code, ops[i].jt,
			ops[i].jf, ops[i].k);

	fprintf(f, "%hu %hhu %hhu %u\'", ops[i].code, ops[i].jt,
		ops[i].jf, ops[i].k);
}

static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}

struct bpf_prog_data {
	unsigned int type;
	unsigned int jited;
};

struct bpf_map_ext {
	struct bpf_prog_data owner;
	unsigned int btf_id_key;
	unsigned int btf_id_val;
};

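/* Reconstruct a map's attributes from /proc/<pid>/fdinfo/<fd>. On
 * kernels without eBPF-related fdinfo, *map simply stays all zero,
 * which callers treat as "nothing to compare against".
 */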
static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
					  struct bpf_map_ext *ext)
{
	unsigned int val, owner_type = 0, owner_jited = 0;
	char file[PATH_MAX], buff[4096];
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
	memset(map, 0, sizeof(*map));

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			map->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			map->size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			map->size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			map->max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			map->flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
		else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
			owner_jited = val;
	}

	fclose(fp);
	if (ext) {
		memset(ext, 0, sizeof(*ext));
		ext->owner.type = owner_type;
		ext->owner.jited = owner_jited;
	}

	return 0;
}

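/* Compare the first 'length' bytes of the pinned map's attributes
 * against what the object expects, and report any mismatch.
 */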
static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    struct bpf_map_ext *ext, int length,
				    enum bpf_prog_type type)
{
	struct bpf_elf_map tmp, zero = {};
	int ret;

	ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
	if (ret < 0)
		return ret;

	/* The decision to reject this is made on the kernel side
	 * eventually, but at least give the user a chance to know
	 * what's wrong.
	 */
	if (ext->owner.type && ext->owner.type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, ext->owner.type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If the kernel doesn't have eBPF-related fdinfo, we
		 * cannot do much, so just accept it. We know we do have
		 * an eBPF fd and in this case, everything is 0. It is
		 * guaranteed that no such map exists since a map type
		 * of 0 is the unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}

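/* Mount a bpf filesystem at the target. If the target is not a mount
 * point yet, making it private fails with EINVAL; in that case it is
 * bind-mounted onto itself once and the make-private step is retried.
 */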
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}

static int bpf_mnt_check_target(const char *target)
{
	struct stat sb = {};
	int ret;

	ret = stat(target, &sb);
	if (ret) {
		ret = mkdir(target, S_IRWXU);
		if (ret) {
			fprintf(stderr, "mkdir %s failed: %s\n", target,
				strerror(errno));
			return ret;
		}
	}

	return 0;
}

static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0)
		return -ENOENT;
	if ((unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}

static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
					 int len, const char *mntpt)
{
	int ret;

	ret = bpf_valid_mntpt(mntpt, magic);
	if (!ret) {
		strlcpy(mnt, mntpt, len);
		return mnt;
	}

	return NULL;
}

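/* Probe the caller's well-known mount points first; if none matches,
 * fall back to scanning /proc/mounts for the first entry of fstype.
 */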
static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
				  char *mnt, int len,
				  const char * const *known_mnts)
{
	const char * const *ptr;
	char type[100];
	FILE *fp;

	if (known_mnts) {
		ptr = known_mnts;
		while (*ptr) {
			if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
				return mnt;
			ptr++;
		}
	}

	if (len != PATH_MAX)
		return NULL;

	fp = fopen("/proc/mounts", "r");
	if (fp == NULL)
		return NULL;

	while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
		      mnt, type) == 2) {
		if (strcmp(type, fstype) == 0)
			break;
	}

	fclose(fp);
	if (strcmp(type, fstype) != 0)
		return NULL;

	return mnt;
}

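/* Stream the tracefs trace_pipe to stderr until reading or writing
 * fails, e.g. for debugging programs that use bpf_trace_printk().
 */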
int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	int fd_in, fd_out = STDERR_FILENO;
	char tpipe[PATH_MAX];
	const char *mnt;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd_in = open(tpipe, O_RDONLY);
	if (fd_in < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd_in, buff, sizeof(buff));
		if (ret > 0 && write(fd_out, buff, ret) == ret)
			continue;
		break;
	}

	close(fd_in);
	return -1;
}

static int bpf_gen_global(const char *bpf_sub_dir)
{
	char bpf_glo_dir[PATH_MAX];
	int ret;

	snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
		 bpf_sub_dir, BPF_DIR_GLOBALS);

	ret = mkdir(bpf_glo_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
			strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_gen_master(const char *base, const char *name)
{
	char bpf_sub_dir[PATH_MAX + NAME_MAX + 1];
	int ret;

	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);

	ret = mkdir(bpf_sub_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
			strerror(errno));
		return ret;
	}

	return bpf_gen_global(bpf_sub_dir);
}

static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int ret;

	ret = mkdir(full_name, S_IRWXU);
	if (ret) {
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return ret;
	}

	ret = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (ret) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return ret;
}

static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char bpf_lnk_dir[PATH_MAX + NAME_MAX + 1];
	char bpf_sub_dir[PATH_MAX + NAME_MAX];
	struct stat sb = {};
	int ret;

	snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				return ret;
			}

			return bpf_slave_via_bind_mnt(bpf_sub_dir,
						      bpf_lnk_dir);
		}

		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			return ret;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK)
			return bpf_gen_global(bpf_sub_dir);
	}

	return 0;
}

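/* Create the per-type directory layout under the bpf fs mount: the
 * first subdir (tc) is created as master including its globals
 * directory, the remaining subdirs are symlinks (or bind mounts)
 * pointing back to it.
 */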
static int bpf_gen_hierarchy(const char *base)
{
	int ret, i;

	ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
	for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
		ret = bpf_gen_slave(base,
				    bpf_prog_to_subdir(__bpf_types[i]),
				    bpf_prog_to_subdir(__bpf_types[0]));
	return ret;
}

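/* Resolve and cache the bpf fs working directory, mounting the fs and
 * generating the subdir hierarchy on first use. For a non-zero prog
 * type, the returned path points into that type's subdir.
 */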
static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	const char *mnt_env = getenv(BPF_ENV_MNT);
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	if (mnt_env)
		mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
					    sizeof(bpf_tmp), mnt_env);
	else
		mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
				     sizeof(bpf_tmp), bpf_known_mnts);
	if (!mnt) {
		mnt = mnt_env ? : BPF_DIR_MNT;
		ret = bpf_mnt_check_target(mnt);
		if (!ret)
			ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	bpf_mnt_cached = true;
	return mnt;
}

static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
{
	union bpf_attr attr = {};
	char tmp[PATH_MAX];

	if (strlen(pathname) > 2 && pathname[0] == 'm' &&
	    pathname[1] == ':' && bpf_get_work_dir(type)) {
		snprintf(tmp, sizeof(tmp), "%s/%s",
			 bpf_get_work_dir(type), pathname + 2);
		pathname = tmp;
	}

	attr.pathname = bpf_ptr_to_u64(pathname);

	return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
{
	int prog_fd = bpf_obj_get(pathname, type);

	if (prog_fd < 0)
		fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
			pathname, strerror(errno));
	return prog_fd;
}

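/* Parse the command line for one of the four modes (cBPF bytecode,
 * cBPF bytecode file, eBPF object file, pinned eBPF object), limited
 * to the modes enabled in opt_tbl, and fill in cfg accordingly.
 */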
static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		cfg->mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		cfg->mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		cfg->mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		cfg->mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						cfg->type = i;
						break;
					}
				}

				if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				cfg->type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		section = bpf_prog_to_default_section(cfg->type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		if (__bpf_prog_meta[cfg->type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
		ret = bpf_ops_parse(argc, argv, cfg->opcodes,
				    cfg->mode == CBPF_FILE);
		cfg->n_opcodes = ret;
	} else if (cfg->mode == EBPF_OBJECT) {
		ret = 0; /* program will be loaded by load stage */
	} else if (cfg->mode == EBPF_PINNED) {
		ret = bpf_obj_pinned(file, cfg->type);
		cfg->prog_fd = ret;
	} else {
		return -1;
	}

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;
	cfg->verbose = verbose;

	return ret;
}

static int bpf_do_load(struct bpf_cfg_in *cfg)
{
	if (cfg->mode == EBPF_OBJECT) {
		cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
					    cfg->section, cfg->ifindex,
					    cfg->verbose);
		return cfg->prog_fd;
	}
	return 0;
}

int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
		    void *nl)
{
	char annotation[256];
	int ret;

	ret = bpf_do_load(cfg);
	if (ret < 0)
		return ret;

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
		ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), cfg->mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, cfg->prog_fd, annotation);
	}

	return 0;
}

int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
{
	bool opt_tbl[BPF_MODE_MAX] = {};

	if (ops->cbpf_cb) {
		opt_tbl[CBPF_BYTECODE] = true;
		opt_tbl[CBPF_FILE] = true;
	}

	if (ops->ebpf_cb) {
		opt_tbl[EBPF_OBJECT] = true;
		opt_tbl[EBPF_PINNED] = true;
	}

	return bpf_do_parse(cfg, opt_tbl);
}

int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
			      const struct bpf_cfg_ops *ops, void *nl)
{
	int ret;

	ret = bpf_parse_common(cfg, ops);
	if (ret < 0)
		return ret;

	return bpf_load_common(cfg, ops, nl);
}

int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.type = BPF_PROG_TYPE_UNSPEC,
		.argc = argc,
		.argv = argv,
	};
	struct bpf_map_ext ext = {};
	int ret, prog_fd, map_fd;
	uint32_t map_key;

	ret = bpf_do_parse(&cfg, opt_tbl);
	if (ret < 0)
		return ret;

	ret = bpf_do_load(&cfg);
	if (ret < 0)
		return ret;

	prog_fd = cfg.prog_fd;

	if (key) {
		map_key = *key;
	} else {
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, cfg.type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
				       offsetof(struct bpf_elf_map, max_elem),
				       cfg.type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}

int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

static int bpf_prog_load_dev(enum bpf_prog_type type,
			     const struct bpf_insn *insns, size_t size_insns,
			     const char *license, __u32 ifindex,
			     char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.prog_type = type;
	attr.insns = bpf_ptr_to_u64(insns);
	attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
	attr.license = bpf_ptr_to_u64(license);
	attr.prog_ifindex = ifindex;

	if (size_log > 0) {
		attr.log_buf = bpf_ptr_to_u64(log);
		attr.log_size = size_log;
		attr.log_level = 1;
	}

	return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
		  size_t size_insns, const char *license, char *log,
		  size_t size_log)
{
	return bpf_prog_load_dev(type, insns, size_insns, license, 0,
				 log, size_log);
}

#ifdef HAVE_ELF
struct bpf_elf_prog {
	enum bpf_prog_type type;
	struct bpf_insn *insns;
	unsigned int insns_num;
	size_t size;
	const char *license;
};

struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

struct bpf_config {
	unsigned int jit_enabled;
};

struct bpf_btf {
	const struct btf_header *hdr;
	const void *raw;
	const char *strings;
	const struct btf_type **types;
	int types_num;
};

struct bpf_elf_ctx {
	struct bpf_config cfg;
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	Elf_Data *btf_data;
	char obj_uid[64];
	int obj_fd;
	int btf_fd;
	int map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	struct bpf_map_ext maps_ext[ELF_MAX_MAPS];
	struct bpf_elf_prog prog_text;
	struct bpf_btf btf;
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;
	int sec_maps;
	int sec_text;
	int sec_btf;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	__u32 ifindex;
	bool verbose;
	bool noafalg;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];
	char *log;
	size_t log_size;
};

struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};

static bool bpf_log_has_data(struct bpf_elf_ctx *ctx)
{
	return ctx->log && ctx->log[0];
}

static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (bpf_log_has_data(ctx)) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}

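/* Grow the verifier log buffer: start at 64KiB and double per call,
 * capped at UINT_MAX >> 8 so the size still fits what the kernel
 * accepts in log_size.
 */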
static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
{
	const size_t log_max = UINT_MAX >> 8;
	size_t log_size = ctx->log_size;
	char *ptr;

	if (!ctx->log) {
		log_size = 65536;
	} else if (log_size < log_max) {
		log_size <<= 1;
		if (log_size > log_max)
			log_size = log_max;
	} else {
		return -EINVAL;
	}

	ptr = realloc(ctx->log, log_size);
	if (!ptr)
		return -ENOMEM;

	ptr[0] = 0;
	ctx->log = ptr;
	ctx->log_size = log_size;

	return 0;
}

static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
			  uint32_t size_value, uint32_t max_elem,
			  uint32_t flags, int inner_fd, int btf_fd,
			  uint32_t ifindex, uint32_t btf_id_key,
			  uint32_t btf_id_val)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = size_key;
	attr.value_size = inner_fd ? sizeof(int) : size_value;
	attr.max_entries = max_elem;
	attr.map_flags = flags;
	attr.inner_map_fd = inner_fd;
	attr.map_ifindex = ifindex;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = btf_id_key;
	attr.btf_value_type_id = btf_id_val;

	return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

static int bpf_btf_load(void *btf, size_t size_btf,
			char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.btf = bpf_ptr_to_u64(btf);
	attr.btf_size = size_btf;

	if (size_log > 0) {
		attr.btf_log_buf = bpf_ptr_to_u64(log);
		attr.btf_log_size = size_log;
		attr.btf_log_level = 1;
	}

	return bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
}

static int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr = {};

	attr.pathname = bpf_ptr_to_u64(pathname);
	attr.bpf_fd = fd;

	return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

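/* SHA-1 hash the object file through the AF_ALG socket interface,
 * pushing the file via sendfile(); the 20 byte digest is used to
 * derive the object's unique identifier (cf. ctx->obj_uid) for
 * PIN_OBJECT_NS pinning.
 */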
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family	= AF_ALG,
		.salg_type	= "hash",
		.salg_name	= "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0)
		return cfd;

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0)
		goto out_cfd;

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}

static void bpf_init_env(void)
{
	struct rlimit limit = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	/* Don't bother in case we fail! */
	setrlimit(RLIMIT_MEMLOCK, &limit);

	if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC))
		fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
}

static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
				      uint32_t pinning)
{
	struct bpf_hash_entry *entry;

	entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
	while (entry && entry->pinning != pinning)
		entry = entry->next;

	return entry ? entry->subpath : NULL;
}

static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
			   uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
	case PIN_GLOBAL_NS:
		return false;
	case PIN_NONE:
		return true;
	default:
		return !bpf_custom_pinning(ctx, pinning);
	}
}

static void bpf_make_pathname(char *pathname, size_t len, const char *name,
			      const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 ctx->obj_uid, name);
		break;
	case PIN_GLOBAL_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 BPF_DIR_GLOBALS, name);
		break;
	default:
		snprintf(pathname, len, "%s/../%s/%s",
			 bpf_get_work_dir(ctx->type),
			 bpf_custom_pinning(ctx, pinning), name);
		break;
	}
}

static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
			    uint32_t pinning)
{
	char pathname[PATH_MAX];

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_get(pathname, ctx->type);
}

static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
{
	char tmp[PATH_MAX];
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
		 ctx->obj_uid);

	ret = mkdir(tmp, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
				const char *todo)
{
	char tmp[PATH_MAX], rem[PATH_MAX], *sub;
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
	snprintf(rem, sizeof(rem), "%s/", todo);
	sub = strtok(rem, "/");

	while (sub) {
		if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
			return -EINVAL;

		strcat(tmp, sub);
		strcat(tmp, "/");

		ret = mkdir(tmp, S_IRWXU);
		if (ret && errno != EEXIST) {
			fprintf(stderr, "mkdir %s failed: %s\n", tmp,
				strerror(errno));
			return ret;
		}

		sub = strtok(NULL, "/");
	}

	return 0;
}

static int bpf_place_pinned(int fd, const char *name,
			    const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	char pathname[PATH_MAX];
	const char *tmp;
	int ret = 0;

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	if (pinning == PIN_OBJECT_NS)
		ret = bpf_make_obj_path(ctx);
	else if ((tmp = bpf_custom_pinning(ctx, pinning)))
		ret = bpf_make_custom_path(ctx, tmp);
	if (ret < 0)
		return ret;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_pin(fd, pathname);
}

static void bpf_prog_report(int fd, const char *section,
			    const struct bpf_elf_prog *prog,
			    struct bpf_elf_ctx *ctx)
{
	unsigned int insns = prog->size / sizeof(struct bpf_insn);

	fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", prog->type);
	fprintf(stderr, " - Instructions: %u (%u over limit)\n",
		insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
	fprintf(stderr, " - License: %s\n\n", prog->license);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

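/* Load the program into the kernel, retrying with an enlarged log
 * buffer when the verifier output did not fit, so that a rejected
 * program can still be reported with a useful log.
 */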
static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
			       prog->license, ctx->ifindex,
			       ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}

static void bpf_map_report(int fd, const char *name,
			   const struct bpf_elf_map *map,
			   struct bpf_elf_ctx *ctx, int inner_fd)
{
	fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", map->type);
	fprintf(stderr, " - Identifier: %u\n", map->id);
	fprintf(stderr, " - Pinning: %u\n", map->pinning);
	fprintf(stderr, " - Size key: %u\n", map->size_key);
	fprintf(stderr, " - Size value: %u\n",
		inner_fd ? (int)sizeof(int) : map->size_value);
	fprintf(stderr, " - Max elems: %u\n", map->max_elem);
	fprintf(stderr, " - Flags: %#x\n\n", map->flags);
}

static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
{
	int i;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].id != id)
			continue;
		if (ctx->map_fds[i] < 0)
			return -EINVAL;

		return ctx->map_fds[i];
	}

	return -ENOENT;
}

static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
{
	struct bpf_elf_map outer_map;
	int ret;

	fprintf(stderr, "Cannot insert map into map! ");

	ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
	if (!ret) {
		if (idx >= outer_map.max_elem &&
		    outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
			fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
				outer_map.max_elem, idx);
			return;
		}
	}

	fprintf(stderr, "Different map specs used for outer and inner map?\n");
}

static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
{
	return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
}

static bool bpf_map_offload_neutral(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

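/* Create a single map, reusing a compatible pinned instance if one
 * exists. During the first pass (have_map_in_map non-NULL), creation
 * of map-in-map outer maps is only counted and deferred, since their
 * inner map fd must be resolved first.
 */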
static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
			  const struct bpf_elf_map *map, struct bpf_map_ext *ext,
			  int *have_map_in_map)
{
	int fd, ifindex, ret, map_inner_fd = 0;

	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map, ext,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map \'%s\' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map \'%s\' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd, ctx->btf_fd,
			    ifindex, ext->btf_id_key, ext->btf_id_val);

	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0 && errno != EEXIST) {
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		close(fd);
		return ret;
	}

	return fd;
}

static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
				    const GElf_Sym *sym)
{
	return ctx->str_tab->d_buf + sym->st_name;
}

static int bpf_btf_find(struct bpf_elf_ctx *ctx, const char *name)
{
	const struct btf_type *type;
	const char *res;
	int id;

	for (id = 1; id < ctx->btf.types_num; id++) {
		type = ctx->btf.types[id];
		if (type->name_off >= ctx->btf.hdr->str_len)
			continue;
		res = &ctx->btf.strings[type->name_off];
		if (!strcmp(res, name))
			return id;
	}

	return -ENOENT;
}

static int bpf_btf_find_kv(struct bpf_elf_ctx *ctx, const struct bpf_elf_map *map,
			   const char *name, uint32_t *id_key, uint32_t *id_val)
{
	const struct btf_member *key, *val;
	const struct btf_type *type;
	char btf_name[512];
	const char *res;
	int id;

	snprintf(btf_name, sizeof(btf_name), "____btf_map_%s", name);
	id = bpf_btf_find(ctx, btf_name);
	if (id < 0)
		return id;

	type = ctx->btf.types[id];
	if (BTF_INFO_KIND(type->info) != BTF_KIND_STRUCT)
		return -EINVAL;
	if (BTF_INFO_VLEN(type->info) != 2)
		return -EINVAL;

	key = ((void *) type) + sizeof(*type);
	val = key + 1;
	if (!key->type || key->type >= ctx->btf.types_num ||
	    !val->type || val->type >= ctx->btf.types_num)
		return -EINVAL;

	if (key->name_off >= ctx->btf.hdr->str_len ||
	    val->name_off >= ctx->btf.hdr->str_len)
		return -EINVAL;

	res = &ctx->btf.strings[key->name_off];
	if (strcmp(res, "key"))
		return -EINVAL;

	res = &ctx->btf.strings[val->name_off];
	if (strcmp(res, "value"))
		return -EINVAL;

	*id_key = key->type;
	*id_val = val->type;
	return 0;
}

static void bpf_btf_annotate(struct bpf_elf_ctx *ctx, int which, const char *name)
{
	uint32_t id_key = 0, id_val = 0;

	if (!bpf_btf_find_kv(ctx, &ctx->maps[which], name, &id_key, &id_val)) {
		ctx->maps_ext[which].btf_id_key = id_key;
		ctx->maps_ext[which].btf_id_val = id_val;
	}
}

static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
{
	const char *name;
	GElf_Sym sym;
	int i;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps ||
		    sym.st_value / ctx->map_len != which)
			continue;

		name = bpf_str_tab_name(ctx, &sym);
		bpf_btf_annotate(ctx, which, name);
		return name;
	}

	return NULL;
}

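/* Attach all maps from the maps section in up to three passes: plain
 * maps first, then the deferred map-in-map outer maps once the inner
 * map fds exist, and finally inner maps requesting it via inner_idx
 * are inserted into their outer maps.
 */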
static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
		    ctx->noafalg) {
			fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
			return -ENOTSUP;
		}

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], &have_map_in_map);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_idx);
				return ret;
			}
		}
	}

	return 0;
}

static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
{
	int i, num = 0;
	GElf_Sym sym;

	for (i = 0; i < ctx->sym_num; i++) {
		int type;

		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		type = GELF_ST_TYPE(sym.st_info);
		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    (type != STT_NOTYPE && type != STT_OBJECT) ||
		    sym.st_shndx != ctx->sec_maps)
			continue;
		num++;
	}

	return num;
}

static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	sec_edata = elf_getdata(sec_fd, NULL);
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}

struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};

static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
				struct bpf_elf_sec_data *data)
{
	ctx->map_num = data->sec_data->d_size;
	ctx->sec_maps = section;
	ctx->sec_done[section] = true;

	if (ctx->map_num > sizeof(ctx->maps)) {
		fprintf(stderr, "Too many BPF maps in ELF section!\n");
		return -ENOMEM;
	}

	memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
	return 0;
}

static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			int type;

			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;

			type = GELF_ST_TYPE(sym.st_info);
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    (type != STT_NOTYPE && type != STT_OBJECT) ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}

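/* Finalize the maps section: derive the per-entry size from the
 * number of map symbols and, if the object was built against an
 * older, smaller struct bpf_elf_map, copy each entry over into the
 * current layout with the missing members left zeroed.
 */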
static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%u maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
		fprintf(stderr, "Number of BPF map symbols is not a multiple of struct bpf_elf_map!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* The fixup leaves the rest of the members as zero, which
		 * is fine currently, but the option exists to set some
		 * other default value as well when needed in the future.
		 */
		memcpy(&fixup[i], buff, ctx->map_len);
	}

	memcpy(ctx->maps, fixup, sizeof(fixup));
	if (ctx->verbose)
		printf("%zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
		       sizeof(struct bpf_elf_map) - ctx->map_len);
	return 0;
}

static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
			     struct bpf_elf_sec_data *data)
{
	if (data->sec_data->d_size > sizeof(ctx->license))
		return -ENOMEM;

	memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
			    struct bpf_elf_sec_data *data)
{
	ctx->sym_tab = data->sec_data;
	ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
			    struct bpf_elf_sec_data *data)
{
	ctx->str_tab = data->sec_data;
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_text(struct bpf_elf_ctx *ctx, int section,
			  struct bpf_elf_sec_data *data)
{
	ctx->sec_text = section;
	ctx->sec_done[section] = true;
	return 0;
}

static void bpf_btf_report(int fd, struct bpf_elf_ctx *ctx)
{
	fprintf(stderr, "\nBTF debug data section \'.BTF\' %s%s (%d)!\n",
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Length: %zu\n", ctx->btf_data->d_size);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

static int bpf_btf_attach(struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_btf_load(ctx->btf_data->d_buf, ctx->btf_data->d_size,
			  ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		if (bpf_log_has_data(ctx))
			bpf_btf_report(fd, ctx);
	}

	return fd;
}

static int bpf_fetch_btf_begin(struct bpf_elf_ctx *ctx, int section,
			       struct bpf_elf_sec_data *data)
{
	ctx->btf_data = data->sec_data;
	ctx->sec_btf = section;
	ctx->sec_done[section] = true;
	return 0;
}

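/* Sanity-check the raw .BTF section: magic, version, flags, and that
 * the type and string sub-sections are adjacent, in bounds, properly
 * aligned and NUL-terminated.
 */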
static int bpf_btf_check_header(struct bpf_elf_ctx *ctx)
{
	const struct btf_header *hdr = ctx->btf_data->d_buf;
	const char *str_start, *str_end;
	unsigned int data_len;

	if (hdr->magic != BTF_MAGIC) {
		fprintf(stderr, "Object has wrong BTF magic: %x, expected: %x!\n",
			hdr->magic, BTF_MAGIC);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		fprintf(stderr, "Object has wrong BTF version: %u, expected: %u!\n",
			hdr->version, BTF_VERSION);
		return -EINVAL;
	}

	if (hdr->flags) {
		fprintf(stderr, "Object has unsupported BTF flags %x!\n",
			hdr->flags);
		return -EINVAL;
	}

	data_len = ctx->btf_data->d_size - sizeof(*hdr);
	if (data_len < hdr->type_off ||
	    data_len < hdr->str_off ||
	    data_len < hdr->type_len + hdr->str_len ||
	    hdr->type_off >= hdr->str_off ||
	    hdr->type_off + hdr->type_len != hdr->str_off ||
	    hdr->str_off + hdr->str_len != data_len ||
	    (hdr->type_off & (sizeof(uint32_t) - 1))) {
		fprintf(stderr, "Object has malformed BTF data!\n");
		return -EINVAL;
	}

	ctx->btf.hdr = hdr;
	ctx->btf.raw = hdr + 1;

	str_start = ctx->btf.raw + hdr->str_off;
	str_end = str_start + hdr->str_len;
	if (!hdr->str_len ||
	    hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    str_start[0] || str_end[-1]) {
		fprintf(stderr, "Object has malformed BTF string data!\n");
		return -EINVAL;
	}

	ctx->btf.strings = str_start;
	return 0;
}

static int bpf_btf_register_type(struct bpf_elf_ctx *ctx,
				 const struct btf_type *type)
{
	int cur = ctx->btf.types_num, num = cur + 1;
	const struct btf_type **types;

	types = realloc(ctx->btf.types, num * sizeof(type));
	if (!types) {
		free(ctx->btf.types);
		ctx->btf.types = NULL;
		ctx->btf.types_num = 0;
		return -ENOMEM;
	}

	ctx->btf.types = types;
	ctx->btf.types[cur] = type;
	ctx->btf.types_num = num;
	return 0;
}

static struct btf_type btf_type_void;

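/* Walk the BTF type section and record one pointer per type ID,
 * skipping over each kind's trailing data (members, params, etc.);
 * ID 0 is the implicit void type.
 */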
2163 static int bpf_btf_prep_type_data(struct bpf_elf_ctx *ctx)
2164 {
2165 const void *type_cur = ctx->btf.raw + ctx->btf.hdr->type_off;
2166 const void *type_end = ctx->btf.raw + ctx->btf.hdr->str_off;
2167 const struct btf_type *type;
2168 uint16_t var_len;
2169 int ret, kind;
2170
2171 ret = bpf_btf_register_type(ctx, &btf_type_void);
2172 if (ret < 0)
2173 return ret;
2174
2175 while (type_cur < type_end) {
2176 type = type_cur;
2177 type_cur += sizeof(*type);
2178
2179 var_len = BTF_INFO_VLEN(type->info);
2180 kind = BTF_INFO_KIND(type->info);
2181
2182 switch (kind) {
2183 case BTF_KIND_INT:
2184 type_cur += sizeof(int);
2185 break;
2186 case BTF_KIND_ARRAY:
2187 type_cur += sizeof(struct btf_array);
2188 break;
2189 case BTF_KIND_STRUCT:
2190 case BTF_KIND_UNION:
2191 type_cur += var_len * sizeof(struct btf_member);
2192 break;
2193 case BTF_KIND_ENUM:
2194 type_cur += var_len * sizeof(struct btf_enum);
2195 break;
2196 case BTF_KIND_FUNC_PROTO:
2197 type_cur += var_len * sizeof(struct btf_param);
2198 break;
2199 case BTF_KIND_TYPEDEF:
2200 case BTF_KIND_PTR:
2201 case BTF_KIND_FWD:
2202 case BTF_KIND_VOLATILE:
2203 case BTF_KIND_CONST:
2204 case BTF_KIND_RESTRICT:
2205 case BTF_KIND_FUNC:
2206 break;
2207 default:
2208 fprintf(stderr, "Object has unknown BTF type: %u!\n", kind);
2209 return -EINVAL;
2210 }
2211
2212 ret = bpf_btf_register_type(ctx, type);
2213 if (ret < 0)
2214 return ret;
2215 }
2216
2217 return 0;
2218 }
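
/*
 * Note on the walk above: every struct btf_type may be followed by
 * kind-specific trailing data, e.g. one extra u32 of encoding bits for
 * BTF_KIND_INT, vlen struct btf_member entries for STRUCT/UNION, vlen
 * struct btf_enum entries for ENUM and vlen struct btf_param entries
 * for FUNC_PROTO. Purely referential kinds (PTR, TYPEDEF, FUNC, ...)
 * carry nothing extra. Type id 0 is reserved for "void", hence the
 * dummy btf_type_void entry registered up front.
 */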
2219
2220 static int bpf_btf_prep_data(struct bpf_elf_ctx *ctx)
2221 {
2222 int ret = bpf_btf_check_header(ctx);
2223
2224 if (!ret)
2225 return bpf_btf_prep_type_data(ctx);
2226 return ret;
2227 }
2228
2229 static void bpf_fetch_btf_end(struct bpf_elf_ctx *ctx)
2230 {
2231 int fd = bpf_btf_attach(ctx);
2232
2233 if (fd < 0)
2234 return;
2235 ctx->btf_fd = fd;
2236 if (bpf_btf_prep_data(ctx) < 0) {
2237 close(ctx->btf_fd);
2238 ctx->btf_fd = 0;
2239 }
2240 }
2241
2242 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
2243 {
2244 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
2245 }
2246
2247 static bool bpf_has_btf_data(const struct bpf_elf_ctx *ctx)
2248 {
2249 return ctx->sec_btf;
2250 }
2251
2252 static bool bpf_has_call_data(const struct bpf_elf_ctx *ctx)
2253 {
2254 return ctx->sec_text;
2255 }
2256
2257 static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx, bool check_text_sec)
2258 {
2259 struct bpf_elf_sec_data data;
2260 int i, ret = -1;
2261
2262 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2263 ret = bpf_fill_section_data(ctx, i, &data);
2264 if (ret < 0)
2265 continue;
2266
2267 if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2268 !strcmp(data.sec_name, ELF_SECTION_MAPS))
2269 ret = bpf_fetch_maps_begin(ctx, i, &data);
2270 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2271 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
2272 ret = bpf_fetch_license(ctx, i, &data);
2273 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2274 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2275 !strcmp(data.sec_name, ".text") &&
2276 check_text_sec)
2277 ret = bpf_fetch_text(ctx, i, &data);
2278 else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
2279 !strcmp(data.sec_name, ".symtab"))
2280 ret = bpf_fetch_symtab(ctx, i, &data);
2281 else if (data.sec_hdr.sh_type == SHT_STRTAB &&
2282 !strcmp(data.sec_name, ".strtab"))
2283 ret = bpf_fetch_strtab(ctx, i, &data);
2284 else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
2285 !strcmp(data.sec_name, ".BTF"))
2286 ret = bpf_fetch_btf_begin(ctx, i, &data);
2287 if (ret < 0) {
2288 fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
2289 i);
2290 return ret;
2291 }
2292 }
2293
2294 if (bpf_has_btf_data(ctx))
2295 bpf_fetch_btf_end(ctx);
2296 if (bpf_has_map_data(ctx)) {
2297 ret = bpf_fetch_maps_end(ctx);
2298 if (ret < 0) {
2299 fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
2300 return ret;
2301 }
2302
2303 ret = bpf_maps_attach_all(ctx);
2304 if (ret < 0) {
2305 fprintf(stderr, "Error loading maps into kernel!\n");
2306 return ret;
2307 }
2308 }
2309
2310 return ret;
2311 }
2312
2313 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
2314 bool *sseen)
2315 {
2316 struct bpf_elf_sec_data data;
2317 struct bpf_elf_prog prog;
2318 int ret, i, fd = -1;
2319
2320 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2321 if (ctx->sec_done[i])
2322 continue;
2323
2324 ret = bpf_fill_section_data(ctx, i, &data);
2325 if (ret < 0 ||
2326 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
2327 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2328 !strcmp(data.sec_name, section)))
2329 continue;
2330
2331 *sseen = true;
2332
2333 memset(&prog, 0, sizeof(prog));
2334 prog.type = ctx->type;
2335 prog.license = ctx->license;
2336 prog.size = data.sec_data->d_size;
2337 prog.insns_num = prog.size / sizeof(struct bpf_insn);
2338 prog.insns = data.sec_data->d_buf;
2339
2340 fd = bpf_prog_attach(section, &prog, ctx);
2341 if (fd < 0)
2342 return fd;
2343
2344 ctx->sec_done[i] = true;
2345 break;
2346 }
2347
2348 return fd;
2349 }
2350
2351 struct bpf_relo_props {
2352 struct bpf_tail_call {
2353 unsigned int total;
2354 unsigned int jited;
2355 } tc;
2356 int main_num;
2357 };
2358
2359 static int bpf_apply_relo_map(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2360 GElf_Rel *relo, GElf_Sym *sym,
2361 struct bpf_relo_props *props)
2362 {
2363 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2364 unsigned int map_idx = sym->st_value / ctx->map_len;
2365
2366 if (insn_off >= prog->insns_num)
2367 return -EINVAL;
2368 if (prog->insns[insn_off].code != (BPF_LD | BPF_IMM | BPF_DW)) {
2369 fprintf(stderr, "ELF contains relo data for non-ld64 instruction at offset %u! Compiler bug?!\n",
2370 insn_off);
2371 return -EINVAL;
2372 }
2373
2374 if (map_idx >= ARRAY_SIZE(ctx->map_fds))
2375 return -EINVAL;
2376 if (!ctx->map_fds[map_idx])
2377 return -EINVAL;
2378 if (ctx->maps[map_idx].type == BPF_MAP_TYPE_PROG_ARRAY) {
2379 props->tc.total++;
2380 if (ctx->maps_ext[map_idx].owner.jited ||
2381 (ctx->maps_ext[map_idx].owner.type == 0 &&
2382 ctx->cfg.jit_enabled))
2383 props->tc.jited++;
2384 }
2385
2386 prog->insns[insn_off].src_reg = BPF_PSEUDO_MAP_FD;
2387 prog->insns[insn_off].imm = ctx->map_fds[map_idx];
2388 return 0;
2389 }
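
/*
 * Illustration of the map relocation above: a map reference compiles
 * to a 16-byte ld64 insn (BPF_LD | BPF_IMM | BPF_DW) whose immediate
 * is still zero. The fixup marks src_reg as BPF_PSEUDO_MAP_FD and
 * stores the map's fd in imm; the kernel verifier translates that fd
 * back into the real map pointer at program load time.
 */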
2390
2391 static int bpf_apply_relo_call(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
2392 GElf_Rel *relo, GElf_Sym *sym,
2393 struct bpf_relo_props *props)
2394 {
2395 unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
2396 struct bpf_elf_prog *prog_text = &ctx->prog_text;
2397
2398 if (insn_off >= prog->insns_num)
2399 return -EINVAL;
2400 if (prog->insns[insn_off].code != (BPF_JMP | BPF_CALL) ||
2401 prog->insns[insn_off].src_reg != BPF_PSEUDO_CALL) {
2402 fprintf(stderr, "ELF contains relo data for non-call instruction at offset %u! Compiler bug?!\n",
2403 insn_off);
2404 return -EINVAL;
2405 }
2406
2407 if (!props->main_num) {
2408 struct bpf_insn *insns = realloc(prog->insns,
2409 prog->size + prog_text->size);
2410 if (!insns)
2411 return -ENOMEM;
2412
2413 memcpy(insns + prog->insns_num, prog_text->insns,
2414 prog_text->size);
2415 props->main_num = prog->insns_num;
2416 prog->insns = insns;
2417 prog->insns_num += prog_text->insns_num;
2418 prog->size += prog_text->size;
2419 }
2420
2421 prog->insns[insn_off].imm += props->main_num - insn_off;
2422 return 0;
2423 }
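
/*
 * For bpf-to-bpf calls the shared .text instructions are appended once
 * behind the program being loaded (starting at insn offset main_num),
 * and each pseudo call's imm is then rebased so that it keeps encoding
 * a relative insn offset to the callee, which is roughly what the
 * kernel expects for BPF_PSEUDO_CALL targets.
 */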
2424
2425 static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
2426 struct bpf_elf_sec_data *data_relo,
2427 struct bpf_elf_prog *prog,
2428 struct bpf_relo_props *props)
2429 {
2430 GElf_Shdr *rhdr = &data_relo->sec_hdr;
2431 int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
2432
2433 for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
2434 GElf_Rel relo;
2435 GElf_Sym sym;
2436 int ret = -EIO;
2437
2438 if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
2439 return -EIO;
2440 if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
2441 return -EIO;
2442
2443 if (sym.st_shndx == ctx->sec_maps)
2444 ret = bpf_apply_relo_map(ctx, prog, &relo, &sym, props);
2445 else if (sym.st_shndx == ctx->sec_text)
2446 ret = bpf_apply_relo_call(ctx, prog, &relo, &sym, props);
2447 else
2448 fprintf(stderr, "ELF contains non-{map,call} related relo data in entry %u pointing to section %u! Compiler bug?!\n",
2449 relo_ent, sym.st_shndx);
2450 if (ret < 0)
2451 return ret;
2452 }
2453
2454 return 0;
2455 }
2456
2457 static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
2458 bool *lderr, bool *sseen, struct bpf_elf_prog *prog)
2459 {
2460 struct bpf_elf_sec_data data_relo, data_insn;
2461 int ret, idx, i, fd = -1;
2462
2463 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2464 struct bpf_relo_props props = {};
2465
2466 ret = bpf_fill_section_data(ctx, i, &data_relo);
2467 if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
2468 continue;
2469
2470 idx = data_relo.sec_hdr.sh_info;
2471
2472 ret = bpf_fill_section_data(ctx, idx, &data_insn);
2473 if (ret < 0 ||
2474 !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
2475 (data_insn.sec_hdr.sh_flags & SHF_EXECINSTR) &&
2476 !strcmp(data_insn.sec_name, section)))
2477 continue;
2478 if (sseen)
2479 *sseen = true;
2480
2481 memset(prog, 0, sizeof(*prog));
2482 prog->type = ctx->type;
2483 prog->license = ctx->license;
2484 prog->size = data_insn.sec_data->d_size;
2485 prog->insns_num = prog->size / sizeof(struct bpf_insn);
2486 prog->insns = malloc(prog->size);
2487 if (!prog->insns) {
2488 *lderr = true;
2489 return -ENOMEM;
2490 }
2491
2492 memcpy(prog->insns, data_insn.sec_data->d_buf, prog->size);
2493
2494 ret = bpf_apply_relo_data(ctx, &data_relo, prog, &props);
2495 if (ret < 0) {
2496 *lderr = true;
2497 if (ctx->sec_text != idx)
2498 free(prog->insns);
2499 return ret;
2500 }
2501 if (ctx->sec_text == idx) {
2502 fd = 0;
2503 goto out;
2504 }
2505
2506 fd = bpf_prog_attach(section, prog, ctx);
2507 free(prog->insns);
2508 if (fd < 0) {
2509 *lderr = true;
2510 if (props.tc.total) {
2511 if (ctx->cfg.jit_enabled &&
2512 props.tc.total != props.tc.jited)
2513 fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have a JITed owner!\n",
2514 props.tc.jited, props.tc.total);
2515 if (!ctx->cfg.jit_enabled &&
2516 props.tc.jited)
2517 fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have a JITed owner!\n",
2518 props.tc.jited, props.tc.total);
2519 }
2520 return fd;
2521 }
2522 out:
2523 ctx->sec_done[i] = true;
2524 ctx->sec_done[idx] = true;
2525 break;
2526 }
2527
2528 return fd;
2529 }
2530
2531 static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2532 {
2533 bool lderr = false, sseen = false;
2534 struct bpf_elf_prog prog;
2535 int ret = -1;
2536
2537 if (bpf_has_call_data(ctx)) {
2538 ret = bpf_fetch_prog_relo(ctx, ".text", &lderr, NULL,
2539 &ctx->prog_text);
2540 if (ret < 0)
2541 return ret;
2542 }
2543
2544 if (bpf_has_map_data(ctx) || bpf_has_call_data(ctx))
2545 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen, &prog);
2546 if (ret < 0 && !lderr)
2547 ret = bpf_fetch_prog(ctx, section, &sseen);
2548 if (ret < 0 && !sseen)
2549 fprintf(stderr, "Program section '%s' not found in ELF file!\n",
2550 section);
2551 return ret;
2552 }
2553
2554 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2555 {
2556 int i;
2557
2558 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2559 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2560 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2561 return i;
2562 return -1;
2563 }
2564
2565 struct bpf_jited_aux {
2566 int prog_fd;
2567 int map_fd;
2568 struct bpf_prog_data prog;
2569 struct bpf_map_ext map;
2570 };
2571
2572 static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2573 {
2574 char file[PATH_MAX], buff[4096];
2575 unsigned int val;
2576 FILE *fp;
2577
2578 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
2579 memset(prog, 0, sizeof(*prog));
2580
2581 fp = fopen(file, "r");
2582 if (!fp) {
2583 fprintf(stderr, "No procfs support?!\n");
2584 return -EIO;
2585 }
2586
2587 while (fgets(buff, sizeof(buff), fp)) {
2588 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2589 prog->type = val;
2590 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2591 prog->jited = val;
2592 }
2593
2594 fclose(fp);
2595 return 0;
2596 }
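
/*
 * The scraper above reads /proc/<pid>/fdinfo/<fd>; for a BPF prog fd
 * the kernel emits lines like the following (values illustrative):
 *
 *   pos:          0
 *   flags:        02000002
 *   prog_type:    3
 *   prog_jited:   1
 */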
2597
2598 static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2599 {
2600 struct bpf_elf_map tmp;
2601 int ret;
2602
2603 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2604 if (!ret)
2605 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2606
2607 return ret;
2608 }
2609
2610 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
2611 {
2612 struct bpf_elf_sec_data data;
2613 uint32_t map_id, key_id;
2614 int fd, i, ret, idx;
2615
2616 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2617 if (ctx->sec_done[i])
2618 continue;
2619
2620 ret = bpf_fill_section_data(ctx, i, &data);
2621 if (ret < 0)
2622 continue;
2623
2624 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
2625 if (ret != 2)
2626 continue;
2627
2628 idx = bpf_find_map_by_id(ctx, map_id);
2629 if (idx < 0)
2630 continue;
2631
2632 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
2633 if (fd < 0)
2634 return -EIO;
2635
2636 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
2637 &fd, BPF_ANY);
2638 if (ret < 0) {
2639 struct bpf_jited_aux aux = {};
2640
2641 ret = -errno;
2642 if (errno == E2BIG) {
2643 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
2644 key_id, map_id);
2645 return ret;
2646 }
2647
2648 aux.map_fd = ctx->map_fds[idx];
2649 aux.prog_fd = fd;
2650
2651 if (bpf_tail_call_get_aux(&aux))
2652 return ret;
2653 if (!aux.map.owner.type)
2654 return ret;
2655
2656 if (aux.prog.type != aux.map.owner.type)
2657 fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
2658 aux.map.owner.type, aux.prog.type);
2659 if (aux.prog.jited != aux.map.owner.jited)
2660 fprintf(stderr, "Tail call map %s JITed, but prog %s!\n",
2661 aux.map.owner.jited ? "is" : "is not",
2662 aux.prog.jited ? "is" : "is not");
2663 return ret;
2664 }
2665
2666 ctx->sec_done[i] = true;
2667 }
2668
2669 return 0;
2670 }
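
/*
 * Tail call wiring example: a program placed in an ELF section named
 * "<id>/<key>" is loaded and installed at slot <key> of the
 * BPF_MAP_TYPE_PROG_ARRAY whose bpf_elf_map id field equals <id>.
 * E.g. a section "2/0" fills key 0 of the prog array declared with
 * id 2 in the maps section.
 */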
2671
2672 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2673 {
2674 struct stat st;
2675 int ret;
2676
2677 memset(&ctx->stat, 0, sizeof(ctx->stat));
2678
2679 ret = fstat(ctx->obj_fd, &st);
2680 if (ret < 0) {
2681 fprintf(stderr, "Stat of ELF file failed: %s\n",
2682 strerror(errno));
2683 return;
2684 }
2685
2686 ctx->stat.st_dev = st.st_dev;
2687 ctx->stat.st_ino = st.st_ino;
2688 }
2689
2690 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2691 {
2692 char buff[PATH_MAX];
2693
2694 while (fgets(buff, sizeof(buff), fp)) {
2695 char *ptr = buff;
2696
2697 while (*ptr == ' ' || *ptr == '\t')
2698 ptr++;
2699
2700 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2701 continue;
2702
2703 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2704 sscanf(ptr, "%i %s #", id, path) != 2) {
2705 strcpy(path, ptr);
2706 return -1;
2707 }
2708
2709 return 1;
2710 }
2711
2712 return 0;
2713 }
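
/*
 * Format of the pinning database parsed above: one "<id> <subpath>"
 * mapping per line, with '#' starting a comment. A hypothetical
 * /etc/iproute2/bpf_pinning could look like:
 *
 *   # id  subpath
 *   3     tracing
 *   4     foo/bar
 *
 * Maps selecting such an id via their pinning attribute are then
 * pinned under the corresponding subpath of the bpf fs mount.
 */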
2714
2715 static bool bpf_pinning_reserved(uint32_t pinning)
2716 {
2717 switch (pinning) {
2718 case PIN_NONE:
2719 case PIN_OBJECT_NS:
2720 case PIN_GLOBAL_NS:
2721 return true;
2722 default:
2723 return false;
2724 }
2725 }
2726
2727 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2728 {
2729 struct bpf_hash_entry *entry;
2730 char subpath[PATH_MAX] = {};
2731 uint32_t pinning;
2732 FILE *fp;
2733 int ret;
2734
2735 fp = fopen(db_file, "r");
2736 if (!fp)
2737 return;
2738
2739 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2740 if (ret == -1) {
2741 fprintf(stderr, "Database %s is corrupted at: %s\n",
2742 db_file, subpath);
2743 fclose(fp);
2744 return;
2745 }
2746
2747 if (bpf_pinning_reserved(pinning)) {
2748 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2749 db_file, pinning);
2750 continue;
2751 }
2752
2753 entry = malloc(sizeof(*entry));
2754 if (!entry) {
2755 fprintf(stderr, "No memory left for db entry!\n");
2756 continue;
2757 }
2758
2759 entry->pinning = pinning;
2760 entry->subpath = strdup(subpath);
2761 if (!entry->subpath) {
2762 fprintf(stderr, "No memory left for db entry!\n");
2763 free(entry);
2764 continue;
2765 }
2766
2767 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2768 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2769 }
2770
2771 fclose(fp);
2772 }
2773
2774 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2775 {
2776 struct bpf_hash_entry *entry;
2777 int i;
2778
2779 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2780 while ((entry = ctx->ht[i]) != NULL) {
2781 ctx->ht[i] = entry->next;
2782 free((char *)entry->subpath);
2783 free(entry);
2784 }
2785 }
2786 }
2787
2788 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2789 {
2790 if (ctx->elf_hdr.e_type != ET_REL ||
2791 (ctx->elf_hdr.e_machine != EM_NONE &&
2792 ctx->elf_hdr.e_machine != EM_BPF) ||
2793 ctx->elf_hdr.e_version != EV_CURRENT) {
2794 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2795 return -EINVAL;
2796 }
2797
2798 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2799 default:
2800 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2801 return -EINVAL;
2802 case ELFDATA2LSB:
2803 if (htons(1) == 1) {
2804 fprintf(stderr,
2805 "We are big endian, eBPF object is little endian!\n");
2806 return -EIO;
2807 }
2808 break;
2809 case ELFDATA2MSB:
2810 if (htons(1) != 1) {
2811 fprintf(stderr,
2812 "We are little endian, eBPF object is big endian!\n");
2813 return -EIO;
2814 }
2815 break;
2816 }
2817
2818 return 0;
2819 }
2820
2821 static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2822 {
2823 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2824 int fd;
2825
2826 fd = open(path_jit, O_RDONLY);
2827 if (fd >= 0) {	/* open() may legitimately return fd 0 */
2828 char tmp[16] = {};
2829
2830 if (read(fd, tmp, sizeof(tmp)) > 0)
2831 ctx->cfg.jit_enabled = atoi(tmp);
2832 close(fd);
2833 }
2834 }
2835
2836 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2837 enum bpf_prog_type type, __u32 ifindex,
2838 bool verbose)
2839 {
2840 uint8_t tmp[20];
2841 int ret;
2842
2843 if (elf_version(EV_CURRENT) == EV_NONE)
2844 return -EINVAL;
2845
2846 bpf_init_env();
2847
2848 memset(ctx, 0, sizeof(*ctx));
2849 bpf_get_cfg(ctx);
2850
2851 ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
2852 if (ret)
2853 ctx->noafalg = true;
2854 else
2855 hexstring_n2a(tmp, sizeof(tmp), ctx->obj_uid,
2856 sizeof(ctx->obj_uid));
2857
2858 ctx->verbose = verbose;
2859 ctx->type = type;
2860 ctx->ifindex = ifindex;
2861
2862 ctx->obj_fd = open(pathname, O_RDONLY);
2863 if (ctx->obj_fd < 0)
2864 return ctx->obj_fd;
2865
2866 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2867 if (!ctx->elf_fd) {
2868 ret = -EINVAL;
2869 goto out_fd;
2870 }
2871
2872 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2873 ret = -EINVAL;
2874 goto out_elf;	/* elf_begin() succeeded, so release the handle */
2875 }
2876
2877 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2878 &ctx->elf_hdr) {
2879 ret = -EIO;
2880 goto out_elf;
2881 }
2882
2883 ret = bpf_elf_check_ehdr(ctx);
2884 if (ret < 0)
2885 goto out_elf;
2886
2887 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2888 sizeof(*(ctx->sec_done)));
2889 if (!ctx->sec_done) {
2890 ret = -ENOMEM;
2891 goto out_elf;
2892 }
2893
2894 if (ctx->verbose && bpf_log_realloc(ctx)) {
2895 ret = -ENOMEM;
2896 goto out_free;
2897 }
2898
2899 bpf_save_finfo(ctx);
2900 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2901
2902 return 0;
2903 out_free:
2904 free(ctx->sec_done);
2905 out_elf:
2906 elf_end(ctx->elf_fd);
2907 out_fd:
2908 close(ctx->obj_fd);
2909 return ret;
2910 }
2911
2912 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2913 {
2914 int i, count = 0;
2915
2916 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2917 if (!ctx->map_fds[i])
2918 break;
2919 count++;
2920 }
2921
2922 return count;
2923 }
2924
2925 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2926 {
2927 int i;
2928
2929 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2930 if (ctx->map_fds[i])
2931 close(ctx->map_fds[i]);
2932 }
2933
2934 if (ctx->btf_fd)
2935 close(ctx->btf_fd);
2936 free(ctx->btf.types);
2937 }
2938
2939 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2940 {
2941 if (failure)
2942 bpf_maps_teardown(ctx);
2943
2944 bpf_hash_destroy(ctx);
2945
2946 free(ctx->prog_text.insns);
2947 free(ctx->sec_done);
2948 free(ctx->log);
2949
2950 elf_end(ctx->elf_fd);
2951 close(ctx->obj_fd);
2952 }
2953
2954 static struct bpf_elf_ctx __ctx;
2955
2956 static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
2957 const char *section, __u32 ifindex, bool verbose)
2958 {
2959 struct bpf_elf_ctx *ctx = &__ctx;
2960 int fd = 0, ret;
2961
2962 ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
2963 if (ret < 0) {
2964 fprintf(stderr, "Cannot initialize ELF context!\n");
2965 return ret;
2966 }
2967
2968 ret = bpf_fetch_ancillary(ctx, strcmp(section, ".text"));
2969 if (ret < 0) {
2970 fprintf(stderr, "Error fetching ELF ancillary data!\n");
2971 goto out;
2972 }
2973
2974 fd = bpf_fetch_prog_sec(ctx, section);
2975 if (fd < 0) {
2976 fprintf(stderr, "Error fetching program/map!\n");
2977 ret = fd;
2978 goto out;
2979 }
2980
2981 ret = bpf_fill_prog_arrays(ctx);
2982 if (ret < 0)
2983 fprintf(stderr, "Error filling program arrays!\n");
2984 out:
2985 bpf_elf_ctx_destroy(ctx, ret < 0);
2986 if (ret < 0) {
2987 if (fd)
2988 close(fd);
2989 return ret;
2990 }
2991
2992 return fd;
2993 }
2994
2995 static int
2996 bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
2997 const struct bpf_map_data *aux, unsigned int entries)
2998 {
2999 struct bpf_map_set_msg msg = {
3000 .aux.uds_ver = BPF_SCM_AUX_VER,
3001 .aux.num_ent = entries,
3002 };
3003 int *cmsg_buf, min_fd;
3004 char *amsg_buf;
3005 int i;
3006
3007 strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
3008 memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));
3009
3010 cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
3011 amsg_buf = (char *)msg.aux.ent;
3012
3013 for (i = 0; i < entries; i += min_fd) {
3014 int ret;
3015
3016 min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
3017 bpf_map_set_init_single(&msg, min_fd);
3018
3019 memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
3020 memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);
3021
3022 ret = sendmsg(fd, &msg.hdr, 0);
3023 if (ret <= 0)
3024 return ret ? : -1;
3025 }
3026
3027 return 0;
3028 }
3029
3030 static int
3031 bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
3032 unsigned int entries)
3033 {
3034 struct bpf_map_set_msg msg;
3035 int *cmsg_buf, min_fd;
3036 char *amsg_buf, *mmsg_buf;
3037 unsigned int needed = 1;
3038 int i;
3039
3040 cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
3041 amsg_buf = (char *)msg.aux.ent;
3042 mmsg_buf = (char *)&msg.aux;
3043
3044 for (i = 0; i < min(entries, needed); i += min_fd) {
3045 struct cmsghdr *cmsg;
3046 int ret;
3047
3048 min_fd = min(entries, entries - i);
3049 bpf_map_set_init_single(&msg, min_fd);
3050
3051 ret = recvmsg(fd, &msg.hdr, 0);
3052 if (ret <= 0)
3053 return ret ? : -1;
3054
3055 cmsg = CMSG_FIRSTHDR(&msg.hdr);
3056 if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
3057 return -EINVAL;
3058 if (msg.hdr.msg_flags & MSG_CTRUNC)
3059 return -EIO;
3060 if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
3061 return -ENOSYS;
3062
3063 min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
3064 if (min_fd > entries || min_fd <= 0)
3065 return -EINVAL;
3066
3067 memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
3068 memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
3069 memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));
3070
3071 needed = aux->num_ent;
3072 }
3073
3074 return 0;
3075 }
3076
3077 int bpf_send_map_fds(const char *path, const char *obj)
3078 {
3079 struct bpf_elf_ctx *ctx = &__ctx;
3080 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3081 struct bpf_map_data bpf_aux = {
3082 .fds = ctx->map_fds,
3083 .ent = ctx->maps,
3084 .st = &ctx->stat,
3085 .obj = obj,
3086 };
3087 int fd, ret;
3088
3089 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3090 if (fd < 0) {
3091 fprintf(stderr, "Cannot open socket: %s\n",
3092 strerror(errno));
3093 return -1;
3094 }
3095
3096 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3097
3098 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
3099 if (ret < 0) {
3100 fprintf(stderr, "Cannot connect to %s: %s\n",
3101 path, strerror(errno));
3102 close(fd);	/* do not leak the socket on error */
return -1;
3103 }
3104
3105 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
3106 bpf_maps_count(ctx));
3107 if (ret < 0)
3108 fprintf(stderr, "Cannot send fds to %s: %s\n",
3109 path, strerror(errno));
3110
3111 bpf_maps_teardown(ctx);
3112 close(fd);
3113 return ret;
3114 }
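
/*
 * bpf_send_map_fds() is the exporting half of the map sharing protocol,
 * used e.g. when tc is asked to export an object's maps over a Unix
 * domain socket: the map fds are passed via SCM_RIGHTS so that another
 * process can reuse the same maps without going through the bpf fs.
 * The importing half, bpf_recv_map_fds() below, binds the same path
 * and receives them.
 */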
3115
3116 int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
3117 unsigned int entries)
3118 {
3119 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3120 int fd, ret;
3121
3122 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3123 if (fd < 0) {
3124 fprintf(stderr, "Cannot open socket: %s\n",
3125 strerror(errno));
3126 return -1;
3127 }
3128
3129 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3130
3131 ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
3132 if (ret < 0) {
3133 fprintf(stderr, "Cannot bind to socket: %s\n",
3134 strerror(errno));
3135 close(fd);	/* do not leak the socket on error */
return -1;
3136 }
3137
3138 ret = bpf_map_set_recv(fd, fds, aux, entries);
3139 if (ret < 0)
3140 fprintf(stderr, "Cannot recv fds from %s: %s\n",
3141 path, strerror(errno));
3142
3143 unlink(addr.sun_path);
3144 close(fd);
3145 return ret;
3146 }
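
/*
 * Minimal importer sketch (illustrative only, error handling omitted;
 * the path and array size are placeholders):
 *
 *   int fds[64];
 *   struct bpf_map_aux aux = {};
 *   int err;
 *
 *   err = bpf_recv_map_fds("/tmp/bpf_uds", fds, &aux, 64);
 *   if (!err)
 *           use fds[0] .. fds[aux.num_ent - 1], e.g. via bpf_map_update()
 */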
3147 #endif /* HAVE_ELF */