/*
 * lib/bpf.c — from the iproute2 mirror (git.proxmox.com, mirror_iproute2.git).
 * Note: bsd/string.h is included only via include/utils.h.
 */
1 /*
2 * bpf.c BPF common code
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Daniel Borkmann <daniel@iogearbox.net>
10 * Jiri Pirko <jiri@resnulli.us>
11 * Alexei Starovoitov <ast@kernel.org>
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <string.h>
18 #include <stdbool.h>
19 #include <stdint.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <stdarg.h>
23 #include <limits.h>
24 #include <assert.h>
25
26 #ifdef HAVE_ELF
27 #include <libelf.h>
28 #include <gelf.h>
29 #endif
30
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/un.h>
34 #include <sys/vfs.h>
35 #include <sys/mount.h>
36 #include <sys/syscall.h>
37 #include <sys/sendfile.h>
38 #include <sys/resource.h>
39
40 #include <arpa/inet.h>
41
42 #include "utils.h"
43 #include "json_print.h"
44
45 #include "bpf_util.h"
46 #include "bpf_elf.h"
47 #include "bpf_scm.h"
48
/* Per program-type metadata: pinning subdirectory in the bpf fs, the
 * default ELF section name, and whether the program fd may be exported
 * over a unix domain socket (UDS).
 */
struct bpf_prog_meta {
	const char *type;	/* human-readable type keyword, e.g. "cls" */
	const char *subdir;	/* bpffs subdirectory, e.g. "tc", "xdp", "ip" */
	const char *section;	/* default ELF section for this prog type */
	bool may_uds_export;	/* fd may be handed out via UDS (tc only) */
};
55
/* Program types for which a bpffs working-directory hierarchy is
 * created; the first entry becomes the master directory, the rest are
 * set up as slaves pointing at it (see bpf_gen_hierarchy()).
 */
static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};
64
/* Metadata table indexed by enum bpf_prog_type; entries not listed here
 * are zero-filled, so .type/.subdir/.section act as validity markers
 * (checked by the accessors below).
 */
static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type		= "cls",
		.subdir		= "tc",
		.section	= ELF_SECTION_CLASSIFIER,
		.may_uds_export	= true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type		= "act",
		.subdir		= "tc",
		.section	= ELF_SECTION_ACTION,
		.may_uds_export	= true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type		= "xdp",
		.subdir		= "xdp",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type		= "lwt_in",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type		= "lwt_out",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type		= "lwt_xmit",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
		.type		= "lwt_seg6local",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
};
104
105 static bool bpf_map_offload_neutral(enum bpf_map_type type)
106 {
107 return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
108 }
109
110 static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
111 {
112 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
113 __bpf_prog_meta[type].subdir);
114 return __bpf_prog_meta[type].subdir;
115 }
116
117 const char *bpf_prog_to_default_section(enum bpf_prog_type type)
118 {
119 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
120 __bpf_prog_meta[type].section);
121 return __bpf_prog_meta[type].section;
122 }
123
#ifdef HAVE_ELF
/* Real implementation lives in the HAVE_ELF section further below. */
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose);
#else
/* Stub used when built without libelf: object loading is unsupported,
 * so fail with ENOSYS after telling the user why.
 */
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif
136
/* Pack a host pointer into the __u64 fields of union bpf_attr; the
 * intermediate unsigned long cast avoids sign extension on 32-bit.
 */
static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	unsigned long val = (unsigned long)ptr;

	return (__u64)val;
}
141
/* Thin wrapper around the bpf(2) syscall; glibc has no stub for it, so
 * invoke it via syscall(2).  On kernels/headers without __NR_bpf this
 * degrades to an ENOSYS failure.
 */
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}
152
153 static int bpf_map_update(int fd, const void *key, const void *value,
154 uint64_t flags)
155 {
156 union bpf_attr attr = {};
157
158 attr.map_fd = fd;
159 attr.key = bpf_ptr_to_u64(key);
160 attr.value = bpf_ptr_to_u64(value);
161 attr.flags = flags;
162
163 return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
164 }
165
166 static int bpf_prog_fd_by_id(uint32_t id)
167 {
168 union bpf_attr attr = {};
169
170 attr.prog_id = id;
171
172 return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
173 }
174
/* Query program info for @fd via BPF_OBJ_GET_INFO_BY_FD.
 *
 * @info_len is in/out: on entry the size of *info the caller provides,
 * on successful return the number of bytes the kernel actually filled
 * in; it is forced to 0 up front so that a failed call never leaves a
 * stale length behind.
 */
static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
			       uint32_t *info_len)
{
	union bpf_attr attr = {};
	int ret;

	attr.info.bpf_fd = fd;
	attr.info.info = bpf_ptr_to_u64(info);
	attr.info.info_len = *info_len;

	*info_len = 0;
	ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!ret)
		*info_len = attr.info.info_len;

	return ret;
}
192
/* Dump id/tag/jited state of the program with the given @id to both
 * plain output (@f / print_* helpers) and the JSON context.
 *
 * Returns 1 when the detailed info (tag, jited) could be retrieved and
 * printed, 0 otherwise; the id itself is printed in either case.
 */
int bpf_dump_prog_info(FILE *f, uint32_t id)
{
	struct bpf_prog_info info = {};
	uint32_t len = sizeof(info);
	int fd, ret, dump_ok = 0;
	SPRINT_BUF(tmp);

	open_json_object("prog");
	print_uint(PRINT_ANY, "id", "id %u ", id);

	/* Getting an fd may fail e.g. when the program was unloaded in
	 * the meantime or we lack the capability; just print the id.
	 */
	fd = bpf_prog_fd_by_id(id);
	if (fd < 0)
		goto out;

	ret = bpf_prog_info_by_fd(fd, &info, &len);
	if (!ret && len) {
		int jited = !!info.jited_prog_len;

		print_string(PRINT_ANY, "tag", "tag %s ",
			     hexstring_n2a(info.tag, sizeof(info.tag),
					   tmp, sizeof(tmp)));
		/* "jited" only shows up in plain output when true. */
		print_uint(PRINT_JSON, "jited", NULL, jited);
		if (jited && !is_json_context())
			fprintf(f, "jited ");
		dump_ok = 1;
	}

	close(fd);
out:
	close_json_object();
	return dump_ok;
}
225
/* Prepare a cBPF bytecode string of the form "<len><sep>op op ...".
 *
 * When @from_file is set, @arg names a file whose contents are read and
 * normalized (whitespace collapsed, newlines turned into commas) into a
 * freshly allocated buffer; otherwise @arg itself is used in place.
 *
 * On success returns 0 with *bpf_string pointing at the bytecode string,
 * *bpf_len holding the leading instruction count, and *need_release set
 * when the caller must free(*bpf_string).  Returns a negative errno on
 * failure (allocation, open, oversized input, malformed header).
 */
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		/* Worst-case length of one textual instruction incl. comma;
		 * sizeof() keeps the NUL as extra slack.
		 */
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *pos, c_prev = ' ';
		FILE *fp;
		int c;

		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = pos = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		/* Normalize: collapse runs of spaces/tabs, turn newlines
		 * into single commas, copy everything else verbatim.
		 */
		while ((c = fgetc(fp)) != EOF) {
			switch (c) {
			case '\n':
				if (c_prev != ',')
					*(pos++) = ',';
				c_prev = ',';
				break;
			case ' ':
			case '\t':
				if (c_prev != ' ')
					*(pos++) = c;
				c_prev = ' ';
				break;
			default:
				*(pos++) = c;
				c_prev = c;
			}
			/* Cast avoids a signed/unsigned comparison between
			 * ptrdiff_t and size_t; pos never exceeds tmp_len
			 * since we break right when the buffer is full.
			 */
			if ((size_t)(pos - tmp_string) == tmp_len)
				break;
		}

		/* Buffer filled before EOF means the file is too big. */
		if (!feof(fp)) {
			free(tmp_string);
			fclose(fp);
			return -E2BIG;
		}

		fclose(fp);
		*pos = 0;

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	/* Validate the "<len><separator>" prefix. */
	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}
296
/* Parse classic BPF bytecode "<len>,<code jt jf k>,..." from argv[0]
 * (or, with @from_file, from the file argv[0] names) into @bpf_ops.
 *
 * Returns the number of instructions parsed on success or -EINVAL on
 * any mismatch between the announced length and the actual program.
 * The caller must provide room for BPF_MAXINSNS entries in @bpf_ops.
 */
static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	/* Walk separator-delimited instructions; each must decode as
	 * exactly four fields (code, jt, jf, k).
	 */
	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}
346
/* Print @len classic BPF instructions from the @bpf_ops attribute in
 * the same "bytecode '<len>,<code jt jf k>,...'" form that the parser
 * accepts; the last instruction is followed by the closing quote
 * instead of a comma.
 */
void bpf_print_ops(FILE *f, struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (!len)
		return;

	fprintf(f, "bytecode \'%u,", len);

	for (i = 0; i < len; i++)
		fprintf(f, "%hu %hhu %hhu %u%c", ops[i].code, ops[i].jt,
			ops[i].jf, ops[i].k,
			i == len - 1 ? '\'' : ',');
}
364
/* Print a field-by-field diff between the map spec from the object file
 * (@obj) and the spec of an already pinned map (@pin) so the user can
 * see why reuse of the pinned map was rejected.
 */
static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type:         %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key:     %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value:   %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems:    %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags:        %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}
388
/* Owner program info derived from a map's fdinfo. */
struct bpf_prog_data {
	unsigned int type;	/* owner_prog_type from fdinfo, 0 if none */
	unsigned int jited;	/* owner_jited from fdinfo */
};

/* Extra per-map state kept next to struct bpf_elf_map. */
struct bpf_map_ext {
	struct bpf_prog_data owner;	/* owning program, if any */
	unsigned int btf_id_key;	/* BTF type id of the key type */
	unsigned int btf_id_val;	/* BTF type id of the value type */
};
399
/* Reconstruct a map's specification by scraping /proc/self/fdinfo/<fd>.
 *
 * Fills @map (zeroed first, so missing lines leave fields at 0) and,
 * when @ext is non-NULL, the owner program info.  Returns 0 on success
 * or -EIO when the fdinfo file cannot be opened (no procfs).
 */
static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
					  struct bpf_map_ext *ext)
{
	unsigned int val, owner_type = 0, owner_jited = 0;
	char file[PATH_MAX], buff[4096];
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
	memset(map, 0, sizeof(*map));

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	/* Each line is "<key>:\t<value>"; unknown lines are ignored. */
	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			map->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			map->size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			map->size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			map->max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			map->flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
		else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
			owner_jited = val;
	}

	fclose(fp);
	if (ext) {
		memset(ext, 0, sizeof(*ext));
		ext->owner.type = owner_type;
		ext->owner.jited = owner_jited;
	}

	return 0;
}
442
/* Verify that a pinned map (@fd) matches the object file's expected
 * spec (@map) over the first @length bytes of struct bpf_elf_map.
 *
 * Returns 0 when specs match, or when the kernel exposes no map fdinfo
 * at all (everything reads back as 0, which no real map can have since
 * type 0 is BPF_MAP_TYPE_UNSPEC); otherwise reports the diff and
 * returns -EINVAL.  A mismatching owner prog type is only warned about,
 * since the kernel makes the final call on that.
 */
static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    struct bpf_map_ext *ext, int length,
				    enum bpf_prog_type type)
{
	struct bpf_elf_map tmp, zero = {};
	int ret;

	ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
	if (ret < 0)
		return ret;

	/* The decision to reject this is on kernel side eventually, but
	 * at least give the user a chance to know what's wrong.
	 */
	if (ext->owner.type && ext->owner.type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, ext->owner.type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
		 * so just accept it. We know we do have an eBPF fd and in this
		 * case, everything is 0. It is guaranteed that no such map exists
		 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}
476
/* Mount a bpf filesystem on @target.
 *
 * First make the mount point private so the bpf mount does not leak
 * into peer mount namespaces; if that fails with EINVAL the target is
 * likely not a mount point yet, so bind-mount it onto itself once and
 * retry.  Returns 0 on success, -1 (with a message) on failure.
 */
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	/* mode=0700: the bpf fs root is only accessible to root. */
	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}
505
/* Ensure the intended mount point @target exists, creating it with
 * mode 0700 if needed.  Returns 0 on success, mkdir's result otherwise.
 */
static int bpf_mnt_check_target(const char *target)
{
	struct stat sb = {};

	if (stat(target, &sb) == 0)
		return 0;

	if (mkdir(target, S_IRWXU)) {
		fprintf(stderr, "mkdir %s failed: %s\n", target,
			strerror(errno));
		return -1;
	}

	return 0;
}
523
/* Check whether @mnt is a mounted filesystem whose f_type matches
 * @magic; returns 0 on match, -ENOENT otherwise.
 */
static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0 ||
	    (unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}
535
536 static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
537 int len, const char *mntpt)
538 {
539 int ret;
540
541 ret = bpf_valid_mntpt(mntpt, magic);
542 if (!ret) {
543 strlcpy(mnt, mntpt, len);
544 return mnt;
545 }
546
547 return NULL;
548 }
549
550 static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
551 char *mnt, int len,
552 const char * const *known_mnts)
553 {
554 const char * const *ptr;
555 char type[100];
556 FILE *fp;
557
558 if (known_mnts) {
559 ptr = known_mnts;
560 while (*ptr) {
561 if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
562 return mnt;
563 ptr++;
564 }
565 }
566
567 if (len != PATH_MAX)
568 return NULL;
569
570 fp = fopen("/proc/mounts", "r");
571 if (fp == NULL)
572 return NULL;
573
574 while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
575 mnt, type) == 2) {
576 if (strcmp(type, fstype) == 0)
577 break;
578 }
579
580 fclose(fp);
581 if (strcmp(type, fstype) != 0)
582 return NULL;
583
584 return mnt;
585 }
586
/* Stream the kernel's trace_pipe (bpf_trace_printk() output) to stderr
 * until interrupted or a read/write error occurs.  Locates tracefs via
 * a list of well-known mount points, falling back to /proc/mounts.
 * Always returns -1 since the copy loop only ends on error/signal.
 */
int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	int fd_in, fd_out = STDERR_FILENO;
	char tpipe[PATH_MAX];
	const char *mnt;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd_in = open(tpipe, O_RDONLY);
	if (fd_in < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	/* trace_pipe blocks on read until data is available; copy it
	 * through verbatim.  static buff keeps it off the stack.
	 */
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd_in, buff, sizeof(buff));
		if (ret > 0 && write(fd_out, buff, ret) == ret)
			continue;
		break;
	}

	close(fd_in);
	return -1;
}
628
629 static int bpf_gen_global(const char *bpf_sub_dir)
630 {
631 char bpf_glo_dir[PATH_MAX];
632 int ret;
633
634 snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
635 bpf_sub_dir, BPF_DIR_GLOBALS);
636
637 ret = mkdir(bpf_glo_dir, S_IRWXU);
638 if (ret && errno != EEXIST) {
639 fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
640 strerror(errno));
641 return ret;
642 }
643
644 return 0;
645 }
646
647 static int bpf_gen_master(const char *base, const char *name)
648 {
649 char bpf_sub_dir[PATH_MAX + NAME_MAX + 1];
650 int ret;
651
652 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);
653
654 ret = mkdir(bpf_sub_dir, S_IRWXU);
655 if (ret && errno != EEXIST) {
656 fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
657 strerror(errno));
658 return ret;
659 }
660
661 return bpf_gen_global(bpf_sub_dir);
662 }
663
/* Fallback for bpf_gen_slave() when symlinks are not permitted on the
 * bpf fs (older kernels return EPERM): create the directory and
 * bind-mount the master directory onto it instead.  The directory is
 * removed again if the bind mount fails.
 */
static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int ret;

	ret = mkdir(full_name, S_IRWXU);
	if (ret) {
		/* Caller only gets here when the symlink attempt did not
		 * hit EEXIST, so the directory cannot exist either.
		 */
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return ret;
	}

	ret = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (ret) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return ret;
}
686
/* Create "<base><name>" as a slave of "<base><link>/", preferably as a
 * symlink; on EPERM fall back to a bind mount, and on EEXIST accept a
 * pre-existing entry — but if that entry is a real directory rather
 * than a symlink (e.g. created by an older version or by the bind-mount
 * fallback), make sure its globals subdirectory exists.
 */
static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char bpf_lnk_dir[PATH_MAX + NAME_MAX + 1];
	char bpf_sub_dir[PATH_MAX + NAME_MAX];
	struct stat sb = {};
	int ret;

	snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				return ret;
			}

			/* bpf fs refused the symlink; bind mount instead. */
			return bpf_slave_via_bind_mnt(bpf_sub_dir,
						      bpf_lnk_dir);
		}

		/* Entry exists: distinguish symlink from plain dir. */
		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			return ret;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK)
			return bpf_gen_global(bpf_sub_dir);
	}

	return 0;
}
724
725 static int bpf_gen_hierarchy(const char *base)
726 {
727 int ret, i;
728
729 ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
730 for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
731 ret = bpf_gen_slave(base,
732 bpf_prog_to_subdir(__bpf_types[i]),
733 bpf_prog_to_subdir(__bpf_types[0]));
734 return ret;
735 }
736
/* Return the bpf fs working directory for @type ("<mnt>/<subdir>/"), or
 * just the mount root when @type is BPF_PROG_TYPE_UNSPEC (0).
 *
 * On first use this finds (honoring the BPF_ENV_MNT override) or, if
 * necessary, creates and mounts the bpf fs and generates the directory
 * hierarchy; the result is cached in function-local statics, so the
 * returned pointer stays valid but the function is not thread-safe.
 * Returns NULL when no mount could be found or created.
 */
static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	const char *mnt_env = getenv(BPF_ENV_MNT);
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		/* type != 0 selects the per-type subdirectory. */
		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	if (mnt_env)
		mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
					    sizeof(bpf_tmp), mnt_env);
	else
		mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
				     sizeof(bpf_tmp), bpf_known_mnts);
	if (!mnt) {
		/* Nothing mounted yet: try to mount it ourselves. */
		mnt = mnt_env ? : BPF_DIR_MNT;
		ret = bpf_mnt_check_target(mnt);
		if (!ret)
			ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	/* Cache even a NULL result: don't retry mounting every call. */
	bpf_mnt_cached = true;
	return mnt;
}
792
/* Retrieve an fd for a pinned object.  A leading "m:" in @pathname is
 * shorthand for a path relative to the per-type working directory
 * (e.g. "m:globals/foo" -> "<mnt>/<subdir>/globals/foo").
 */
static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
{
	union bpf_attr attr = {};
	char tmp[PATH_MAX];

	if (strlen(pathname) > 2 && pathname[0] == 'm' &&
	    pathname[1] == ':' && bpf_get_work_dir(type)) {
		snprintf(tmp, sizeof(tmp), "%s/%s",
			 bpf_get_work_dir(type), pathname + 2);
		pathname = tmp;
	}

	attr.pathname = bpf_ptr_to_u64(pathname);

	return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}
809
810 static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
811 {
812 int prog_fd = bpf_obj_get(pathname, type);
813
814 if (prog_fd < 0)
815 fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
816 pathname, strerror(errno));
817 return prog_fd;
818 }
819
/* Parse the command-line BPF specification in cfg->argv into @cfg.
 *
 * The first token selects the mode (classic bytecode inline/from file,
 * eBPF object file, or pinned object); @opt_tbl gates which modes are
 * permitted by the caller.  For eBPF modes the optional keywords
 * "type", "section", "export" and "verbose" are consumed as well.
 * NEXT_ARG()/NEXT_ARG_FWD()/PREV_ARG() mutate argc/argv, and the
 * consumed state is written back to cfg->argc/argv at the end.
 *
 * Returns < 0 on error; otherwise the cBPF instruction count (bytecode
 * modes), the pinned program fd (EBPF_PINNED), or 0 (EBPF_OBJECT,
 * where loading happens later in bpf_do_load()).
 */
static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	/* Mode selection from the leading keyword. */
	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		cfg->mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		cfg->mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		cfg->mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		cfg->mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		/* Optional "type <name>" keyword; defaults to cls. */
		if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						cfg->type = i;
						break;
					}
				}

				if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				cfg->type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		/* Optional "section <name>", else per-type default. */
		section = bpf_prog_to_default_section(cfg->type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		/* Optional UDS export; env var takes precedence. */
		if (__bpf_prog_meta[cfg->type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
		ret = bpf_ops_parse(argc, argv, cfg->opcodes,
				    cfg->mode == CBPF_FILE);
		cfg->n_opcodes = ret;
	} else if (cfg->mode == EBPF_OBJECT) {
		ret = 0; /* program will be loaded by load stage */
	} else if (cfg->mode == EBPF_PINNED) {
		ret = bpf_obj_pinned(file, cfg->type);
		cfg->prog_fd = ret;
	} else {
		return -1;
	}

	cfg->object  = file;
	cfg->section = section;
	cfg->uds     = uds_name;
	cfg->argc    = argc;
	cfg->argv    = argv;
	cfg->verbose = verbose;

	return ret;
}
930
931 static int bpf_do_load(struct bpf_cfg_in *cfg)
932 {
933 if (cfg->mode == EBPF_OBJECT) {
934 cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
935 cfg->section, cfg->ifindex,
936 cfg->verbose);
937 return cfg->prog_fd;
938 }
939 return 0;
940 }
941
/* Load the parsed program (if needed) and hand it to the caller's
 * netlink callbacks: cbpf_cb for classic bytecode, ebpf_cb for an eBPF
 * fd plus an annotation string "<object>:[<section>]" (or "*fsobj" for
 * pinned programs).  Returns 0 on success or the load error.
 */
int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
		    void *nl)
{
	char annotation[256];
	int ret;

	ret = bpf_do_load(cfg);
	if (ret < 0)
		return ret;

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
		ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		/* NOTE(review): basename() here is presumably the GNU
		 * variant (string.h, non-modifying) rather than POSIX
		 * libgen.h — confirm against the build's includes.
		 */
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), cfg->mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, cfg->prog_fd, annotation);
	}

	return 0;
}
963
964 int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
965 {
966 bool opt_tbl[BPF_MODE_MAX] = {};
967
968 if (ops->cbpf_cb) {
969 opt_tbl[CBPF_BYTECODE] = true;
970 opt_tbl[CBPF_FILE] = true;
971 }
972
973 if (ops->ebpf_cb) {
974 opt_tbl[EBPF_OBJECT] = true;
975 opt_tbl[EBPF_PINNED] = true;
976 }
977
978 return bpf_do_parse(cfg, opt_tbl);
979 }
980
/* Convenience wrapper: parse, then load and emit via the callbacks. */
int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
			      const struct bpf_cfg_ops *ops, void *nl)
{
	int err = bpf_parse_common(cfg, ops);

	return err < 0 ? err : bpf_load_common(cfg, ops, nl);
}
992
/* Graft a program into a pinned PROG_ARRAY map at @map_path.
 *
 * The program spec is parsed/loaded from @argv (object file or pinned);
 * the target slot comes from @key, or failing that is inferred from a
 * section name of the form "x/<key>".  The pinned map is sanity-checked
 * against a PROG_ARRAY spec (type/key/value sizes only — comparison
 * stops at offsetof max_elem) before the update.  Returns 0 on success
 * or a negative error.
 */
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT]	= true,
		[EBPF_PINNED]	= true,
	};
	const struct bpf_elf_map test = {
		.type		= BPF_MAP_TYPE_PROG_ARRAY,
		.size_key	= sizeof(int),
		.size_value	= sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.type		= BPF_PROG_TYPE_UNSPEC,
		.argc		= argc,
		.argv		= argv,
	};
	struct bpf_map_ext ext = {};
	int ret, prog_fd, map_fd;
	uint32_t map_key;

	ret = bpf_do_parse(&cfg, opt_tbl);
	if (ret < 0)
		return ret;

	ret = bpf_do_load(&cfg);
	if (ret < 0)
		return ret;

	prog_fd = cfg.prog_fd;

	if (key) {
		map_key = *key;
	} else {
		/* NOTE(review): %i scans into a uint32_t here; same
		 * width as int on supported targets, but strictly an
		 * unsigned conversion (%u) would match the type.
		 */
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, cfg.type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
				       offsetof(struct bpf_elf_map, max_elem),
				       cfg.type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}
1059
1060 int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
1061 {
1062 union bpf_attr attr = {};
1063
1064 attr.target_fd = target_fd;
1065 attr.attach_bpf_fd = prog_fd;
1066 attr.attach_type = type;
1067
1068 return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
1069 }
1070
1071 int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
1072 {
1073 union bpf_attr attr = {};
1074
1075 attr.target_fd = target_fd;
1076 attr.attach_type = type;
1077
1078 return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
1079 }
1080
/* Load an eBPF program via BPF_PROG_LOAD.
 *
 * @size_insns is in bytes and converted to an instruction count; a
 * non-zero @ifindex requests device offload.  When @size_log > 0 the
 * kernel verifier log is written into @log (level 1).  Returns the
 * program fd or a negative value with errno set.
 */
static int bpf_prog_load_dev(enum bpf_prog_type type,
			     const struct bpf_insn *insns, size_t size_insns,
			     const char *license, __u32 ifindex,
			     char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.prog_type = type;
	attr.insns = bpf_ptr_to_u64(insns);
	attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
	attr.license = bpf_ptr_to_u64(license);
	attr.prog_ifindex = ifindex;

	if (size_log > 0) {
		attr.log_buf = bpf_ptr_to_u64(log);
		attr.log_size = size_log;
		attr.log_level = 1;
	}

	return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}
1102
1103 int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
1104 size_t size_insns, const char *license, char *log,
1105 size_t size_log)
1106 {
1107 return bpf_prog_load_dev(type, insns, size_insns, license, 0,
1108 log, size_log);
1109 }
1110
#ifdef HAVE_ELF
/* A single program extracted from an ELF section. */
struct bpf_elf_prog {
	enum bpf_prog_type	type;		/* program type to load as */
	struct bpf_insn		*insns;		/* instruction buffer */
	unsigned int		insns_num;	/* number of instructions */
	size_t			size;		/* buffer size in bytes */
	const char		*license;	/* license string from ELF */
};

/* Hash-table entry mapping a custom pinning id to a bpffs subpath. */
struct bpf_hash_entry {
	unsigned int		pinning;	/* pinning id from the map spec */
	const char		*subpath;	/* relative pin path */
	struct bpf_hash_entry	*next;		/* chain within one bucket */
};

/* Snapshot of kernel BPF configuration relevant for loading. */
struct bpf_config {
	unsigned int		jit_enabled;
};

/* Parsed in-memory view of the object's BTF section. */
struct bpf_btf {
	const struct btf_header	*hdr;		/* raw BTF header */
	const void		*raw;		/* raw section data */
	const char		*strings;	/* BTF string table */
	const struct btf_type	**types;	/* index -> type record */
	int			types_num;
};

/* All state for loading one ELF object file. */
struct bpf_elf_ctx {
	struct bpf_config	cfg;
	Elf			*elf_fd;	/* libelf handle */
	GElf_Ehdr		elf_hdr;
	Elf_Data		*sym_tab;	/* symbol table section */
	Elf_Data		*str_tab;	/* string table section */
	Elf_Data		*btf_data;	/* raw .BTF section */
	char			obj_uid[64];	/* object hash as hex string */
	int			obj_fd;		/* fd of the object file */
	int			btf_fd;		/* fd of loaded BTF, or 0 */
	int			map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map	maps[ELF_MAX_MAPS];
	struct bpf_map_ext	maps_ext[ELF_MAX_MAPS];
	struct bpf_elf_prog	prog_text;	/* .text for call relocation */
	struct bpf_btf		btf;
	int			sym_num;
	int			map_num;
	int			map_len;	/* size of one map spec entry */
	bool			*sec_done;	/* per-section "processed" flags */
	int			sec_maps;	/* section index of maps */
	int			sec_text;	/* section index of .text */
	int			sec_btf;	/* section index of .BTF */
	char			license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type	type;
	__u32			ifindex;	/* non-zero: offload target */
	bool			verbose;
	bool			noafalg;	/* AF_ALG unavailable for hashing */
	struct bpf_elf_st	stat;		/* identity of the object file */
	struct bpf_hash_entry	*ht[256];	/* custom pinning table */
	char			*log;		/* verifier log buffer */
	size_t			log_size;
};

/* One ELF section: header, payload and resolved name. */
struct bpf_elf_sec_data {
	GElf_Shdr		sec_hdr;
	Elf_Data		*sec_data;
	const char		*sec_name;
};

/* Map handover data for the UDS export path. */
struct bpf_map_data {
	int			*fds;
	const char		*obj;
	struct bpf_elf_st	*st;
	struct bpf_elf_map	*ent;
};
1183
1184 static bool bpf_log_has_data(struct bpf_elf_ctx *ctx)
1185 {
1186 return ctx->log && ctx->log[0];
1187 }
1188
/* Print a printf-style error message followed by the verifier log, if
 * any.  In non-verbose mode only the last BPF_MAX_LOG bytes of the log
 * are shown.  The log buffer is cleared afterwards so stale output is
 * never reprinted on the next error.
 */
static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (bpf_log_has_data(ctx)) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}
1215
1216 static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
1217 {
1218 const size_t log_max = UINT_MAX >> 8;
1219 size_t log_size = ctx->log_size;
1220 char *ptr;
1221
1222 if (!ctx->log) {
1223 log_size = 65536;
1224 } else if (log_size < log_max) {
1225 log_size <<= 1;
1226 if (log_size > log_max)
1227 log_size = log_max;
1228 } else {
1229 return -EINVAL;
1230 }
1231
1232 ptr = realloc(ctx->log, log_size);
1233 if (!ptr)
1234 return -ENOMEM;
1235
1236 ptr[0] = 0;
1237 ctx->log = ptr;
1238 ctx->log_size = log_size;
1239
1240 return 0;
1241 }
1242
/* Create a BPF map via BPF_MAP_CREATE and return its fd.
 *
 * For map-in-map types a non-zero @inner_fd supplies the inner map
 * template; in that case the value size is forced to sizeof(int) since
 * values of such maps are map fds.  Non-zero @ifindex requests device
 * offload; @btf_fd plus the two type ids attach BTF key/value info.
 */
static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
			  uint32_t size_value, uint32_t max_elem,
			  uint32_t flags, int inner_fd, int btf_fd,
			  uint32_t ifindex, uint32_t btf_id_key,
			  uint32_t btf_id_val)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = size_key;
	attr.value_size = inner_fd ? sizeof(int) : size_value;
	attr.max_entries = max_elem;
	attr.map_flags = flags;
	attr.inner_map_fd = inner_fd;
	attr.map_ifindex = ifindex;
	attr.btf_fd = btf_fd;
	attr.btf_key_type_id = btf_id_key;
	attr.btf_value_type_id = btf_id_val;

	return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}
1264
1265 static int bpf_btf_load(void *btf, size_t size_btf,
1266 char *log, size_t size_log)
1267 {
1268 union bpf_attr attr = {};
1269
1270 attr.btf = bpf_ptr_to_u64(btf);
1271 attr.btf_size = size_btf;
1272
1273 if (size_log > 0) {
1274 attr.btf_log_buf = bpf_ptr_to_u64(log);
1275 attr.btf_log_size = size_log;
1276 attr.btf_log_level = 1;
1277 }
1278
1279 return bpf(BPF_BTF_LOAD, &attr, sizeof(attr));
1280 }
1281
1282 static int bpf_obj_pin(int fd, const char *pathname)
1283 {
1284 union bpf_attr attr = {};
1285
1286 attr.pathname = bpf_ptr_to_u64(pathname);
1287 attr.bpf_fd = fd;
1288
1289 return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
1290 }
1291
/* Compute the SHA-1 of the file @object into @out using the kernel's
 * AF_ALG interface (no userspace crypto dependency): the file is
 * streamed into the hash socket with sendfile() and the 20-byte digest
 * read back.  @len must be exactly 20.  Returns 0 on success, a
 * negative errno/-1 otherwise.
 */
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family	= AF_ALG,
		.salg_type	= "hash",
		.salg_name	= "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0)
		return cfd;

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0)
		goto out_cfd;

	/* accept() yields the operation socket for this hash instance. */
	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		/* Cast: st_size is off_t, which does not match %zu on
		 * all ABIs (varargs type mismatch on 32-bit otherwise).
		 */
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, (size_t)stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}
1359
1360 static void bpf_init_env(void)
1361 {
1362 struct rlimit limit = {
1363 .rlim_cur = RLIM_INFINITY,
1364 .rlim_max = RLIM_INFINITY,
1365 };
1366
1367 /* Don't bother in case we fail! */
1368 setrlimit(RLIMIT_MEMLOCK, &limit);
1369
1370 if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC))
1371 fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
1372 }
1373
1374 static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
1375 uint32_t pinning)
1376 {
1377 struct bpf_hash_entry *entry;
1378
1379 entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
1380 while (entry && entry->pinning != pinning)
1381 entry = entry->next;
1382
1383 return entry ? entry->subpath : NULL;
1384 }
1385
1386 static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
1387 uint32_t pinning)
1388 {
1389 switch (pinning) {
1390 case PIN_OBJECT_NS:
1391 case PIN_GLOBAL_NS:
1392 return false;
1393 case PIN_NONE:
1394 return true;
1395 default:
1396 return !bpf_custom_pinning(ctx, pinning);
1397 }
1398 }
1399
1400 static void bpf_make_pathname(char *pathname, size_t len, const char *name,
1401 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1402 {
1403 switch (pinning) {
1404 case PIN_OBJECT_NS:
1405 snprintf(pathname, len, "%s/%s/%s",
1406 bpf_get_work_dir(ctx->type),
1407 ctx->obj_uid, name);
1408 break;
1409 case PIN_GLOBAL_NS:
1410 snprintf(pathname, len, "%s/%s/%s",
1411 bpf_get_work_dir(ctx->type),
1412 BPF_DIR_GLOBALS, name);
1413 break;
1414 default:
1415 snprintf(pathname, len, "%s/../%s/%s",
1416 bpf_get_work_dir(ctx->type),
1417 bpf_custom_pinning(ctx, pinning), name);
1418 break;
1419 }
1420 }
1421
1422 static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
1423 uint32_t pinning)
1424 {
1425 char pathname[PATH_MAX];
1426
1427 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1428 return 0;
1429
1430 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1431 return bpf_obj_get(pathname, ctx->type);
1432 }
1433
1434 static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
1435 {
1436 char tmp[PATH_MAX];
1437 int ret;
1438
1439 snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
1440 ctx->obj_uid);
1441
1442 ret = mkdir(tmp, S_IRWXU);
1443 if (ret && errno != EEXIST) {
1444 fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
1445 return ret;
1446 }
1447
1448 return 0;
1449 }
1450
1451 static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
1452 const char *todo)
1453 {
1454 char tmp[PATH_MAX], rem[PATH_MAX], *sub;
1455 int ret;
1456
1457 snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
1458 snprintf(rem, sizeof(rem), "%s/", todo);
1459 sub = strtok(rem, "/");
1460
1461 while (sub) {
1462 if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
1463 return -EINVAL;
1464
1465 strcat(tmp, sub);
1466 strcat(tmp, "/");
1467
1468 ret = mkdir(tmp, S_IRWXU);
1469 if (ret && errno != EEXIST) {
1470 fprintf(stderr, "mkdir %s failed: %s\n", tmp,
1471 strerror(errno));
1472 return ret;
1473 }
1474
1475 sub = strtok(NULL, "/");
1476 }
1477
1478 return 0;
1479 }
1480
1481 static int bpf_place_pinned(int fd, const char *name,
1482 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1483 {
1484 char pathname[PATH_MAX];
1485 const char *tmp;
1486 int ret = 0;
1487
1488 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1489 return 0;
1490
1491 if (pinning == PIN_OBJECT_NS)
1492 ret = bpf_make_obj_path(ctx);
1493 else if ((tmp = bpf_custom_pinning(ctx, pinning)))
1494 ret = bpf_make_custom_path(ctx, tmp);
1495 if (ret < 0)
1496 return ret;
1497
1498 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1499 return bpf_obj_pin(fd, pathname);
1500 }
1501
/* Print a status report for one program load attempt to stderr,
 * followed by the captured verifier log. fd < 0 means the load was
 * rejected; errno then still holds the kernel's reason.
 */
static void bpf_prog_report(int fd, const char *section,
			    const struct bpf_elf_prog *prog,
			    struct bpf_elf_ctx *ctx)
{
	unsigned int insns = prog->size / sizeof(struct bpf_insn);

	fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", prog->type);
	/* Flag programs exceeding the kernel's per-program insn limit. */
	fprintf(stderr, " - Instructions: %u (%u over limit)\n",
		insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
	fprintf(stderr, " - License: %s\n\n", prog->license);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}
1520
/* Load one program section into the kernel. If the load fails because
 * the verifier log buffer was too small (ENOSPC, or no buffer yet),
 * the buffer is enlarged and the load retried up to 10 times so the
 * user gets a complete verifier dump. Returns the prog fd or a
 * negative error.
 */
static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	/* Clear errno so a stale value is not mistaken for ENOSPC. */
	errno = 0;
	fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
			       prog->license, ctx->ifindex,
			       ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}
1551
/* Print a status report for one map creation attempt to stderr.
 * fd < 0 means the kernel rejected it; errno then holds the cause.
 */
static void bpf_map_report(int fd, const char *name,
			   const struct bpf_elf_map *map,
			   struct bpf_elf_ctx *ctx, int inner_fd)
{
	fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", map->type);
	fprintf(stderr, " - Identifier: %u\n", map->id);
	fprintf(stderr, " - Pinning: %u\n", map->pinning);
	fprintf(stderr, " - Size key: %u\n", map->size_key);
	/* Outer map-in-map values carry inner map fds, hence int size. */
	fprintf(stderr, " - Size value: %u\n",
		inner_fd ? (int)sizeof(int) : map->size_value);
	fprintf(stderr, " - Max elems: %u\n", map->max_elem);
	fprintf(stderr, " - Flags: %#x\n\n", map->flags);
}
1570
1571 static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
1572 {
1573 int i;
1574
1575 for (i = 0; i < ctx->map_num; i++) {
1576 if (ctx->maps[i].id != id)
1577 continue;
1578 if (ctx->map_fds[i] < 0)
1579 return -EINVAL;
1580
1581 return ctx->map_fds[i];
1582 }
1583
1584 return -ENOENT;
1585 }
1586
/* Explain to the user why poking an inner map fd into an outer map
 * failed: either the index is out of range for an array-of-maps, or
 * the inner map's spec does not match the outer map's expectation.
 */
static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
{
	struct bpf_elf_map outer_map;
	int ret;

	fprintf(stderr, "Cannot insert map into map! ");

	/* Reconstruct the outer map spec from its fdinfo to check idx. */
	ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
	if (!ret) {
		if (idx >= outer_map.max_elem &&
		    outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
			fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
				outer_map.max_elem, idx);
			return;
		}
	}

	fprintf(stderr, "Different map specs used for outer and inner map?\n");
}
1606
1607 static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
1608 {
1609 return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1610 map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
1611 }
1612
/* Create or reuse one map from the ELF maps section.
 *
 * Order of attempts: 1) reuse an already pinned map from the bpf fs
 * after a self-check against the expected spec, 2) create it fresh
 * via BPF_MAP_CREATE and pin it if requested.
 *
 * With have_map_in_map != NULL (first pass) outer map-in-map objects
 * are only counted and their creation deferred (returns 0); with NULL
 * (second pass) the inner map fd is resolved through the map's
 * inner_id. Returns the map fd on success, negative error otherwise.
 */
static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
			  const struct bpf_elf_map *map, struct bpf_map_ext *ext,
			  int *have_map_in_map)
{
	int fd, ifindex, ret, map_inner_fd = 0;

	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		/* Only trust the pinned object if its spec matches ours. */
		ret = bpf_map_selfcheck_pinned(fd, map, ext,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		/* First pass: defer outer maps until inner fds exist. */
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map \'%s\' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		/* Second pass: the inner map must be loaded by now. */
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map \'%s\' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	/* Offload-neutral map types are always created on the host. */
	ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd, ctx->btf_fd,
			    ifindex, ext->btf_id_key, ext->btf_id_val);

	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	/* A pre-existing pin (EEXIST) is tolerated; other pin errors
	 * drop the fd again.
	 */
	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0 && errno != EEXIST) {
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		close(fd);
		return ret;
	}

	return fd;
}
1676
1677 static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
1678 const GElf_Sym *sym)
1679 {
1680 return ctx->str_tab->d_buf + sym->st_name;
1681 }
1682
1683 static int bpf_btf_find(struct bpf_elf_ctx *ctx, const char *name)
1684 {
1685 const struct btf_type *type;
1686 const char *res;
1687 int id;
1688
1689 for (id = 1; id < ctx->btf.types_num; id++) {
1690 type = ctx->btf.types[id];
1691 if (type->name_off >= ctx->btf.hdr->str_len)
1692 continue;
1693 res = &ctx->btf.strings[type->name_off];
1694 if (!strcmp(res, name))
1695 return id;
1696 }
1697
1698 return -ENOENT;
1699 }
1700
/* Look up the compiler-emitted "____btf_map_<name>" struct for a map
 * and extract the BTF type ids of its "key" and "value" members, so
 * maps can be created with BTF annotations. Returns 0 and fills
 * *id_key/*id_val on success, negative error otherwise.
 */
static int bpf_btf_find_kv(struct bpf_elf_ctx *ctx, const struct bpf_elf_map *map,
			   const char *name, uint32_t *id_key, uint32_t *id_val)
{
	const struct btf_member *key, *val;
	const struct btf_type *type;
	char btf_name[512];
	const char *res;
	int id;

	snprintf(btf_name, sizeof(btf_name), "____btf_map_%s", name);
	id = bpf_btf_find(ctx, btf_name);
	if (id < 0)
		return id;

	/* Must be a struct with exactly the two members key and value. */
	type = ctx->btf.types[id];
	if (BTF_INFO_KIND(type->info) != BTF_KIND_STRUCT)
		return -EINVAL;
	if (BTF_INFO_VLEN(type->info) != 2)
		return -EINVAL;

	/* Member array follows directly after the btf_type header. */
	key = ((void *) type) + sizeof(*type);
	val = key + 1;
	if (!key->type || key->type >= ctx->btf.types_num ||
	    !val->type || val->type >= ctx->btf.types_num)
		return -EINVAL;

	if (key->name_off >= ctx->btf.hdr->str_len ||
	    val->name_off >= ctx->btf.hdr->str_len)
		return -EINVAL;

	res = &ctx->btf.strings[key->name_off];
	if (strcmp(res, "key"))
		return -EINVAL;

	res = &ctx->btf.strings[val->name_off];
	if (strcmp(res, "value"))
		return -EINVAL;

	*id_key = key->type;
	*id_val = val->type;
	return 0;
}
1743
1744 static void bpf_btf_annotate(struct bpf_elf_ctx *ctx, int which, const char *name)
1745 {
1746 uint32_t id_key = 0, id_val = 0;
1747
1748 if (!bpf_btf_find_kv(ctx, &ctx->maps[which], name, &id_key, &id_val)) {
1749 ctx->maps_ext[which].btf_id_key = id_key;
1750 ctx->maps_ext[which].btf_id_val = id_val;
1751 }
1752 }
1753
1754 static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
1755 {
1756 const char *name;
1757 GElf_Sym sym;
1758 int i;
1759
1760 for (i = 0; i < ctx->sym_num; i++) {
1761 int type = GELF_ST_TYPE(sym.st_info);
1762
1763 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1764 continue;
1765
1766 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1767 (type != STT_NOTYPE && type != STT_OBJECT) ||
1768 sym.st_shndx != ctx->sec_maps ||
1769 sym.st_value / ctx->map_len != which)
1770 continue;
1771
1772 name = bpf_str_tab_name(ctx, &sym);
1773 bpf_btf_annotate(ctx, which, name);
1774 return name;
1775 }
1776
1777 return NULL;
1778 }
1779
/* Create/load every map from the ELF maps section in up to three
 * passes: 1) all plain maps, counting outer map-in-map objects whose
 * creation is deferred, 2) the deferred outer maps, now that inner fds
 * can be resolved by inner_id, 3) insert inner map fds into the outer
 * maps at the requested inner_idx. Returns 0 or a negative error.
 */
static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		/* Per-object pinning dirs are named by the object hash,
		 * which needs AF_ALG support in the kernel.
		 */
		if (ctx->maps[i].pinning == PIN_OBJECT_NS &&
		    ctx->noafalg) {
			fprintf(stderr, "Missing kernel AF_ALG support for PIN_OBJECT_NS!\n");
			return -ENOTSUP;
		}

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], &have_map_in_map);
		if (fd < 0)
			return fd;

		/* fd == 0 flags a deferred outer map; mark slot with -1. */
		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	/* Pass 2: create the deferred outer map-in-map objects. */
	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	/* Pass 3: wire inner maps into their outer maps at inner_idx. */
	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_idx);
				return ret;
			}
		}
	}

	return 0;
}
1847
1848 static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
1849 {
1850 int i, num = 0;
1851 GElf_Sym sym;
1852
1853 for (i = 0; i < ctx->sym_num; i++) {
1854 int type = GELF_ST_TYPE(sym.st_info);
1855
1856 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1857 continue;
1858
1859 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1860 (type != STT_NOTYPE && type != STT_OBJECT) ||
1861 sym.st_shndx != ctx->sec_maps)
1862 continue;
1863 num++;
1864 }
1865
1866 return num;
1867 }
1868
1869 static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
1870 struct bpf_elf_sec_data *data)
1871 {
1872 Elf_Data *sec_edata;
1873 GElf_Shdr sec_hdr;
1874 Elf_Scn *sec_fd;
1875 char *sec_name;
1876
1877 memset(data, 0, sizeof(*data));
1878
1879 sec_fd = elf_getscn(ctx->elf_fd, section);
1880 if (!sec_fd)
1881 return -EINVAL;
1882 if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
1883 return -EIO;
1884
1885 sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
1886 sec_hdr.sh_name);
1887 if (!sec_name || !sec_hdr.sh_size)
1888 return -ENOENT;
1889
1890 sec_edata = elf_getdata(sec_fd, NULL);
1891 if (!sec_edata || elf_getdata(sec_fd, sec_edata))
1892 return -EIO;
1893
1894 memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));
1895
1896 data->sec_name = sec_name;
1897 data->sec_data = sec_edata;
1898 return 0;
1899 }
1900
/* Minimal legacy layout of struct bpf_elf_map: the four leading members
 * every supported version must at least carry. Serves as the lower
 * size bound when fixing up objects built against older definitions.
 */
struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};
1907
1908 static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
1909 struct bpf_elf_sec_data *data)
1910 {
1911 ctx->map_num = data->sec_data->d_size;
1912 ctx->sec_maps = section;
1913 ctx->sec_done[section] = true;
1914
1915 if (ctx->map_num > sizeof(ctx->maps)) {
1916 fprintf(stderr, "Too many BPF maps in ELF section!\n");
1917 return -ENOMEM;
1918 }
1919
1920 memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
1921 return 0;
1922 }
1923
1924 static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
1925 {
1926 GElf_Sym sym;
1927 int off, i;
1928
1929 for (off = 0; off < end; off += ctx->map_len) {
1930 /* Order doesn't need to be linear here, hence we walk
1931 * the table again.
1932 */
1933 for (i = 0; i < ctx->sym_num; i++) {
1934 int type = GELF_ST_TYPE(sym.st_info);
1935
1936 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1937 continue;
1938 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1939 (type != STT_NOTYPE && type != STT_OBJECT) ||
1940 sym.st_shndx != ctx->sec_maps)
1941 continue;
1942 if (sym.st_value == off)
1943 break;
1944 if (i == ctx->sym_num - 1)
1945 return -1;
1946 }
1947 }
1948
1949 return off == end ? 0 : -1;
1950 }
1951
1952 static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
1953 {
1954 struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
1955 int i, sym_num = bpf_map_num_sym(ctx);
1956 __u8 *buff;
1957
1958 if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
1959 fprintf(stderr, "%u maps not supported in current map section!\n",
1960 sym_num);
1961 return -EINVAL;
1962 }
1963
1964 if (ctx->map_num % sym_num != 0 ||
1965 ctx->map_num % sizeof(__u32) != 0) {
1966 fprintf(stderr, "Number BPF map symbols are not multiple of struct bpf_elf_map!\n");
1967 return -EINVAL;
1968 }
1969
1970 ctx->map_len = ctx->map_num / sym_num;
1971 if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
1972 fprintf(stderr, "Different struct bpf_elf_map in use!\n");
1973 return -EINVAL;
1974 }
1975
1976 if (ctx->map_len == sizeof(struct bpf_elf_map)) {
1977 ctx->map_num = sym_num;
1978 return 0;
1979 } else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
1980 fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
1981 return -EINVAL;
1982 } else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
1983 fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
1984 return -EINVAL;
1985 }
1986
1987 ctx->map_num = sym_num;
1988 for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
1989 i++, buff += ctx->map_len) {
1990 /* The fixup leaves the rest of the members as zero, which
1991 * is fine currently, but option exist to set some other
1992 * default value as well when needed in future.
1993 */
1994 memcpy(&fixup[i], buff, ctx->map_len);
1995 }
1996
1997 memcpy(ctx->maps, fixup, sizeof(fixup));
1998 if (ctx->verbose)
1999 printf("%zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
2000 sizeof(struct bpf_elf_map) - ctx->map_len);
2001 return 0;
2002 }
2003
2004 static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
2005 struct bpf_elf_sec_data *data)
2006 {
2007 if (data->sec_data->d_size > sizeof(ctx->license))
2008 return -ENOMEM;
2009
2010 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
2011 ctx->sec_done[section] = true;
2012 return 0;
2013 }
2014
2015 static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
2016 struct bpf_elf_sec_data *data)
2017 {
2018 ctx->sym_tab = data->sec_data;
2019 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
2020 ctx->sec_done[section] = true;
2021 return 0;
2022 }
2023
2024 static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
2025 struct bpf_elf_sec_data *data)
2026 {
2027 ctx->str_tab = data->sec_data;
2028 ctx->sec_done[section] = true;
2029 return 0;
2030 }
2031
2032 static int bpf_fetch_text(struct bpf_elf_ctx *ctx, int section,
2033 struct bpf_elf_sec_data *data)
2034 {
2035 ctx->sec_text = section;
2036 ctx->sec_done[section] = true;
2037 return 0;
2038 }
2039
/* Print a status report for the BTF load attempt to stderr, plus the
 * captured verifier log. fd < 0 means rejection; errno holds why.
 */
static void bpf_btf_report(int fd, struct bpf_elf_ctx *ctx)
{
	fprintf(stderr, "\nBTF debug data section \'.BTF\' %s%s (%d)!\n",
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Length: %zu\n", ctx->btf_data->d_size);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}
2051
/* Load the .BTF section blob into the kernel, enlarging the verifier
 * log buffer and retrying (up to 10 times) on ENOSPC, mirroring the
 * retry scheme of bpf_prog_attach(). Returns the BTF fd or a negative
 * error.
 */
static int bpf_btf_attach(struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	/* Clear errno so stale values are not mistaken for ENOSPC. */
	errno = 0;
	fd = bpf_btf_load(ctx->btf_data->d_buf, ctx->btf_data->d_size,
			  ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		/* Only report when the kernel produced log output. */
		if (bpf_log_has_data(ctx))
			bpf_btf_report(fd, ctx);
	}

	return fd;
}
2075
2076 static int bpf_fetch_btf_begin(struct bpf_elf_ctx *ctx, int section,
2077 struct bpf_elf_sec_data *data)
2078 {
2079 ctx->btf_data = data->sec_data;
2080 ctx->sec_btf = section;
2081 ctx->sec_done[section] = true;
2082 return 0;
2083 }
2084
/* Validate the raw .BTF blob header and section layout (magic,
 * version, flags, and that type data is immediately followed by the
 * string data, which must be NUL-framed). On success, caches hdr, raw
 * payload and string base in ctx->btf. Returns 0 or -EINVAL.
 */
static int bpf_btf_check_header(struct bpf_elf_ctx *ctx)
{
	const struct btf_header *hdr = ctx->btf_data->d_buf;
	const char *str_start, *str_end;
	unsigned int data_len;

	if (hdr->magic != BTF_MAGIC) {
		fprintf(stderr, "Object has wrong BTF magic: %x, expected: %x!\n",
			hdr->magic, BTF_MAGIC);
		return -EINVAL;
	}

	if (hdr->version != BTF_VERSION) {
		fprintf(stderr, "Object has wrong BTF version: %u, expected: %u!\n",
			hdr->version, BTF_VERSION);
		return -EINVAL;
	}

	if (hdr->flags) {
		fprintf(stderr, "Object has unsupported BTF flags %x!\n",
			hdr->flags);
		return -EINVAL;
	}

	/* Type data must start 4-byte aligned at type_off and be followed
	 * directly by the string data, which together exhaust the blob.
	 */
	data_len = ctx->btf_data->d_size - sizeof(*hdr);
	if (data_len < hdr->type_off ||
	    data_len < hdr->str_off ||
	    data_len < hdr->type_len + hdr->str_len ||
	    hdr->type_off >= hdr->str_off ||
	    hdr->type_off + hdr->type_len != hdr->str_off ||
	    hdr->str_off + hdr->str_len != data_len ||
	    (hdr->type_off & (sizeof(uint32_t) - 1))) {
		fprintf(stderr, "Object has malformed BTF data!\n");
		return -EINVAL;
	}

	ctx->btf.hdr = hdr;
	ctx->btf.raw = hdr + 1;

	/* String section starts and ends with a NUL byte. */
	str_start = ctx->btf.raw + hdr->str_off;
	str_end = str_start + hdr->str_len;
	if (!hdr->str_len ||
	    hdr->str_len - 1 > BTF_MAX_NAME_OFFSET ||
	    str_start[0] || str_end[-1]) {
		fprintf(stderr, "Object has malformed BTF string data!\n");
		return -EINVAL;
	}

	ctx->btf.strings = str_start;
	return 0;
}
2136
2137 static int bpf_btf_register_type(struct bpf_elf_ctx *ctx,
2138 const struct btf_type *type)
2139 {
2140 int cur = ctx->btf.types_num, num = cur + 1;
2141 const struct btf_type **types;
2142
2143 types = realloc(ctx->btf.types, num * sizeof(type));
2144 if (!types) {
2145 free(ctx->btf.types);
2146 ctx->btf.types = NULL;
2147 ctx->btf.types_num = 0;
2148 return -ENOMEM;
2149 }
2150
2151 ctx->btf.types = types;
2152 ctx->btf.types[cur] = type;
2153 ctx->btf.types_num = num;
2154 return 0;
2155 }
2156
2157 static struct btf_type btf_type_void;
2158
/* Walk the raw BTF type section and register a pointer to every type
 * record. Each kind carries a kind-specific trailer whose size must be
 * skipped to reach the next record; unknown kinds abort the parse.
 */
static int bpf_btf_prep_type_data(struct bpf_elf_ctx *ctx)
{
	const void *type_cur = ctx->btf.raw + ctx->btf.hdr->type_off;
	const void *type_end = ctx->btf.raw + ctx->btf.hdr->str_off;
	const struct btf_type *type;
	uint16_t var_len;
	int ret, kind;

	/* Id 0 is the implicit void type. */
	ret = bpf_btf_register_type(ctx, &btf_type_void);
	if (ret < 0)
		return ret;

	while (type_cur < type_end) {
		type = type_cur;
		type_cur += sizeof(*type);

		var_len = BTF_INFO_VLEN(type->info);
		kind = BTF_INFO_KIND(type->info);

		/* Advance past the kind-specific trailing data. */
		switch (kind) {
		case BTF_KIND_INT:
			type_cur += sizeof(int);
			break;
		case BTF_KIND_ARRAY:
			type_cur += sizeof(struct btf_array);
			break;
		case BTF_KIND_STRUCT:
		case BTF_KIND_UNION:
			type_cur += var_len * sizeof(struct btf_member);
			break;
		case BTF_KIND_ENUM:
			type_cur += var_len * sizeof(struct btf_enum);
			break;
		case BTF_KIND_TYPEDEF:
		case BTF_KIND_PTR:
		case BTF_KIND_FWD:
		case BTF_KIND_VOLATILE:
		case BTF_KIND_CONST:
		case BTF_KIND_RESTRICT:
			/* No trailing data for these kinds. */
			break;
		default:
			fprintf(stderr, "Object has unknown BTF type: %u!\n", kind);
			return -EINVAL;
		}

		ret = bpf_btf_register_type(ctx, type);
		if (ret < 0)
			return ret;
	}

	return 0;
}
2211
/* Validate the BTF header, then build the type table from the blob. */
static int bpf_btf_prep_data(struct bpf_elf_ctx *ctx)
{
	int ret = bpf_btf_check_header(ctx);

	if (ret)
		return ret;
	return bpf_btf_prep_type_data(ctx);
}
2220
/* Finish BTF handling: load the blob into the kernel and parse the
 * type/string data needed for map annotation. Failures are non-fatal;
 * on parse failure the kernel fd is dropped again and btf_fd reset to
 * 0, the "no BTF available" sentinel used by map creation.
 */
static void bpf_fetch_btf_end(struct bpf_elf_ctx *ctx)
{
	int fd = bpf_btf_attach(ctx);

	if (fd < 0)
		return;
	ctx->btf_fd = fd;
	if (bpf_btf_prep_data(ctx) < 0) {
		close(ctx->btf_fd);
		ctx->btf_fd = 0;
	}
}
2233
2234 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
2235 {
2236 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
2237 }
2238
2239 static bool bpf_has_btf_data(const struct bpf_elf_ctx *ctx)
2240 {
2241 return ctx->sec_btf;
2242 }
2243
2244 static bool bpf_has_call_data(const struct bpf_elf_ctx *ctx)
2245 {
2246 return ctx->sec_text;
2247 }
2248
/* Scan all ELF sections and collect the ancillary data needed before
 * programs can be loaded: maps, license, .text (if requested), symtab,
 * strtab and .BTF. Afterwards loads BTF and all maps into the kernel.
 * Returns 0/last ret on success, negative error otherwise.
 */
static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx, bool check_text_sec)
{
	struct bpf_elf_sec_data data;
	int i, ret = -1;

	/* Section 0 is SHN_UNDEF, hence start at 1. */
	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0)
			continue;

		if (data.sec_hdr.sh_type == SHT_PROGBITS &&
		    !strcmp(data.sec_name, ELF_SECTION_MAPS))
			ret = bpf_fetch_maps_begin(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
			ret = bpf_fetch_license(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
			 !strcmp(data.sec_name, ".text") &&
			 check_text_sec)
			ret = bpf_fetch_text(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
			 !strcmp(data.sec_name, ".symtab"))
			ret = bpf_fetch_symtab(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_STRTAB &&
			 !strcmp(data.sec_name, ".strtab"))
			ret = bpf_fetch_strtab(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 !strcmp(data.sec_name, ".BTF"))
			ret = bpf_fetch_btf_begin(ctx, i, &data);
		if (ret < 0) {
			fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
				i);
			return ret;
		}
	}

	/* BTF must be in place before maps, so maps can be annotated. */
	if (bpf_has_btf_data(ctx))
		bpf_fetch_btf_end(ctx);
	if (bpf_has_map_data(ctx)) {
		ret = bpf_fetch_maps_end(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
			return ret;
		}

		ret = bpf_maps_attach_all(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error loading maps into kernel!\n");
			return ret;
		}
	}

	return ret;
}
2304
/* Load the program from the named section when it needs no relocation
 * (the relocation path is bpf_fetch_prog_relo()). *sseen reports
 * whether the section was present at all. Returns the prog fd, or -1
 * if the section was not found, or a negative load error.
 */
static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
			  bool *sseen)
{
	struct bpf_elf_sec_data data;
	struct bpf_elf_prog prog;
	int ret, i, fd = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		if (ctx->sec_done[i])
			continue;

		/* Program sections are executable PROGBITS with the
		 * user-requested section name.
		 */
		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0 ||
		    !(data.sec_hdr.sh_type == SHT_PROGBITS &&
		      (data.sec_hdr.sh_flags & SHF_EXECINSTR) &&
		      !strcmp(data.sec_name, section)))
			continue;

		*sseen = true;

		memset(&prog, 0, sizeof(prog));
		prog.type = ctx->type;
		prog.license = ctx->license;
		prog.size = data.sec_data->d_size;
		prog.insns_num = prog.size / sizeof(struct bpf_insn);
		/* Instructions used in place, no relocation needed. */
		prog.insns = data.sec_data->d_buf;

		fd = bpf_prog_attach(section, &prog, ctx);
		if (fd < 0)
			return fd;

		ctx->sec_done[i] = true;
		break;
	}

	return fd;
}
2342
/* Per-program statistics accumulated while applying relocations. */
struct bpf_relo_props {
	struct bpf_tail_call {
		unsigned int total;	/* tail call (prog array) map refs */
		unsigned int jited;	/* ... thereof with JITed owner */
	} tc;
	int main_num;	/* insn count of main prog before appended .text */
};
2350
/* Apply one map relocation: patch the loaded map's fd into the ld64
 * immediate and mark it as BPF_PSEUDO_MAP_FD. Tail call (prog array)
 * map usage is tallied for later JIT-mismatch diagnostics.
 */
static int bpf_apply_relo_map(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
			      GElf_Rel *relo, GElf_Sym *sym,
			      struct bpf_relo_props *props)
{
	unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
	/* The symbol's offset in the maps section selects the map slot. */
	unsigned int map_idx = sym->st_value / ctx->map_len;

	if (insn_off >= prog->insns_num)
		return -EINVAL;
	/* Map references must sit in a ld64 (BPF_LD | BPF_IMM | BPF_DW). */
	if (prog->insns[insn_off].code != (BPF_LD | BPF_IMM | BPF_DW)) {
		fprintf(stderr, "ELF contains relo data for non ld64 instruction at offset %u! Compiler bug?!\n",
			insn_off);
		return -EINVAL;
	}

	if (map_idx >= ARRAY_SIZE(ctx->map_fds))
		return -EINVAL;
	if (!ctx->map_fds[map_idx])
		return -EINVAL;
	if (ctx->maps[map_idx].type == BPF_MAP_TYPE_PROG_ARRAY) {
		props->tc.total++;
		/* Owner type 0 means not yet set; assume current config. */
		if (ctx->maps_ext[map_idx].owner.jited ||
		    (ctx->maps_ext[map_idx].owner.type == 0 &&
		     ctx->cfg.jit_enabled))
			props->tc.jited++;
	}

	prog->insns[insn_off].src_reg = BPF_PSEUDO_MAP_FD;
	prog->insns[insn_off].imm = ctx->map_fds[map_idx];
	return 0;
}
2382
/* Apply one BPF-to-BPF call relocation: on first use, append the whole
 * .text subprogram section to the main program, then fix up the call's
 * pc-relative immediate to target the appended code.
 */
static int bpf_apply_relo_call(struct bpf_elf_ctx *ctx, struct bpf_elf_prog *prog,
			       GElf_Rel *relo, GElf_Sym *sym,
			       struct bpf_relo_props *props)
{
	unsigned int insn_off = relo->r_offset / sizeof(struct bpf_insn);
	struct bpf_elf_prog *prog_text = &ctx->prog_text;

	if (insn_off >= prog->insns_num)
		return -EINVAL;
	/* NOTE(review): with '&&' this only rejects when BOTH the opcode
	 * and src_reg mismatch; a relocation on a non-call opcode whose
	 * src_reg happens to equal BPF_PSEUDO_CALL would slip through —
	 * presumably '||' was intended, verify against upstream.
	 */
	if (prog->insns[insn_off].code != (BPF_JMP | BPF_CALL) &&
	    prog->insns[insn_off].src_reg != BPF_PSEUDO_CALL) {
		fprintf(stderr, "ELF contains relo data for non call instruction at offset %u! Compiler bug?!\n",
			insn_off);
		return -EINVAL;
	}

	if (!props->main_num) {
		/* First call relo: append .text once behind the main
		 * program; main_num records where it starts.
		 */
		struct bpf_insn *insns = realloc(prog->insns,
						 prog->size + prog_text->size);
		if (!insns)
			return -ENOMEM;

		memcpy(insns + prog->insns_num, prog_text->insns,
		       prog_text->size);
		props->main_num = prog->insns_num;
		prog->insns = insns;
		prog->insns_num += prog_text->insns_num;
		prog->size += prog_text->size;
	}

	/* Rebase the call target onto the appended .text copy. */
	prog->insns[insn_off].imm += props->main_num - insn_off;
	return 0;
}
2416
/* Apply all relocation entries of one SHT_REL section to the program:
 * map references and BPF-to-BPF calls are supported; anything else is
 * reported and skipped (leaving ret = -EIO... see below, the entry
 * fails the whole pass via the final ret check).
 */
static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
			       struct bpf_elf_sec_data *data_relo,
			       struct bpf_elf_prog *prog,
			       struct bpf_relo_props *props)
{
	GElf_Shdr *rhdr = &data_relo->sec_hdr;
	int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;

	for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
		GElf_Rel relo;
		GElf_Sym sym;
		int ret = -EIO;

		if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
			return -EIO;
		if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
			return -EIO;

		/* Dispatch on the section the relocated symbol lives in. */
		if (sym.st_shndx == ctx->sec_maps)
			ret = bpf_apply_relo_map(ctx, prog, &relo, &sym, props);
		else if (sym.st_shndx == ctx->sec_text)
			ret = bpf_apply_relo_call(ctx, prog, &relo, &sym, props);
		else
			fprintf(stderr, "ELF contains non-{map,call} related relo data in entry %u pointing to section %u! Compiler bug?!\n",
				relo_ent, sym.st_shndx);
		if (ret < 0)
			return ret;
	}

	return 0;
}
2448
/* Load the program from the named section after applying relocations.
 * Works on a private copy of the instructions. When the relocated
 * section is .text itself, no kernel load happens (fd = 0) and the
 * relocated copy is kept in *prog (ownership stays with the caller);
 * otherwise the copy is freed after the load attempt. *lderr marks
 * hard errors (vs. "section not found", fd = -1); *sseen, if non-NULL,
 * reports whether the section exists.
 */
static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
			       bool *lderr, bool *sseen, struct bpf_elf_prog *prog)
{
	struct bpf_elf_sec_data data_relo, data_insn;
	int ret, idx, i, fd = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		struct bpf_relo_props props = {};

		/* Walk SHT_REL sections; sh_info points at the section
		 * the relocations apply to.
		 */
		ret = bpf_fill_section_data(ctx, i, &data_relo);
		if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
			continue;

		idx = data_relo.sec_hdr.sh_info;

		ret = bpf_fill_section_data(ctx, idx, &data_insn);
		if (ret < 0 ||
		    !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
		      (data_insn.sec_hdr.sh_flags & SHF_EXECINSTR) &&
		      !strcmp(data_insn.sec_name, section)))
			continue;
		if (sseen)
			*sseen = true;

		memset(prog, 0, sizeof(*prog));
		prog->type = ctx->type;
		prog->license = ctx->license;
		prog->size = data_insn.sec_data->d_size;
		prog->insns_num = prog->size / sizeof(struct bpf_insn);
		/* Relocation patches insns, so work on a private copy. */
		prog->insns = malloc(prog->size);
		if (!prog->insns) {
			*lderr = true;
			return -ENOMEM;
		}

		memcpy(prog->insns, data_insn.sec_data->d_buf, prog->size);

		ret = bpf_apply_relo_data(ctx, &data_relo, prog, &props);
		if (ret < 0) {
			*lderr = true;
			/* For .text the caller owns/keeps prog->insns. */
			if (ctx->sec_text != idx)
				free(prog->insns);
			return ret;
		}
		if (ctx->sec_text == idx) {
			/* .text itself is not loaded standalone. */
			fd = 0;
			goto out;
		}

		fd = bpf_prog_attach(section, prog, ctx);
		free(prog->insns);
		if (fd < 0) {
			*lderr = true;
			/* Diagnose tail call maps whose owner's JIT state
			 * mismatches the current JIT configuration.
			 */
			if (props.tc.total) {
				if (ctx->cfg.jit_enabled &&
				    props.tc.total != props.tc.jited)
					fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have JITed owner!\n",
						props.tc.jited, props.tc.total);
				if (!ctx->cfg.jit_enabled &&
				    props.tc.jited)
					fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have JITed owner!\n",
						props.tc.jited, props.tc.total);
			}
			return fd;
		}
out:
		ctx->sec_done[i] = true;
		ctx->sec_done[idx] = true;
		break;
	}

	return fd;
}
2522
2523 static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2524 {
2525 bool lderr = false, sseen = false;
2526 struct bpf_elf_prog prog;
2527 int ret = -1;
2528
2529 if (bpf_has_call_data(ctx)) {
2530 ret = bpf_fetch_prog_relo(ctx, ".text", &lderr, NULL,
2531 &ctx->prog_text);
2532 if (ret < 0)
2533 return ret;
2534 }
2535
2536 if (bpf_has_map_data(ctx) || bpf_has_call_data(ctx))
2537 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen, &prog);
2538 if (ret < 0 && !lderr)
2539 ret = bpf_fetch_prog(ctx, section, &sseen);
2540 if (ret < 0 && !sseen)
2541 fprintf(stderr, "Program section \'%s\' not found in ELF file!\n",
2542 section);
2543 return ret;
2544 }
2545
2546 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2547 {
2548 int i;
2549
2550 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2551 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2552 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2553 return i;
2554 return -1;
2555 }
2556
/* fdinfo-derived state used for tail-call mismatch diagnostics: the
 * program about to be inserted into a prog array and the target map's
 * recorded owner properties.
 */
struct bpf_jited_aux {
	int prog_fd;			/* program to be inserted */
	int map_fd;			/* target PROG_ARRAY map */
	struct bpf_prog_data prog;	/* filled from prog fdinfo */
	struct bpf_map_ext map;		/* filled from map fdinfo (owner) */
};
2563
2564 static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2565 {
2566 char file[PATH_MAX], buff[4096];
2567 unsigned int val;
2568 FILE *fp;
2569
2570 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
2571 memset(prog, 0, sizeof(*prog));
2572
2573 fp = fopen(file, "r");
2574 if (!fp) {
2575 fprintf(stderr, "No procfs support?!\n");
2576 return -EIO;
2577 }
2578
2579 while (fgets(buff, sizeof(buff), fp)) {
2580 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2581 prog->type = val;
2582 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2583 prog->jited = val;
2584 }
2585
2586 fclose(fp);
2587 return 0;
2588 }
2589
2590 static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2591 {
2592 struct bpf_elf_map tmp;
2593 int ret;
2594
2595 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2596 if (!ret)
2597 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2598
2599 return ret;
2600 }
2601
/* Populate tail-call (PROG_ARRAY) maps from "<map_id>/<key>" sections.
 *
 * Every still-unprocessed section whose name parses as two integers is
 * loaded as a program and placed at slot <key> of the prog array map
 * carrying user id <map_id>. Returns 0 on success, negative errno on
 * failure (with best-effort diagnostics for owner mismatches).
 */
static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_sec_data data;
	uint32_t map_id, key_id;
	int fd, i, ret, idx;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		if (ctx->sec_done[i])
			continue;

		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0)
			continue;

		/* NOTE(review): %i scans into uint32_t storage while sscanf
		 * expects int * — relies on identical representation; also
		 * %i accepts 0x/0 prefixes. Worth confirming intentional.
		 */
		ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
		if (ret != 2)
			continue;

		idx = bpf_find_map_by_id(ctx, map_id);
		if (idx < 0)
			continue;

		fd = bpf_fetch_prog_sec(ctx, data.sec_name);
		if (fd < 0)
			return -EIO;

		ret = bpf_map_update(ctx->map_fds[idx], &key_id,
				     &fd, BPF_ANY);
		if (ret < 0) {
			struct bpf_jited_aux aux = {};

			/* Capture errno before any further calls clobber it. */
			ret = -errno;
			if (errno == E2BIG) {
				fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
					key_id, map_id);
				return ret;
			}

			aux.map_fd = ctx->map_fds[idx];
			aux.prog_fd = fd;

			/* Best-effort diagnosis via fdinfo: compare the map's
			 * recorded owner against the program's attributes.
			 */
			if (bpf_tail_call_get_aux(&aux))
				return ret;
			if (!aux.map.owner.type)
				return ret;

			if (aux.prog.type != aux.map.owner.type)
				fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
					aux.map.owner.type, aux.prog.type);
			if (aux.prog.jited != aux.map.owner.jited)
				fprintf(stderr, "Tail call map %s jited, but prog %s!\n",
					aux.map.owner.jited ? "is" : "not",
					aux.prog.jited ? "is" : "not");
			return ret;
		}

		ctx->sec_done[i] = true;
	}

	return 0;
}
2663
2664 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2665 {
2666 struct stat st;
2667 int ret;
2668
2669 memset(&ctx->stat, 0, sizeof(ctx->stat));
2670
2671 ret = fstat(ctx->obj_fd, &st);
2672 if (ret < 0) {
2673 fprintf(stderr, "Stat of elf file failed: %s\n",
2674 strerror(errno));
2675 return;
2676 }
2677
2678 ctx->stat.st_dev = st.st_dev;
2679 ctx->stat.st_ino = st.st_ino;
2680 }
2681
2682 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2683 {
2684 char buff[PATH_MAX];
2685
2686 while (fgets(buff, sizeof(buff), fp)) {
2687 char *ptr = buff;
2688
2689 while (*ptr == ' ' || *ptr == '\t')
2690 ptr++;
2691
2692 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2693 continue;
2694
2695 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2696 sscanf(ptr, "%i %s #", id, path) != 2) {
2697 strcpy(path, ptr);
2698 return -1;
2699 }
2700
2701 return 1;
2702 }
2703
2704 return 0;
2705 }
2706
2707 static bool bpf_pinning_reserved(uint32_t pinning)
2708 {
2709 switch (pinning) {
2710 case PIN_NONE:
2711 case PIN_OBJECT_NS:
2712 case PIN_GLOBAL_NS:
2713 return true;
2714 default:
2715 return false;
2716 }
2717 }
2718
2719 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2720 {
2721 struct bpf_hash_entry *entry;
2722 char subpath[PATH_MAX] = {};
2723 uint32_t pinning;
2724 FILE *fp;
2725 int ret;
2726
2727 fp = fopen(db_file, "r");
2728 if (!fp)
2729 return;
2730
2731 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2732 if (ret == -1) {
2733 fprintf(stderr, "Database %s is corrupted at: %s\n",
2734 db_file, subpath);
2735 fclose(fp);
2736 return;
2737 }
2738
2739 if (bpf_pinning_reserved(pinning)) {
2740 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2741 db_file, pinning);
2742 continue;
2743 }
2744
2745 entry = malloc(sizeof(*entry));
2746 if (!entry) {
2747 fprintf(stderr, "No memory left for db entry!\n");
2748 continue;
2749 }
2750
2751 entry->pinning = pinning;
2752 entry->subpath = strdup(subpath);
2753 if (!entry->subpath) {
2754 fprintf(stderr, "No memory left for db entry!\n");
2755 free(entry);
2756 continue;
2757 }
2758
2759 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2760 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2761 }
2762
2763 fclose(fp);
2764 }
2765
2766 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2767 {
2768 struct bpf_hash_entry *entry;
2769 int i;
2770
2771 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2772 while ((entry = ctx->ht[i]) != NULL) {
2773 ctx->ht[i] = entry->next;
2774 free((char *)entry->subpath);
2775 free(entry);
2776 }
2777 }
2778 }
2779
2780 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2781 {
2782 if (ctx->elf_hdr.e_type != ET_REL ||
2783 (ctx->elf_hdr.e_machine != EM_NONE &&
2784 ctx->elf_hdr.e_machine != EM_BPF) ||
2785 ctx->elf_hdr.e_version != EV_CURRENT) {
2786 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2787 return -EINVAL;
2788 }
2789
2790 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2791 default:
2792 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2793 return -EINVAL;
2794 case ELFDATA2LSB:
2795 if (htons(1) == 1) {
2796 fprintf(stderr,
2797 "We are big endian, eBPF object is little endian!\n");
2798 return -EIO;
2799 }
2800 break;
2801 case ELFDATA2MSB:
2802 if (htons(1) != 1) {
2803 fprintf(stderr,
2804 "We are little endian, eBPF object is big endian!\n");
2805 return -EIO;
2806 }
2807 break;
2808 }
2809
2810 return 0;
2811 }
2812
2813 static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2814 {
2815 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2816 int fd;
2817
2818 fd = open(path_jit, O_RDONLY);
2819 if (fd > 0) {
2820 char tmp[16] = {};
2821
2822 if (read(fd, tmp, sizeof(tmp)) > 0)
2823 ctx->cfg.jit_enabled = atoi(tmp);
2824 close(fd);
2825 }
2826 }
2827
2828 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2829 enum bpf_prog_type type, __u32 ifindex,
2830 bool verbose)
2831 {
2832 uint8_t tmp[20];
2833 int ret;
2834
2835 if (elf_version(EV_CURRENT) == EV_NONE)
2836 return -EINVAL;
2837
2838 bpf_init_env();
2839
2840 memset(ctx, 0, sizeof(*ctx));
2841 bpf_get_cfg(ctx);
2842
2843 ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
2844 if (ret)
2845 ctx->noafalg = true;
2846 else
2847 hexstring_n2a(tmp, sizeof(tmp), ctx->obj_uid,
2848 sizeof(ctx->obj_uid));
2849
2850 ctx->verbose = verbose;
2851 ctx->type = type;
2852 ctx->ifindex = ifindex;
2853
2854 ctx->obj_fd = open(pathname, O_RDONLY);
2855 if (ctx->obj_fd < 0)
2856 return ctx->obj_fd;
2857
2858 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2859 if (!ctx->elf_fd) {
2860 ret = -EINVAL;
2861 goto out_fd;
2862 }
2863
2864 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2865 ret = -EINVAL;
2866 goto out_fd;
2867 }
2868
2869 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2870 &ctx->elf_hdr) {
2871 ret = -EIO;
2872 goto out_elf;
2873 }
2874
2875 ret = bpf_elf_check_ehdr(ctx);
2876 if (ret < 0)
2877 goto out_elf;
2878
2879 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2880 sizeof(*(ctx->sec_done)));
2881 if (!ctx->sec_done) {
2882 ret = -ENOMEM;
2883 goto out_elf;
2884 }
2885
2886 if (ctx->verbose && bpf_log_realloc(ctx)) {
2887 ret = -ENOMEM;
2888 goto out_free;
2889 }
2890
2891 bpf_save_finfo(ctx);
2892 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2893
2894 return 0;
2895 out_free:
2896 free(ctx->sec_done);
2897 out_elf:
2898 elf_end(ctx->elf_fd);
2899 out_fd:
2900 close(ctx->obj_fd);
2901 return ret;
2902 }
2903
2904 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2905 {
2906 int i, count = 0;
2907
2908 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2909 if (!ctx->map_fds[i])
2910 break;
2911 count++;
2912 }
2913
2914 return count;
2915 }
2916
2917 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2918 {
2919 int i;
2920
2921 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2922 if (ctx->map_fds[i])
2923 close(ctx->map_fds[i]);
2924 }
2925
2926 if (ctx->btf_fd)
2927 close(ctx->btf_fd);
2928 free(ctx->btf.types);
2929 }
2930
/* Release all resources held by the loader context.
 *
 * @failure: when true the map (and BTF) fds are closed as well; on
 * success they stay open so they can be handed over to the caller
 * (e.g. via bpf_send_map_fds()).
 */
static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
{
	if (failure)
		bpf_maps_teardown(ctx);

	bpf_hash_destroy(ctx);

	free(ctx->prog_text.insns);
	free(ctx->sec_done);
	free(ctx->log);

	elf_end(ctx->elf_fd);
	close(ctx->obj_fd);
}
2945
/* Single process-wide loader context: kept static so bpf_send_map_fds()
 * can later hand over the map fds collected by bpf_obj_open().
 */
static struct bpf_elf_ctx __ctx;
2947
/* Open an eBPF ELF object and load the program found in @section.
 *
 * Initializes the loader context, fetches ancillary data (maps, license,
 * symbols), loads the program and finally wires up tail-call program
 * arrays. Returns the program fd on success, negative value on error.
 */
static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
			const char *section, __u32 ifindex, bool verbose)
{
	struct bpf_elf_ctx *ctx = &__ctx;
	int fd = 0, ret;

	ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
	if (ret < 0) {
		fprintf(stderr, "Cannot initialize ELF context!\n");
		return ret;
	}

	/* Second argument is nonzero whenever @section is not .text —
	 * presumably toggling ancillary handling for that case; confirm
	 * against bpf_fetch_ancillary().
	 */
	ret = bpf_fetch_ancillary(ctx, strcmp(section, ".text"));
	if (ret < 0) {
		fprintf(stderr, "Error fetching ELF ancillary data!\n");
		goto out;
	}

	fd = bpf_fetch_prog_sec(ctx, section);
	if (fd < 0) {
		fprintf(stderr, "Error fetching program/map!\n");
		ret = fd;
		goto out;
	}

	ret = bpf_fill_prog_arrays(ctx);
	if (ret < 0)
		fprintf(stderr, "Error filling program arrays!\n");
out:
	/* Tear down maps only on failure; on success their fds survive. */
	bpf_elf_ctx_destroy(ctx, ret < 0);
	if (ret < 0) {
		if (fd)
			close(fd);
		return ret;
	}

	return fd;
}
2986
/* Push @entries map fds plus their metadata over a UNIX domain socket.
 *
 * The fds travel as SCM_RIGHTS ancillary data in batches of at most
 * BPF_SCM_MAX_FDS, with the matching map entries copied into the
 * message payload. Returns 0 on success, otherwise the failing
 * sendmsg() result (or -1 when sendmsg() returned 0).
 */
static int
bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
		 const struct bpf_map_data *aux, unsigned int entries)
{
	struct bpf_map_set_msg msg = {
		.aux.uds_ver = BPF_SCM_AUX_VER,
		.aux.num_ent = entries,
	};
	int *cmsg_buf, min_fd;
	char *amsg_buf;
	int i;

	strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
	memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));

	/* cmsg_buf points at the SCM_RIGHTS fd area, amsg_buf at the
	 * entry payload inside the prepared message.
	 */
	cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
	amsg_buf = (char *)msg.aux.ent;

	for (i = 0; i < entries; i += min_fd) {
		int ret;

		/* Batch size: a full BPF_SCM_MAX_FDS or the remainder. */
		min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
		memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);

		ret = sendmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;
	}

	return 0;
}
3021
/* Receive map fds plus metadata from a UNIX domain socket.
 *
 * Counterpart of bpf_map_set_send(): collects batches of SCM_RIGHTS
 * fds until aux->num_ent entries (announced by the sender in each
 * message) or @entries, whichever is smaller, have been received.
 * Returns 0 on success, negative on error.
 */
static int
bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
		 unsigned int entries)
{
	struct bpf_map_set_msg msg;
	int *cmsg_buf, min_fd;
	char *amsg_buf, *mmsg_buf;
	unsigned int needed = 1;
	int i;

	cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
	amsg_buf = (char *)msg.aux.ent;
	mmsg_buf = (char *)&msg.aux;

	for (i = 0; i < min(entries, needed); i += min_fd) {
		struct cmsghdr *cmsg;
		int ret;

		/* NOTE(review): min(entries, entries - i) only caps at the
		 * remainder; the sender batches by BPF_SCM_MAX_FDS. The
		 * effective count is re-derived from cmsg_len below, so
		 * this merely sizes the receive buffers — confirm.
		 */
		min_fd = min(entries, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		ret = recvmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;

		cmsg = CMSG_FIRSTHDR(&msg.hdr);
		if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
			return -EINVAL;
		if (msg.hdr.msg_flags & MSG_CTRUNC)
			return -EIO;
		if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
			return -ENOSYS;

		/* Number of fds actually attached to this message. */
		min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
		if (min_fd > entries || min_fd <= 0)
			return -EINVAL;

		memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
		memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
		memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));

		/* Total entry count as announced by the sender. */
		needed = aux->num_ent;
	}

	return 0;
}
3068
3069 int bpf_send_map_fds(const char *path, const char *obj)
3070 {
3071 struct bpf_elf_ctx *ctx = &__ctx;
3072 struct sockaddr_un addr = { .sun_family = AF_UNIX };
3073 struct bpf_map_data bpf_aux = {
3074 .fds = ctx->map_fds,
3075 .ent = ctx->maps,
3076 .st = &ctx->stat,
3077 .obj = obj,
3078 };
3079 int fd, ret;
3080
3081 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
3082 if (fd < 0) {
3083 fprintf(stderr, "Cannot open socket: %s\n",
3084 strerror(errno));
3085 return -1;
3086 }
3087
3088 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
3089
3090 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
3091 if (ret < 0) {
3092 fprintf(stderr, "Cannot connect to %s: %s\n",
3093 path, strerror(errno));
3094 return -1;
3095 }
3096
3097 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
3098 bpf_maps_count(ctx));
3099 if (ret < 0)
3100 fprintf(stderr, "Cannot send fds to %s: %s\n",
3101 path, strerror(errno));
3102
3103 bpf_maps_teardown(ctx);
3104 close(fd);
3105 return ret;
3106 }
3107
/* Receive up to @entries map fds plus metadata on a UNIX socket bound
 * to @path; the socket path is unlinked again afterwards. Returns 0 on
 * success, negative on failure.
 */
int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
		     unsigned int entries)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd, ret;

	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (fd < 0) {
		fprintf(stderr, "Cannot open socket: %s\n",
			strerror(errno));
		return -1;
	}

	strlcpy(addr.sun_path, path, sizeof(addr.sun_path));

	ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0) {
		fprintf(stderr, "Cannot bind to socket: %s\n",
			strerror(errno));
		/* Do not leak the socket on the error path. */
		close(fd);
		return -1;
	}

	ret = bpf_map_set_recv(fd, fds, aux, entries);
	if (ret < 0)
		fprintf(stderr, "Cannot recv fds from %s: %s\n",
			path, strerror(errno));

	unlink(addr.sun_path);
	close(fd);
	return ret;
}
3139 #endif /* HAVE_ELF */