/*
 * bpf.c	BPF common code
 *
 *		This program is free software; you can distribute it and/or
 *		modify it under the terms of the GNU General Public License
 *		as published by the Free Software Foundation; either version
 *		2 of the License, or (at your option) any later version.
 *
 * Authors:	Daniel Borkmann <daniel@iogearbox.net>
 *		Jiri Pirko <jiri@resnulli.us>
 *		Alexei Starovoitov <ast@kernel.org>
 */

#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdint.h>
#include <errno.h>
#include <fcntl.h>
#include <stdarg.h>
#include <limits.h>
#include <assert.h>

#ifdef HAVE_ELF
#include <libelf.h>
#include <gelf.h>
#endif

#include <sys/types.h>
#include <sys/stat.h>
#include <sys/un.h>
#include <sys/vfs.h>
#include <sys/mount.h>
#include <sys/syscall.h>
#include <sys/sendfile.h>
#include <sys/resource.h>

#include <arpa/inet.h>

#include "utils.h"
#include "json_print.h"

#include "bpf_util.h"
#include "bpf_elf.h"
#include "bpf_scm.h"

struct bpf_prog_meta {
	const char *type;
	const char *subdir;
	const char *section;
	bool may_uds_export;
};

static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};

static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type = "cls",
		.subdir = "tc",
		.section = ELF_SECTION_CLASSIFIER,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type = "act",
		.subdir = "tc",
		.section = ELF_SECTION_ACTION,
		.may_uds_export = true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type = "xdp",
		.subdir = "xdp",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type = "lwt_in",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type = "lwt_out",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type = "lwt_xmit",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_SEG6LOCAL] = {
		.type = "lwt_seg6local",
		.subdir = "ip",
		.section = ELF_SECTION_PROG,
	},
};

static bool bpf_map_offload_neutral(enum bpf_map_type type)
{
	return type == BPF_MAP_TYPE_PERF_EVENT_ARRAY;
}

static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].subdir);
	return __bpf_prog_meta[type].subdir;
}

const char *bpf_prog_to_default_section(enum bpf_prog_type type)
{
	assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
	       __bpf_prog_meta[type].section);
	return __bpf_prog_meta[type].section;
}

#ifdef HAVE_ELF
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose);
#else
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, __u32 ifindex, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif

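/* Pointers are handed to the kernel through __u64 members of union
 * bpf_attr. Casting through unsigned long keeps the conversion exact
 * on both 32-bit and 64-bit targets and avoids sign-extension issues.
 */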
static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	return (__u64)(unsigned long)ptr;
}

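/* Thin wrapper around the bpf(2) syscall, invoked directly via
 * syscall(2) since libc does not provide a wrapper for it.
 */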
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}

static int bpf_map_update(int fd, const void *key, const void *value,
			  uint64_t flags)
{
	union bpf_attr attr = {};

	attr.map_fd = fd;
	attr.key = bpf_ptr_to_u64(key);
	attr.value = bpf_ptr_to_u64(value);
	attr.flags = flags;

	return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
}

static int bpf_prog_fd_by_id(uint32_t id)
{
	union bpf_attr attr = {};

	attr.prog_id = id;

	return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
}

static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
			       uint32_t *info_len)
{
	union bpf_attr attr = {};
	int ret;

	attr.info.bpf_fd = fd;
	attr.info.info = bpf_ptr_to_u64(info);
	attr.info.info_len = *info_len;

	*info_len = 0;
	ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
	if (!ret)
		*info_len = attr.info.info_len;

	return ret;
}

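/* Dump basic info (id, tag, JIT state) about an already loaded program,
 * e.g. when listing attached programs. Returns 1 if the info could be
 * retrieved from the kernel, 0 otherwise.
 */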
int bpf_dump_prog_info(FILE *f, uint32_t id)
{
	struct bpf_prog_info info = {};
	uint32_t len = sizeof(info);
	int fd, ret, dump_ok = 0;
	SPRINT_BUF(tmp);

	open_json_object("prog");
	print_uint(PRINT_ANY, "id", "id %u ", id);

	fd = bpf_prog_fd_by_id(id);
	if (fd < 0)
		goto out;

	ret = bpf_prog_info_by_fd(fd, &info, &len);
	if (!ret && len) {
		int jited = !!info.jited_prog_len;

		print_string(PRINT_ANY, "tag", "tag %s ",
			     hexstring_n2a(info.tag, sizeof(info.tag),
					   tmp, sizeof(tmp)));
		print_uint(PRINT_JSON, "jited", NULL, jited);
		if (jited && !is_json_context())
			fprintf(f, "jited ");
		dump_ok = 1;
	}

	close(fd);
out:
	close_json_object();
	return dump_ok;
}

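/* Classic BPF ops are accepted as a text string of the form
 * "LEN,code jt jf k,code jt jf k,...", i.e. the instruction count
 * followed by comma-separated 4-tuples. This matches what e.g.
 * "tcpdump -ddd" emits (one op per line; newlines and runs of
 * whitespace are normalized below), so as an illustration something
 * like "4,40 0 0 12,21 0 1 2048,6 0 0 65535,6 0 0 0" can be passed
 * either inline or via a file.
 */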
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *pos, c_prev = ' ';
		FILE *fp;
		int c;

		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = pos = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		while ((c = fgetc(fp)) != EOF) {
			switch (c) {
			case '\n':
				if (c_prev != ',')
					*(pos++) = ',';
				c_prev = ',';
				break;
			case ' ':
			case '\t':
				if (c_prev != ' ')
					*(pos++) = c;
				c_prev = ' ';
				break;
			default:
				*(pos++) = c;
				c_prev = c;
			}
			if (pos - tmp_string == tmp_len)
				break;
		}

		if (!feof(fp)) {
			free(tmp_string);
			fclose(fp);
			return -E2BIG;
		}

		fclose(fp);
		*pos = 0;

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}

static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	token = bpf_string;
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}

void bpf_print_ops(FILE *f, struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (len == 0)
		return;

	fprintf(f, "bytecode \'%u,", len);

	for (i = 0; i < len - 1; i++)
		fprintf(f, "%hu %hhu %hhu %u,", ops[i].code, ops[i].jt,
			ops[i].jf, ops[i].k);

	fprintf(f, "%hu %hhu %hhu %u\'", ops[i].code, ops[i].jt,
		ops[i].jf, ops[i].k);
}

static void bpf_map_pin_report(const struct bpf_elf_map *pin,
			       const struct bpf_elf_map *obj)
{
	fprintf(stderr, "Map specification differs from pinned file!\n");

	if (obj->type != pin->type)
		fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
			obj->type, pin->type);
	if (obj->size_key != pin->size_key)
		fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
			obj->size_key, pin->size_key);
	if (obj->size_value != pin->size_value)
		fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
			obj->size_value, pin->size_value);
	if (obj->max_elem != pin->max_elem)
		fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
			obj->max_elem, pin->max_elem);
	if (obj->flags != pin->flags)
		fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
			obj->flags, pin->flags);

	fprintf(stderr, "\n");
}

struct bpf_prog_data {
	unsigned int type;
	unsigned int jited;
};

struct bpf_map_ext {
	struct bpf_prog_data owner;
};

static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map,
					  struct bpf_map_ext *ext)
{
	unsigned int val, owner_type = 0, owner_jited = 0;
	char file[PATH_MAX], buff[4096];
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
	memset(map, 0, sizeof(*map));

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			map->type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			map->size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			map->size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			map->max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			map->flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
		else if (sscanf(buff, "owner_jited:\t%i", &val) == 1)
			owner_jited = val;
	}

	fclose(fp);
	if (ext) {
		memset(ext, 0, sizeof(*ext));
		ext->owner.type = owner_type;
		ext->owner.jited = owner_jited;
	}

	return 0;
}

static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    struct bpf_map_ext *ext, int length,
				    enum bpf_prog_type type)
{
	struct bpf_elf_map tmp, zero = {};
	int ret;

	ret = bpf_derive_elf_map_from_fdinfo(fd, &tmp, ext);
	if (ret < 0)
		return ret;

	/* The decision to reject this is on kernel side eventually, but
	 * at least give the user a chance to know what's wrong.
	 */
	if (ext->owner.type && ext->owner.type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, ext->owner.type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
		 * so just accept it. We know we do have an eBPF fd and in this
		 * case, everything is 0. It is guaranteed that no such map exists
		 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}

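/* Mount a bpf filesystem on target. The mount point is first remounted
 * private (recursively) so pinned objects don't propagate into peer
 * mount namespaces; if that fails with EINVAL because target is not
 * yet a mount point, it is bind-mounted onto itself once and retried.
 */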
static int bpf_mnt_fs(const char *target)
{
	bool bind_done = false;

	while (mount("", target, "none", MS_PRIVATE | MS_REC, NULL)) {
		if (errno != EINVAL || bind_done) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		bind_done = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}

static int bpf_mnt_check_target(const char *target)
{
	struct stat sb = {};
	int ret;

	ret = stat(target, &sb);
	if (ret) {
		ret = mkdir(target, S_IRWXU);
		if (ret) {
			fprintf(stderr, "mkdir %s failed: %s\n", target,
				strerror(errno));
			return ret;
		}
	}

	return 0;
}

static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0)
		return -ENOENT;
	if ((unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}

static const char *bpf_find_mntpt_single(unsigned long magic, char *mnt,
					 int len, const char *mntpt)
{
	int ret;

	ret = bpf_valid_mntpt(mntpt, magic);
	if (!ret) {
		strlcpy(mnt, mntpt, len);
		return mnt;
	}

	return NULL;
}

static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
				  char *mnt, int len,
				  const char * const *known_mnts)
{
	const char * const *ptr;
	char type[100];
	FILE *fp;

	if (known_mnts) {
		ptr = known_mnts;
		while (*ptr) {
			if (bpf_find_mntpt_single(magic, mnt, len, *ptr))
				return mnt;
			ptr++;
		}
	}

	if (len != PATH_MAX)
		return NULL;

	fp = fopen("/proc/mounts", "r");
	if (fp == NULL)
		return NULL;

	while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
		      mnt, type) == 2) {
		if (strcmp(type, fstype) == 0)
			break;
	}

	fclose(fp);
	if (strcmp(type, fstype) != 0)
		return NULL;

	return mnt;
}

int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	int fd_in, fd_out = STDERR_FILENO;
	char tpipe[PATH_MAX];
	const char *mnt;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd_in = open(tpipe, O_RDONLY);
	if (fd_in < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd_in, buff, sizeof(buff));
		if (ret > 0 && write(fd_out, buff, ret) == ret)
			continue;
		break;
	}

	close(fd_in);
	return -1;
}

static int bpf_gen_global(const char *bpf_sub_dir)
{
	char bpf_glo_dir[PATH_MAX];
	int ret;

	snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
		 bpf_sub_dir, BPF_DIR_GLOBALS);

	ret = mkdir(bpf_glo_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
			strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_gen_master(const char *base, const char *name)
{
	char bpf_sub_dir[PATH_MAX + NAME_MAX + 1];
	int ret;

	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);

	ret = mkdir(bpf_sub_dir, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
			strerror(errno));
		return ret;
	}

	return bpf_gen_global(bpf_sub_dir);
}

static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int ret;

	ret = mkdir(full_name, S_IRWXU);
	if (ret) {
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return ret;
	}

	ret = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (ret) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return ret;
}

static int bpf_gen_slave(const char *base, const char *name,
			 const char *link)
{
	char bpf_lnk_dir[PATH_MAX + NAME_MAX + 1];
	char bpf_sub_dir[PATH_MAX + NAME_MAX];
	struct stat sb = {};
	int ret;

	snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
	snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);

	ret = symlink(bpf_lnk_dir, bpf_sub_dir);
	if (ret) {
		if (errno != EEXIST) {
			if (errno != EPERM) {
				fprintf(stderr, "symlink %s failed: %s\n",
					bpf_sub_dir, strerror(errno));
				return ret;
			}

			return bpf_slave_via_bind_mnt(bpf_sub_dir,
						      bpf_lnk_dir);
		}

		ret = lstat(bpf_sub_dir, &sb);
		if (ret) {
			fprintf(stderr, "lstat %s failed: %s\n",
				bpf_sub_dir, strerror(errno));
			return ret;
		}

		if ((sb.st_mode & S_IFMT) != S_IFLNK)
			return bpf_gen_global(bpf_sub_dir);
	}

	return 0;
}

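/* The working directory layout under the bpf mount has one subdirectory
 * per program type (tc, xdp, ip), each containing a "globals" namespace.
 * The first type's directory is created for real; the others are
 * symlinks (or bind mounts where symlinks are not permitted) pointing
 * back to it, so pinned objects are shared across the subsystems.
 */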
static int bpf_gen_hierarchy(const char *base)
{
	int ret, i;

	ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
	for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
		ret = bpf_gen_slave(base,
				    bpf_prog_to_subdir(__bpf_types[i]),
				    bpf_prog_to_subdir(__bpf_types[0]));
	return ret;
}

static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	const char *mnt_env = getenv(BPF_ENV_MNT);
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	if (mnt_env)
		mnt = bpf_find_mntpt_single(BPF_FS_MAGIC, bpf_tmp,
					    sizeof(bpf_tmp), mnt_env);
	else
		mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp,
				     sizeof(bpf_tmp), bpf_known_mnts);
	if (!mnt) {
		mnt = mnt_env ? : BPF_DIR_MNT;
		ret = bpf_mnt_check_target(mnt);
		if (!ret)
			ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	bpf_mnt_cached = true;
	return mnt;
}

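/* Fetch the fd of a pinned object. A leading "m:" in the pathname is a
 * shortcut that is expanded relative to the per-type working directory,
 * so that, as an illustration, "m:globals/foo" resolves under the tc
 * subdirectory of the bpf mount for tc program types.
 */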
static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
{
	union bpf_attr attr = {};
	char tmp[PATH_MAX];

	if (strlen(pathname) > 2 && pathname[0] == 'm' &&
	    pathname[1] == ':' && bpf_get_work_dir(type)) {
		snprintf(tmp, sizeof(tmp), "%s/%s",
			 bpf_get_work_dir(type), pathname + 2);
		pathname = tmp;
	}

	attr.pathname = bpf_ptr_to_u64(pathname);

	return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
}

static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
{
	int prog_fd = bpf_obj_get(pathname, type);

	if (prog_fd < 0)
		fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
			pathname, strerror(errno));
	return prog_fd;
}

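/* Parse the common front-end syntax shared by tc and ip, i.e.:
 *
 *   bytecode 'LEN,c t f k,...' | bytecode-file FILE
 *   object-file FILE [type TYPE] [section NAME] [export UDS] [verbose]
 *   object-pinned PATH
 *
 * Which of the four modes are permitted is controlled by opt_tbl,
 * depending on the callbacks the caller has registered.
 */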
static int bpf_do_parse(struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		cfg->mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		cfg->mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		cfg->mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		cfg->mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						cfg->type = i;
						break;
					}
				}

				if (cfg->type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				cfg->type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		section = bpf_prog_to_default_section(cfg->type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		if (__bpf_prog_meta[cfg->type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE) {
		ret = bpf_ops_parse(argc, argv, cfg->opcodes,
				    cfg->mode == CBPF_FILE);
		cfg->n_opcodes = ret;
	} else if (cfg->mode == EBPF_OBJECT) {
		ret = 0; /* program will be loaded by load stage */
	} else if (cfg->mode == EBPF_PINNED) {
		ret = bpf_obj_pinned(file, cfg->type);
		cfg->prog_fd = ret;
	} else {
		return -1;
	}

	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;
	cfg->verbose = verbose;

	return ret;
}

static int bpf_do_load(struct bpf_cfg_in *cfg)
{
	if (cfg->mode == EBPF_OBJECT) {
		cfg->prog_fd = bpf_obj_open(cfg->object, cfg->type,
					    cfg->section, cfg->ifindex,
					    cfg->verbose);
		return cfg->prog_fd;
	}
	return 0;
}

int bpf_load_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops,
		    void *nl)
{
	char annotation[256];
	int ret;

	ret = bpf_do_load(cfg);
	if (ret < 0)
		return ret;

	if (cfg->mode == CBPF_BYTECODE || cfg->mode == CBPF_FILE)
		ops->cbpf_cb(nl, cfg->opcodes, cfg->n_opcodes);
	if (cfg->mode == EBPF_OBJECT || cfg->mode == EBPF_PINNED) {
		snprintf(annotation, sizeof(annotation), "%s:[%s]",
			 basename(cfg->object), cfg->mode == EBPF_PINNED ?
			 "*fsobj" : cfg->section);
		ops->ebpf_cb(nl, cfg->prog_fd, annotation);
	}

	return 0;
}

int bpf_parse_common(struct bpf_cfg_in *cfg, const struct bpf_cfg_ops *ops)
{
	bool opt_tbl[BPF_MODE_MAX] = {};

	if (ops->cbpf_cb) {
		opt_tbl[CBPF_BYTECODE] = true;
		opt_tbl[CBPF_FILE] = true;
	}

	if (ops->ebpf_cb) {
		opt_tbl[EBPF_OBJECT] = true;
		opt_tbl[EBPF_PINNED] = true;
	}

	return bpf_do_parse(cfg, opt_tbl);
}

int bpf_parse_and_load_common(struct bpf_cfg_in *cfg,
			      const struct bpf_cfg_ops *ops, void *nl)
{
	int ret;

	ret = bpf_parse_common(cfg, ops);
	if (ret < 0)
		return ret;

	return bpf_load_common(cfg, ops, nl);
}

int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.type = BPF_PROG_TYPE_UNSPEC,
		.argc = argc,
		.argv = argv,
	};
	struct bpf_map_ext ext = {};
	int ret, prog_fd, map_fd;
	uint32_t map_key;

	ret = bpf_do_parse(&cfg, opt_tbl);
	if (ret < 0)
		return ret;

	ret = bpf_do_load(&cfg);
	if (ret < 0)
		return ret;

	prog_fd = cfg.prog_fd;

	if (key) {
		map_key = *key;
	} else {
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, cfg.type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	ret = bpf_map_selfcheck_pinned(map_fd, &test, &ext,
				       offsetof(struct bpf_elf_map, max_elem),
				       cfg.type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}

int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_bpf_fd = prog_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
}

int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
{
	union bpf_attr attr = {};

	attr.target_fd = target_fd;
	attr.attach_type = type;

	return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
}

static int bpf_prog_load_dev(enum bpf_prog_type type,
			     const struct bpf_insn *insns, size_t size_insns,
			     const char *license, __u32 ifindex,
			     char *log, size_t size_log)
{
	union bpf_attr attr = {};

	attr.prog_type = type;
	attr.insns = bpf_ptr_to_u64(insns);
	attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
	attr.license = bpf_ptr_to_u64(license);
	attr.prog_ifindex = ifindex;

	if (size_log > 0) {
		attr.log_buf = bpf_ptr_to_u64(log);
		attr.log_size = size_log;
		attr.log_level = 1;
	}

	return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
}

int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
		  size_t size_insns, const char *license, char *log,
		  size_t size_log)
{
	return bpf_prog_load_dev(type, insns, size_insns, license, 0,
				 log, size_log);
}

#ifdef HAVE_ELF
struct bpf_elf_prog {
	enum bpf_prog_type type;
	const struct bpf_insn *insns;
	size_t size;
	const char *license;
};

struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

struct bpf_config {
	unsigned int jit_enabled;
};

struct bpf_elf_ctx {
	struct bpf_config cfg;
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;
	Elf_Data *str_tab;
	int obj_fd;
	int map_fds[ELF_MAX_MAPS];
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	struct bpf_map_ext maps_ext[ELF_MAX_MAPS];
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;
	int sec_maps;
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	__u32 ifindex;
	bool verbose;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];
	char *log;
	size_t log_size;
};

struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};

static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (ctx->log && ctx->log[0]) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}

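/* The verifier log buffer starts at 64 KiB and is doubled on demand,
 * capped at UINT_MAX >> 8, matching the upper bound the kernel enforces
 * on log_size for bpf(BPF_PROG_LOAD).
 */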
static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
{
	const size_t log_max = UINT_MAX >> 8;
	size_t log_size = ctx->log_size;
	char *ptr;

	if (!ctx->log) {
		log_size = 65536;
	} else if (log_size < log_max) {
		log_size <<= 1;
		if (log_size > log_max)
			log_size = log_max;
	} else {
		return -EINVAL;
	}

	ptr = realloc(ctx->log, log_size);
	if (!ptr)
		return -ENOMEM;

	ptr[0] = 0;
	ctx->log = ptr;
	ctx->log_size = log_size;

	return 0;
}

static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
			  uint32_t size_value, uint32_t max_elem,
			  uint32_t flags, int inner_fd, uint32_t ifindex)
{
	union bpf_attr attr = {};

	attr.map_type = type;
	attr.key_size = size_key;
	attr.value_size = inner_fd ? sizeof(int) : size_value;
	attr.max_entries = max_elem;
	attr.map_flags = flags;
	attr.inner_map_fd = inner_fd;
	attr.map_ifindex = ifindex;

	return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
}

static int bpf_obj_pin(int fd, const char *pathname)
{
	union bpf_attr attr = {};

	attr.pathname = bpf_ptr_to_u64(pathname);
	attr.bpf_fd = fd;

	return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
}

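/* Compute a SHA-1 digest of the object file via the kernel crypto API:
 * bind an AF_ALG hash socket, splice the file into it with sendfile(2)
 * and read back the 20-byte digest. The hex digest later serves as the
 * per-object namespace directory for PIN_OBJECT_NS pinning.
 */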
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family = AF_ALG,
		.salg_type = "hash",
		.salg_name = "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0) {
		fprintf(stderr, "Cannot get AF_ALG socket: %s\n",
			strerror(errno));
		return cfd;
	}

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0) {
		fprintf(stderr, "Error binding socket: %s\n", strerror(errno));
		goto out_cfd;
	}

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		fprintf(stderr, "Error accepting socket: %s\n",
			strerror(errno));
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, (size_t)stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}

static const char *bpf_get_obj_uid(const char *pathname)
{
	static bool bpf_uid_cached;
	static char bpf_uid[64];
	uint8_t tmp[20];
	int ret;

	if (bpf_uid_cached)
		goto done;

	ret = bpf_obj_hash(pathname, tmp, sizeof(tmp));
	if (ret) {
		fprintf(stderr, "Object hashing failed!\n");
		return NULL;
	}

	hexstring_n2a(tmp, sizeof(tmp), bpf_uid, sizeof(bpf_uid));
	bpf_uid_cached = true;
done:
	return bpf_uid;
}

static int bpf_init_env(const char *pathname)
{
	struct rlimit limit = {
		.rlim_cur = RLIM_INFINITY,
		.rlim_max = RLIM_INFINITY,
	};

	/* Don't bother in case we fail! */
	setrlimit(RLIMIT_MEMLOCK, &limit);

	if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC)) {
		fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
		return 0;
	}

	if (!bpf_get_obj_uid(pathname))
		return -1;

	return 0;
}

static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
				      uint32_t pinning)
{
	struct bpf_hash_entry *entry;

	entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
	while (entry && entry->pinning != pinning)
		entry = entry->next;

	return entry ? entry->subpath : NULL;
}

static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
			   uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
	case PIN_GLOBAL_NS:
		return false;
	case PIN_NONE:
		return true;
	default:
		return !bpf_custom_pinning(ctx, pinning);
	}
}

static void bpf_make_pathname(char *pathname, size_t len, const char *name,
			      const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	switch (pinning) {
	case PIN_OBJECT_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 bpf_get_obj_uid(NULL), name);
		break;
	case PIN_GLOBAL_NS:
		snprintf(pathname, len, "%s/%s/%s",
			 bpf_get_work_dir(ctx->type),
			 BPF_DIR_GLOBALS, name);
		break;
	default:
		snprintf(pathname, len, "%s/../%s/%s",
			 bpf_get_work_dir(ctx->type),
			 bpf_custom_pinning(ctx, pinning), name);
		break;
	}
}

static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
			    uint32_t pinning)
{
	char pathname[PATH_MAX];

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_get(pathname, ctx->type);
}

static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
{
	char tmp[PATH_MAX];
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
		 bpf_get_obj_uid(NULL));

	ret = mkdir(tmp, S_IRWXU);
	if (ret && errno != EEXIST) {
		fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
		return ret;
	}

	return 0;
}

static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
				const char *todo)
{
	char tmp[PATH_MAX], rem[PATH_MAX], *sub;
	int ret;

	snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
	snprintf(rem, sizeof(rem), "%s/", todo);
	sub = strtok(rem, "/");

	while (sub) {
		if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
			return -EINVAL;

		strcat(tmp, sub);
		strcat(tmp, "/");

		ret = mkdir(tmp, S_IRWXU);
		if (ret && errno != EEXIST) {
			fprintf(stderr, "mkdir %s failed: %s\n", tmp,
				strerror(errno));
			return ret;
		}

		sub = strtok(NULL, "/");
	}

	return 0;
}

static int bpf_place_pinned(int fd, const char *name,
			    const struct bpf_elf_ctx *ctx, uint32_t pinning)
{
	char pathname[PATH_MAX];
	const char *tmp;
	int ret = 0;

	if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
		return 0;

	if (pinning == PIN_OBJECT_NS)
		ret = bpf_make_obj_path(ctx);
	else if ((tmp = bpf_custom_pinning(ctx, pinning)))
		ret = bpf_make_custom_path(ctx, tmp);
	if (ret < 0)
		return ret;

	bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
	return bpf_obj_pin(fd, pathname);
}

static void bpf_prog_report(int fd, const char *section,
			    const struct bpf_elf_prog *prog,
			    struct bpf_elf_ctx *ctx)
{
	unsigned int insns = prog->size / sizeof(struct bpf_insn);

	fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", prog->type);
	fprintf(stderr, " - Instructions: %u (%u over limit)\n",
		insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
	fprintf(stderr, " - License: %s\n\n", prog->license);

	bpf_dump_error(ctx, "Verifier analysis:\n\n");
}

static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	errno = 0;
	fd = bpf_prog_load_dev(prog->type, prog->insns, prog->size,
			       prog->license, ctx->ifindex,
			       ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}

static void bpf_map_report(int fd, const char *name,
			   const struct bpf_elf_map *map,
			   struct bpf_elf_ctx *ctx, int inner_fd)
{
	fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
		fd < 0 ? "rejected: " : "loaded",
		fd < 0 ? strerror(errno) : "",
		fd < 0 ? errno : fd);

	fprintf(stderr, " - Type: %u\n", map->type);
	fprintf(stderr, " - Identifier: %u\n", map->id);
	fprintf(stderr, " - Pinning: %u\n", map->pinning);
	fprintf(stderr, " - Size key: %u\n", map->size_key);
	fprintf(stderr, " - Size value: %u\n",
		inner_fd ? (int)sizeof(int) : map->size_value);
	fprintf(stderr, " - Max elems: %u\n", map->max_elem);
	fprintf(stderr, " - Flags: %#x\n\n", map->flags);
}

static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
{
	int i;

	for (i = 0; i < ctx->map_num; i++) {
		if (ctx->maps[i].id != id)
			continue;
		if (ctx->map_fds[i] < 0)
			return -EINVAL;

		return ctx->map_fds[i];
	}

	return -ENOENT;
}

static void bpf_report_map_in_map(int outer_fd, uint32_t idx)
{
	struct bpf_elf_map outer_map;
	int ret;

	fprintf(stderr, "Cannot insert map into map! ");

	ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map, NULL);
	if (!ret) {
		if (idx >= outer_map.max_elem &&
		    outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
			fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
				outer_map.max_elem, idx);
			return;
		}
	}

	fprintf(stderr, "Different map specs used for outer and inner map?\n");
}

static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
{
	return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
	       map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
}

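/* Create or reuse a single map: if a pinned instance already exists for
 * the requested pinning, cross-check its spec against the ELF one and
 * reuse its fd. Creation of outer map-in-map objects is deferred to a
 * second pass (signalled via have_map_in_map) so that inner map fds
 * exist before the outer map referencing them is created.
 */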
static int bpf_map_attach(const char *name, struct bpf_elf_ctx *ctx,
			  const struct bpf_elf_map *map, struct bpf_map_ext *ext,
			  int *have_map_in_map)
{
	int fd, ifindex, ret, map_inner_fd = 0;

	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map, ext,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map \'%s\' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map \'%s\' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	ifindex = bpf_map_offload_neutral(map->type) ? 0 : ctx->ifindex;
	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd, ifindex);

	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0 && errno != EEXIST) {
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		close(fd);
		return ret;
	}

	return fd;
}

static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
				    const GElf_Sym *sym)
{
	return ctx->str_tab->d_buf + sym->st_name;
}

static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
{
	GElf_Sym sym;
	int i;

	for (i = 0; i < ctx->sym_num; i++) {
		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
		    sym.st_shndx != ctx->sec_maps ||
		    sym.st_value / ctx->map_len != which)
			continue;

		return bpf_str_tab_name(ctx, &sym);
	}

	return NULL;
}

static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], &have_map_in_map);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, ctx, &ctx->maps[i],
				    &ctx->maps_ext[i], NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_idx);
				return ret;
			}
		}
	}

	return 0;
}

static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
{
	int i, num = 0;
	GElf_Sym sym;

	for (i = 0; i < ctx->sym_num; i++) {
		if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
			continue;

		if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
		    GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
		    sym.st_shndx != ctx->sec_maps)
			continue;
		num++;
	}

	return num;
}

static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
				 struct bpf_elf_sec_data *data)
{
	Elf_Data *sec_edata;
	GElf_Shdr sec_hdr;
	Elf_Scn *sec_fd;
	char *sec_name;

	memset(data, 0, sizeof(*data));

	sec_fd = elf_getscn(ctx->elf_fd, section);
	if (!sec_fd)
		return -EINVAL;
	if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
		return -EIO;

	sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
			      sec_hdr.sh_name);
	if (!sec_name || !sec_hdr.sh_size)
		return -ENOENT;

	sec_edata = elf_getdata(sec_fd, NULL);
	if (!sec_edata || elf_getdata(sec_fd, sec_edata))
		return -EIO;

	memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));

	data->sec_name = sec_name;
	data->sec_data = sec_edata;
	return 0;
}

struct bpf_elf_map_min {
	__u32 type;
	__u32 size_key;
	__u32 size_value;
	__u32 max_elem;
};

static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
				struct bpf_elf_sec_data *data)
{
	ctx->map_num = data->sec_data->d_size;
	ctx->sec_maps = section;
	ctx->sec_done[section] = true;

	if (ctx->map_num > sizeof(ctx->maps)) {
		fprintf(stderr, "Too many BPF maps in ELF section!\n");
		return -ENOMEM;
	}

	memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
	return 0;
}

static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}

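/* The size of struct bpf_elf_map may differ between this loader and the
 * object file (built against older or newer headers). As long as each
 * entry is at least as large as struct bpf_elf_map_min, the entries are
 * copied into properly sized slots below, with members the object did
 * not provide left zeroed.
 */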
static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
	int i, sym_num = bpf_map_num_sym(ctx);
	__u8 *buff;

	if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
		fprintf(stderr, "%d maps not supported in current map section!\n",
			sym_num);
		return -EINVAL;
	}

	if (ctx->map_num % sym_num != 0 ||
	    ctx->map_num % sizeof(__u32) != 0) {
		fprintf(stderr, "Number of BPF map symbols is not a multiple of struct bpf_elf_map!\n");
		return -EINVAL;
	}

	ctx->map_len = ctx->map_num / sym_num;
	if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
		fprintf(stderr, "Different struct bpf_elf_map in use!\n");
		return -EINVAL;
	}

	if (ctx->map_len == sizeof(struct bpf_elf_map)) {
		ctx->map_num = sym_num;
		return 0;
	} else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
		fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
		return -EINVAL;
	} else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
		fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
		return -EINVAL;
	}

	ctx->map_num = sym_num;
	for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
	     i++, buff += ctx->map_len) {
		/* The fixup leaves the rest of the members as zero, which
		 * is fine currently, but the option exists to set some other
		 * default value as well when needed in future.
		 */
		memcpy(&fixup[i], buff, ctx->map_len);
	}

	memcpy(ctx->maps, fixup, sizeof(fixup));

	printf("Note: %zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
	       sizeof(struct bpf_elf_map) - ctx->map_len);
	return 0;
}

static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
			     struct bpf_elf_sec_data *data)
{
	if (data->sec_data->d_size > sizeof(ctx->license))
		return -ENOMEM;

	memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
			    struct bpf_elf_sec_data *data)
{
	ctx->sym_tab = data->sec_data;
	ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
	ctx->sec_done[section] = true;
	return 0;
}

static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
			    struct bpf_elf_sec_data *data)
{
	ctx->str_tab = data->sec_data;
	ctx->sec_done[section] = true;
	return 0;
}

static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
{
	return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
}

static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_sec_data data;
	int i, ret = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0)
			continue;

		if (data.sec_hdr.sh_type == SHT_PROGBITS &&
		    !strcmp(data.sec_name, ELF_SECTION_MAPS))
			ret = bpf_fetch_maps_begin(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
			ret = bpf_fetch_license(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
			 !strcmp(data.sec_name, ".symtab"))
			ret = bpf_fetch_symtab(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_STRTAB &&
			 !strcmp(data.sec_name, ".strtab"))
			ret = bpf_fetch_strtab(ctx, i, &data);
		if (ret < 0) {
			fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
				i);
			return ret;
		}
	}

	if (bpf_has_map_data(ctx)) {
		ret = bpf_fetch_maps_end(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
			return ret;
		}

		ret = bpf_maps_attach_all(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error loading maps into kernel!\n");
			return ret;
		}
	}

	return ret;
}

static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
			  bool *sseen)
{
	struct bpf_elf_sec_data data;
	struct bpf_elf_prog prog;
	int ret, i, fd = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		if (ctx->sec_done[i])
			continue;

		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0 ||
		    !(data.sec_hdr.sh_type == SHT_PROGBITS &&
		      data.sec_hdr.sh_flags & SHF_EXECINSTR &&
		      !strcmp(data.sec_name, section)))
			continue;

		*sseen = true;

		memset(&prog, 0, sizeof(prog));
		prog.type = ctx->type;
		prog.insns = data.sec_data->d_buf;
		prog.size = data.sec_data->d_size;
		prog.license = ctx->license;

		fd = bpf_prog_attach(section, &prog, ctx);
		if (fd < 0)
			return fd;

		ctx->sec_done[i] = true;
		break;
	}

	return fd;
}

struct bpf_tail_call_props {
	unsigned int total;
	unsigned int jited;
};

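/* Apply map relocations to a program section: each relocation entry
 * points at a ld64 (BPF_LD | BPF_IMM | BPF_DW) instruction whose symbol
 * lives in the maps section. The instruction's imm field is rewritten
 * with the fd of the corresponding map and src_reg is set to
 * BPF_PSEUDO_MAP_FD, which tells the verifier to interpret the
 * immediate as a map file descriptor.
 */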
2028 static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
2029 struct bpf_elf_sec_data *data_relo,
2030 struct bpf_elf_sec_data *data_insn,
2031 struct bpf_tail_call_props *props)
2032 {
2033 Elf_Data *idata = data_insn->sec_data;
2034 GElf_Shdr *rhdr = &data_relo->sec_hdr;
2035 int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
2036 struct bpf_insn *insns = idata->d_buf;
2037 unsigned int num_insns = idata->d_size / sizeof(*insns);
2038
2039 for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
2040 unsigned int ioff, rmap;
2041 GElf_Rel relo;
2042 GElf_Sym sym;
2043
2044 if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
2045 return -EIO;
2046
2047 ioff = relo.r_offset / sizeof(struct bpf_insn);
2048 if (ioff >= num_insns ||
2049 insns[ioff].code != (BPF_LD | BPF_IMM | BPF_DW)) {
2050 fprintf(stderr, "ELF contains relo data for non ld64 instruction at offset %u! Compiler bug?!\n",
2051 ioff);
2052 fprintf(stderr, " - Current section: %s\n", data_relo->sec_name);
2053 if (ioff < num_insns &&
2054 insns[ioff].code == (BPF_JMP | BPF_CALL))
2055 fprintf(stderr, " - Try to annotate functions with always_inline attribute!\n");
2056 return -EINVAL;
2057 }
2058
2059 if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
2060 return -EIO;
2061 if (sym.st_shndx != ctx->sec_maps) {
2062 fprintf(stderr, "ELF contains non-map related relo data in entry %u pointing to section %u! Compiler bug?!\n",
2063 relo_ent, sym.st_shndx);
2064 return -EIO;
2065 }
2066
2067 rmap = sym.st_value / ctx->map_len;
2068 if (rmap >= ARRAY_SIZE(ctx->map_fds))
2069 return -EINVAL;
2070 if (!ctx->map_fds[rmap])
2071 return -EINVAL;
2072 if (ctx->maps[rmap].type == BPF_MAP_TYPE_PROG_ARRAY) {
2073 props->total++;
2074 if (ctx->maps_ext[rmap].owner.jited ||
2075 (ctx->maps_ext[rmap].owner.type == 0 &&
2076 ctx->cfg.jit_enabled))
2077 props->jited++;
2078 }
2079
2080 if (ctx->verbose)
2081 fprintf(stderr, "Map \'%s\' (%d) injected into prog section \'%s\' at offset %u!\n",
2082 bpf_str_tab_name(ctx, &sym), ctx->map_fds[rmap],
2083 data_insn->sec_name, ioff);
2084
2085 insns[ioff].src_reg = BPF_PSEUDO_MAP_FD;
2086 insns[ioff].imm = ctx->map_fds[rmap];
2087 }
2088
2089 return 0;
2090 }
2091
2092 static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
2093 bool *lderr, bool *sseen)
2094 {
2095 struct bpf_elf_sec_data data_relo, data_insn;
2096 struct bpf_elf_prog prog;
2097 int ret, idx, i, fd = -1;
2098
2099 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2100 struct bpf_tail_call_props props = {};
2101
2102 ret = bpf_fill_section_data(ctx, i, &data_relo);
2103 if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
2104 continue;
2105
2106 idx = data_relo.sec_hdr.sh_info;
2107
2108 ret = bpf_fill_section_data(ctx, idx, &data_insn);
2109 if (ret < 0 ||
2110 !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
2111 data_insn.sec_hdr.sh_flags & SHF_EXECINSTR &&
2112 !strcmp(data_insn.sec_name, section)))
2113 continue;
2114
2115 *sseen = true;
2116
2117 ret = bpf_apply_relo_data(ctx, &data_relo, &data_insn, &props);
2118 if (ret < 0) {
2119 *lderr = true;
2120 return ret;
2121 }
2122
2123 memset(&prog, 0, sizeof(prog));
2124 prog.type = ctx->type;
2125 prog.insns = data_insn.sec_data->d_buf;
2126 prog.size = data_insn.sec_data->d_size;
2127 prog.license = ctx->license;
2128
2129 fd = bpf_prog_attach(section, &prog, ctx);
2130 if (fd < 0) {
2131 *lderr = true;
2132 if (props.total) {
2133 if (ctx->cfg.jit_enabled &&
2134 props.total != props.jited)
2135 fprintf(stderr, "JIT enabled, but only %u/%u tail call maps in the program have JITed owner!\n",
2136 props.jited, props.total);
2137 if (!ctx->cfg.jit_enabled &&
2138 props.jited)
2139 fprintf(stderr, "JIT disabled, but %u/%u tail call maps in the program have JITed owner!\n",
2140 props.jited, props.total);
2141 }
2142 return fd;
2143 }
2144
2145 ctx->sec_done[i] = true;
2146 ctx->sec_done[idx] = true;
2147 break;
2148 }
2149
2150 return fd;
2151 }
2152
2153 static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
2154 {
2155 bool lderr = false, sseen = false;
2156 int ret = -1;
2157
2158 if (bpf_has_map_data(ctx))
2159 ret = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen);
2160 if (ret < 0 && !lderr)
2161 ret = bpf_fetch_prog(ctx, section, &sseen);
2162 if (ret < 0 && !sseen)
2163 fprintf(stderr, "Program section \'%s\' not found in ELF file!\n",
2164 section);
2165 return ret;
2166 }
2167
2168 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2169 {
2170 int i;
2171
2172 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2173 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2174 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2175 return i;
2176 return -1;
2177 }
2178
2179 struct bpf_jited_aux {
2180 int prog_fd;
2181 int map_fd;
2182 struct bpf_prog_data prog;
2183 struct bpf_map_ext map;
2184 };
2185
2186 static int bpf_derive_prog_from_fdinfo(int fd, struct bpf_prog_data *prog)
2187 {
2188 char file[PATH_MAX], buff[4096];
2189 unsigned int val;
2190 FILE *fp;
2191
2192 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
2193 memset(prog, 0, sizeof(*prog));
2194
2195 fp = fopen(file, "r");
2196 if (!fp) {
2197 fprintf(stderr, "No procfs support?!\n");
2198 return -EIO;
2199 }
2200
2201 while (fgets(buff, sizeof(buff), fp)) {
2202 if (sscanf(buff, "prog_type:\t%u", &val) == 1)
2203 prog->type = val;
2204 else if (sscanf(buff, "prog_jited:\t%u", &val) == 1)
2205 prog->jited = val;
2206 }
2207
2208 fclose(fp);
2209 return 0;
2210 }
2211
2212 static int bpf_tail_call_get_aux(struct bpf_jited_aux *aux)
2213 {
2214 struct bpf_elf_map tmp;
2215 int ret;
2216
2217 ret = bpf_derive_elf_map_from_fdinfo(aux->map_fd, &tmp, &aux->map);
2218 if (!ret)
2219 ret = bpf_derive_prog_from_fdinfo(aux->prog_fd, &aux->prog);
2220
2221 return ret;
2222 }
2223
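/* Wire up tail calls: every remaining section named "<map id>/<key>" is
 * loaded as a program of its own and its fd stored at <key> in the
 * PROG_ARRAY map declared with that id. A minimal sketch on the object
 * file side (identifiers hypothetical, following the ELF conventions of
 * this loader):
 *
 *   struct bpf_elf_map __section("maps") jmp_tbl = {
 *       .type       = BPF_MAP_TYPE_PROG_ARRAY,
 *       .id         = 1,
 *       .size_key   = sizeof(uint32_t),
 *       .size_value = sizeof(uint32_t),
 *       .max_elem   = 8,
 *   };
 *
 *   __section("1/0") int on_key_0(struct __sk_buff *skb) { return 0; }
 *
 * places on_key_0() at key 0 of jmp_tbl, so a bpf_tail_call() on jmp_tbl
 * with index 0 in the main program jumps to it.
 */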
2224 static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
2225 {
2226 struct bpf_elf_sec_data data;
2227 uint32_t map_id, key_id;
2228 int fd, i, ret, idx;
2229
2230 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
2231 if (ctx->sec_done[i])
2232 continue;
2233
2234 ret = bpf_fill_section_data(ctx, i, &data);
2235 if (ret < 0)
2236 continue;
2237
2238 ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
2239 if (ret != 2)
2240 continue;
2241
2242 idx = bpf_find_map_by_id(ctx, map_id);
2243 if (idx < 0)
2244 continue;
2245
2246 fd = bpf_fetch_prog_sec(ctx, data.sec_name);
2247 if (fd < 0)
2248 return -EIO;
2249
2250 ret = bpf_map_update(ctx->map_fds[idx], &key_id,
2251 &fd, BPF_ANY);
2252 if (ret < 0) {
2253 struct bpf_jited_aux aux = {};
2254
2255 ret = -errno;
2256 if (errno == E2BIG) {
2257 fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
2258 key_id, map_id);
2259 return ret;
2260 }
2261
2262 aux.map_fd = ctx->map_fds[idx];
2263 aux.prog_fd = fd;
2264
2265 if (bpf_tail_call_get_aux(&aux))
2266 return ret;
2267 if (!aux.map.owner.type)
2268 return ret;
2269
2270 if (aux.prog.type != aux.map.owner.type)
2271 fprintf(stderr, "Tail call map owned by prog type %u, but prog type is %u!\n",
2272 aux.map.owner.type, aux.prog.type);
2273 if (aux.prog.jited != aux.map.owner.jited)
2274 fprintf(stderr, "Tail call map %s jited, but prog %s!\n",
2275 aux.map.owner.jited ? "is" : "not",
2276 aux.prog.jited ? "is" : "not");
2277 return ret;
2278 }
2279
2280 ctx->sec_done[i] = true;
2281 }
2282
2283 return 0;
2284 }
2285
2286 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2287 {
2288 struct stat st;
2289 int ret;
2290
2291 memset(&ctx->stat, 0, sizeof(ctx->stat));
2292
2293 ret = fstat(ctx->obj_fd, &st);
2294 if (ret < 0) {
2295 fprintf(stderr, "Stat of elf file failed: %s\n",
2296 strerror(errno));
2297 return;
2298 }
2299
2300 ctx->stat.st_dev = st.st_dev;
2301 ctx->stat.st_ino = st.st_ino;
2302 }
2303
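/* Parse one line of the pinning database (CONFDIR "/bpf_pinning"). Each
 * non-comment line maps a numeric pinning id to a subdirectory under the
 * bpf mount point, e.g. (sketch, assuming CONFDIR is /etc/iproute2):
 *
 *   # /etc/iproute2/bpf_pinning
 *   3   tracing
 *   4   foo/bar
 *
 * Returns 1 for a parsed entry, 0 on EOF, and -1 for a malformed line,
 * with the offending text copied back into 'path' for the error message.
 */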
2304 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2305 {
2306 char buff[PATH_MAX];
2307
2308 while (fgets(buff, sizeof(buff), fp)) {
2309 char *ptr = buff;
2310
2311 while (*ptr == ' ' || *ptr == '\t')
2312 ptr++;
2313
2314 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2315 continue;
2316
2317 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2318 sscanf(ptr, "%i %s #", id, path) != 2) {
2319 strcpy(path, ptr);
2320 return -1;
2321 }
2322
2323 return 1;
2324 }
2325
2326 return 0;
2327 }
2328
2329 static bool bpf_pinning_reserved(uint32_t pinning)
2330 {
2331 switch (pinning) {
2332 case PIN_NONE:
2333 case PIN_OBJECT_NS:
2334 case PIN_GLOBAL_NS:
2335 return true;
2336 default:
2337 return false;
2338 }
2339 }
2340
2341 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2342 {
2343 struct bpf_hash_entry *entry;
2344 char subpath[PATH_MAX] = {};
2345 uint32_t pinning;
2346 FILE *fp;
2347 int ret;
2348
2349 fp = fopen(db_file, "r");
2350 if (!fp)
2351 return;
2352
2353 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2354 if (ret == -1) {
2355 fprintf(stderr, "Database %s is corrupted at: %s\n",
2356 db_file, subpath);
2357 fclose(fp);
2358 return;
2359 }
2360
2361 if (bpf_pinning_reserved(pinning)) {
2362 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2363 db_file, pinning);
2364 continue;
2365 }
2366
2367 entry = malloc(sizeof(*entry));
2368 if (!entry) {
2369 fprintf(stderr, "No memory left for db entry!\n");
2370 continue;
2371 }
2372
2373 entry->pinning = pinning;
2374 entry->subpath = strdup(subpath);
2375 if (!entry->subpath) {
2376 fprintf(stderr, "No memory left for db entry!\n");
2377 free(entry);
2378 continue;
2379 }
2380
2381 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2382 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2383 }
2384
2385 fclose(fp);
2386 }
2387
2388 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2389 {
2390 struct bpf_hash_entry *entry;
2391 int i;
2392
2393 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2394 while ((entry = ctx->ht[i]) != NULL) {
2395 ctx->ht[i] = entry->next;
2396 free((char *)entry->subpath);
2397 free(entry);
2398 }
2399 }
2400 }
2401
2402 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2403 {
2404 if (ctx->elf_hdr.e_type != ET_REL ||
2405 (ctx->elf_hdr.e_machine != EM_NONE &&
2406 ctx->elf_hdr.e_machine != EM_BPF) ||
2407 ctx->elf_hdr.e_version != EV_CURRENT) {
2408 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2409 return -EINVAL;
2410 }
2411
2412 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2413 default:
2414 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2415 return -EINVAL;
2416 case ELFDATA2LSB:
2417 if (htons(1) == 1) {
2418 fprintf(stderr,
2419 "We are big endian, eBPF object is little endian!\n");
2420 return -EIO;
2421 }
2422 break;
2423 case ELFDATA2MSB:
2424 if (htons(1) != 1) {
2425 fprintf(stderr,
2426 "We are little endian, eBPF object is big endian!\n");
2427 return -EIO;
2428 }
2429 break;
2430 }
2431
2432 return 0;
2433 }
2434
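/* Cache whether the kernel JIT is enabled so tail call diagnostics can be
 * compared against a map owner's JIT state. The sysctl takes 0 (off),
 * 1 (on) and 2 (on, with debug output), so atoi() yields nonzero for any
 * enabled mode.
 */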
2435 static void bpf_get_cfg(struct bpf_elf_ctx *ctx)
2436 {
2437 static const char *path_jit = "/proc/sys/net/core/bpf_jit_enable";
2438 int fd;
2439
2440 fd = open(path_jit, O_RDONLY);
2441 if (fd >= 0) {
2442 char tmp[16] = {};
2443
2444 if (read(fd, tmp, sizeof(tmp)) > 0)
2445 ctx->cfg.jit_enabled = atoi(tmp);
2446 close(fd);
2447 }
2448 }
2449
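/* Set up the per-object parsing context: libelf handles, section
 * bookkeeping, JIT configuration, the optional verifier log buffer and
 * the pinning hash table. Paired with bpf_elf_ctx_destroy() below.
 */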
2450 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2451 enum bpf_prog_type type, __u32 ifindex,
2452 bool verbose)
2453 {
2454 int ret = -EINVAL;
2455
2456 if (elf_version(EV_CURRENT) == EV_NONE ||
2457 bpf_init_env(pathname))
2458 return ret;
2459
2460 memset(ctx, 0, sizeof(*ctx));
2461 bpf_get_cfg(ctx);
2462 ctx->verbose = verbose;
2463 ctx->type = type;
2464 ctx->ifindex = ifindex;
2465
2466 ctx->obj_fd = open(pathname, O_RDONLY);
2467 if (ctx->obj_fd < 0)
2468 return ctx->obj_fd;
2469
2470 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2471 if (!ctx->elf_fd) {
2472 ret = -EINVAL;
2473 goto out_fd;
2474 }
2475
2476 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2477 ret = -EINVAL;
2478 goto out_elf;
2479 }
2480
2481 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2482 &ctx->elf_hdr) {
2483 ret = -EIO;
2484 goto out_elf;
2485 }
2486
2487 ret = bpf_elf_check_ehdr(ctx);
2488 if (ret < 0)
2489 goto out_elf;
2490
2491 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2492 sizeof(*(ctx->sec_done)));
2493 if (!ctx->sec_done) {
2494 ret = -ENOMEM;
2495 goto out_elf;
2496 }
2497
2498 if (ctx->verbose && bpf_log_realloc(ctx)) {
2499 ret = -ENOMEM;
2500 goto out_free;
2501 }
2502
2503 bpf_save_finfo(ctx);
2504 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2505
2506 return 0;
2507 out_free:
2508 free(ctx->sec_done);
2509 out_elf:
2510 elf_end(ctx->elf_fd);
2511 out_fd:
2512 close(ctx->obj_fd);
2513 return ret;
2514 }
2515
2516 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2517 {
2518 int i, count = 0;
2519
2520 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2521 if (!ctx->map_fds[i])
2522 break;
2523 count++;
2524 }
2525
2526 return count;
2527 }
2528
2529 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2530 {
2531 int i;
2532
2533 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2534 if (ctx->map_fds[i])
2535 close(ctx->map_fds[i]);
2536 }
2537 }
2538
2539 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2540 {
2541 if (failure)
2542 bpf_maps_teardown(ctx);
2543
2544 bpf_hash_destroy(ctx);
2545
2546 free(ctx->sec_done);
2547 free(ctx->log);
2548
2549 elf_end(ctx->elf_fd);
2550 close(ctx->obj_fd);
2551 }
2552
2553 static struct bpf_elf_ctx __ctx;
2554
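/* Entry point of the ELF loader: initialize the context, pull in the
 * ancillary sections (maps, license, etc.), load the requested program
 * section and populate any tail call arrays. On failure all created map
 * fds are torn down and an already loaded program fd is closed again.
 */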
2555 static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
2556 const char *section, __u32 ifindex, bool verbose)
2557 {
2558 struct bpf_elf_ctx *ctx = &__ctx;
2559 int fd = 0, ret;
2560
2561 ret = bpf_elf_ctx_init(ctx, pathname, type, ifindex, verbose);
2562 if (ret < 0) {
2563 fprintf(stderr, "Cannot initialize ELF context!\n");
2564 return ret;
2565 }
2566
2567 ret = bpf_fetch_ancillary(ctx);
2568 if (ret < 0) {
2569 fprintf(stderr, "Error fetching ELF ancillary data!\n");
2570 goto out;
2571 }
2572
2573 fd = bpf_fetch_prog_sec(ctx, section);
2574 if (fd < 0) {
2575 fprintf(stderr, "Error fetching program/map!\n");
2576 ret = fd;
2577 goto out;
2578 }
2579
2580 ret = bpf_fill_prog_arrays(ctx);
2581 if (ret < 0)
2582 fprintf(stderr, "Error filling program arrays!\n");
2583 out:
2584 bpf_elf_ctx_destroy(ctx, ret < 0);
2585 if (ret < 0) {
2586 if (fd)
2587 close(fd);
2588 return ret;
2589 }
2590
2591 return fd;
2592 }
2593
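/* Push all map fds plus their metadata over an AF_UNIX datagram socket
 * using SCM_RIGHTS. At most BPF_SCM_MAX_FDS descriptors fit into a single
 * control message, so the set goes out in batches of min(BPF_SCM_MAX_FDS,
 * remaining) entries; bpf_map_set_recv() below reassembles them in order.
 */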
2594 static int
2595 bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
2596 const struct bpf_map_data *aux, unsigned int entries)
2597 {
2598 struct bpf_map_set_msg msg = {
2599 .aux.uds_ver = BPF_SCM_AUX_VER,
2600 .aux.num_ent = entries,
2601 };
2602 int *cmsg_buf, min_fd;
2603 char *amsg_buf;
2604 int i;
2605
2606 strlcpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
2607 memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));
2608
2609 cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
2610 amsg_buf = (char *)msg.aux.ent;
2611
2612 for (i = 0; i < entries; i += min_fd) {
2613 int ret;
2614
2615 min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
2616 bpf_map_set_init_single(&msg, min_fd);
2617
2618 memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
2619 memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);
2620
2621 ret = sendmsg(fd, &msg.hdr, 0);
2622 if (ret <= 0)
2623 return ret ? : -1;
2624 }
2625
2626 return 0;
2627 }
2628
2629 static int
2630 bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
2631 unsigned int entries)
2632 {
2633 struct bpf_map_set_msg msg;
2634 int *cmsg_buf, min_fd;
2635 char *amsg_buf, *mmsg_buf;
2636 unsigned int needed = 1;
2637 int i;
2638
2639 cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
2640 amsg_buf = (char *)msg.aux.ent;
2641 mmsg_buf = (char *)&msg.aux;
2642
2643 for (i = 0; i < min(entries, needed); i += min_fd) {
2644 struct cmsghdr *cmsg;
2645 int ret;
2646
2647 min_fd = min(entries, entries - i);
2648 bpf_map_set_init_single(&msg, min_fd);
2649
2650 ret = recvmsg(fd, &msg.hdr, 0);
2651 if (ret <= 0)
2652 return ret ? : -1;
2653
2654 cmsg = CMSG_FIRSTHDR(&msg.hdr);
2655 if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
2656 return -EINVAL;
2657 if (msg.hdr.msg_flags & MSG_CTRUNC)
2658 return -EIO;
2659 if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
2660 return -ENOSYS;
2661
2662 min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
2663 if (min_fd > entries || min_fd <= 0)
2664 return -EINVAL;
2665
2666 memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
2667 memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
2668 memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));
2669
2670 needed = aux->num_ent;
2671 }
2672
2673 return 0;
2674 }
2675
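/* Typical pairing (sketch only; paths are made up and the exact tc
 * invocation is abbreviated, see the tc-bpf(8) man page for the
 * authoritative syntax):
 *
 *   receiver:  tc exec bpf imp /tmp/bpf-uds          -> bpf_recv_map_fds()
 *   sender:    tc filter add dev eth0 ingress bpf \
 *                  obj prog.o exp /tmp/bpf-uds       -> bpf_send_map_fds()
 *
 * This lets a long-running agent inherit the map fds of a freshly attached
 * classifier and read or update map contents at runtime.
 */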
2676 int bpf_send_map_fds(const char *path, const char *obj)
2677 {
2678 struct bpf_elf_ctx *ctx = &__ctx;
2679 struct sockaddr_un addr = { .sun_family = AF_UNIX };
2680 struct bpf_map_data bpf_aux = {
2681 .fds = ctx->map_fds,
2682 .ent = ctx->maps,
2683 .st = &ctx->stat,
2684 .obj = obj,
2685 };
2686 int fd, ret;
2687
2688 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
2689 if (fd < 0) {
2690 fprintf(stderr, "Cannot open socket: %s\n",
2691 strerror(errno));
2692 return -1;
2693 }
2694
2695 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
2696
2697 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
2698 if (ret < 0) {
2699 fprintf(stderr, "Cannot connect to %s: %s\n", path, strerror(errno));
2700 close(fd);
2701 return -1;
2702 }
2703
2704 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
2705 bpf_maps_count(ctx));
2706 if (ret < 0)
2707 fprintf(stderr, "Cannot send fds to %s: %s\n",
2708 path, strerror(errno));
2709
2710 bpf_maps_teardown(ctx);
2711 close(fd);
2712 return ret;
2713 }
2714
2715 int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
2716 unsigned int entries)
2717 {
2718 struct sockaddr_un addr = { .sun_family = AF_UNIX };
2719 int fd, ret;
2720
2721 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
2722 if (fd < 0) {
2723 fprintf(stderr, "Cannot open socket: %s\n",
2724 strerror(errno));
2725 return -1;
2726 }
2727
2728 strlcpy(addr.sun_path, path, sizeof(addr.sun_path));
2729
2730 ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
2731 if (ret < 0) {
2732 fprintf(stderr, "Cannot bind to socket: %s\n", strerror(errno));
2733 close(fd);
2734 return -1;
2735 }
2736
2737 ret = bpf_map_set_recv(fd, fds, aux, entries);
2738 if (ret < 0)
2739 fprintf(stderr, "Cannot recv fds from %s: %s\n",
2740 path, strerror(errno));
2741
2742 unlink(addr.sun_path);
2743 close(fd);
2744 return ret;
2745 }
2746 #endif /* HAVE_ELF */