]> git.proxmox.com Git - mirror_iproute2.git/blob - lib/bpf.c
Merge branch 'master' into net-next
[mirror_iproute2.git] / lib / bpf.c
1 /*
2 * bpf.c BPF common code
3 *
4 * This program is free software; you can distribute it and/or
5 * modify it under the terms of the GNU General Public License
6 * as published by the Free Software Foundation; either version
7 * 2 of the License, or (at your option) any later version.
8 *
9 * Authors: Daniel Borkmann <daniel@iogearbox.net>
10 * Jiri Pirko <jiri@resnulli.us>
11 * Alexei Starovoitov <ast@kernel.org>
12 */
13
14 #include <stdio.h>
15 #include <stdlib.h>
16 #include <unistd.h>
17 #include <string.h>
18 #include <stdbool.h>
19 #include <stdint.h>
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <stdarg.h>
23 #include <limits.h>
24 #include <assert.h>
25
26 #ifdef HAVE_ELF
27 #include <libelf.h>
28 #include <gelf.h>
29 #endif
30
31 #include <sys/types.h>
32 #include <sys/stat.h>
33 #include <sys/un.h>
34 #include <sys/vfs.h>
35 #include <sys/mount.h>
36 #include <sys/syscall.h>
37 #include <sys/sendfile.h>
38 #include <sys/resource.h>
39
40 #include <arpa/inet.h>
41
42 #include "utils.h"
43
44 #include "bpf_util.h"
45 #include "bpf_elf.h"
46 #include "bpf_scm.h"
47
/* Per-program-type metadata: command-line type name, pinning subdirectory
 * under the bpf fs working directory, default ELF section to load from,
 * and whether fds for this type may be exported over a Unix domain socket.
 */
struct bpf_prog_meta {
	const char *type;	/* human-readable type name, e.g. "cls" */
	const char *subdir;	/* subdir below the bpf fs working dir */
	const char *section;	/* default ELF section name */
	bool may_uds_export;	/* true if UDS export is permitted */
};
54
/* All program types handled here; used to pre-create the per-type
 * directory hierarchy below the bpf fs mount (first entry is the master).
 */
static const enum bpf_prog_type __bpf_types[] = {
	BPF_PROG_TYPE_SCHED_CLS,
	BPF_PROG_TYPE_SCHED_ACT,
	BPF_PROG_TYPE_XDP,
	BPF_PROG_TYPE_LWT_IN,
	BPF_PROG_TYPE_LWT_OUT,
	BPF_PROG_TYPE_LWT_XMIT,
};
63
/* Metadata table indexed by enum bpf_prog_type; entries without a .type
 * are unsupported (checked via the asserts in the accessors below).
 */
static const struct bpf_prog_meta __bpf_prog_meta[] = {
	[BPF_PROG_TYPE_SCHED_CLS] = {
		.type		= "cls",
		.subdir		= "tc",
		.section	= ELF_SECTION_CLASSIFIER,
		.may_uds_export	= true,
	},
	[BPF_PROG_TYPE_SCHED_ACT] = {
		.type		= "act",
		.subdir		= "tc",
		.section	= ELF_SECTION_ACTION,
		.may_uds_export	= true,
	},
	[BPF_PROG_TYPE_XDP] = {
		.type		= "xdp",
		.subdir		= "xdp",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_IN] = {
		.type		= "lwt_in",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_OUT] = {
		.type		= "lwt_out",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
	[BPF_PROG_TYPE_LWT_XMIT] = {
		.type		= "lwt_xmit",
		.subdir		= "ip",
		.section	= ELF_SECTION_PROG,
	},
};
98
99 static const char *bpf_prog_to_subdir(enum bpf_prog_type type)
100 {
101 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
102 __bpf_prog_meta[type].subdir);
103 return __bpf_prog_meta[type].subdir;
104 }
105
106 const char *bpf_prog_to_default_section(enum bpf_prog_type type)
107 {
108 assert(type < ARRAY_SIZE(__bpf_prog_meta) &&
109 __bpf_prog_meta[type].section);
110 return __bpf_prog_meta[type].section;
111 }
112
#ifdef HAVE_ELF
/* Real implementation lives in the ELF loader part further below. */
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, bool verbose);
#else
/* Stub used when built without libelf: object loading is unsupported,
 * so report ENOSYS to the caller.
 */
static int bpf_obj_open(const char *path, enum bpf_prog_type type,
			const char *sec, bool verbose)
{
	fprintf(stderr, "No ELF library support compiled in.\n");
	errno = ENOSYS;
	return -1;
}
#endif
125
/* Convert a pointer to the u64 representation used in union bpf_attr.
 * The intermediate unsigned long cast zero-extends on 32-bit ABIs.
 */
static inline __u64 bpf_ptr_to_u64(const void *ptr)
{
	const unsigned long addr = (unsigned long)ptr;

	return (__u64)addr;
}
130
/* Thin wrapper around the bpf(2) syscall; returns the raw syscall result,
 * or -1 with errno = ENOSYS when the kernel headers lack __NR_bpf.
 */
static int bpf(int cmd, union bpf_attr *attr, unsigned int size)
{
#ifdef __NR_bpf
	return syscall(__NR_bpf, cmd, attr, size);
#else
	fprintf(stderr, "No bpf syscall, kernel headers too old?\n");
	errno = ENOSYS;
	return -1;
#endif
}
141
142 static int bpf_map_update(int fd, const void *key, const void *value,
143 uint64_t flags)
144 {
145 union bpf_attr attr = {};
146
147 attr.map_fd = fd;
148 attr.key = bpf_ptr_to_u64(key);
149 attr.value = bpf_ptr_to_u64(value);
150 attr.flags = flags;
151
152 return bpf(BPF_MAP_UPDATE_ELEM, &attr, sizeof(attr));
153 }
154
155 static int bpf_prog_fd_by_id(uint32_t id)
156 {
157 union bpf_attr attr = {};
158
159 attr.prog_id = id;
160
161 return bpf(BPF_PROG_GET_FD_BY_ID, &attr, sizeof(attr));
162 }
163
164 static int bpf_prog_info_by_fd(int fd, struct bpf_prog_info *info,
165 uint32_t *info_len)
166 {
167 union bpf_attr attr = {};
168 int ret;
169
170 attr.info.bpf_fd = fd;
171 attr.info.info = bpf_ptr_to_u64(info);
172 attr.info.info_len = *info_len;
173
174 *info_len = 0;
175 ret = bpf(BPF_OBJ_GET_INFO_BY_FD, &attr, sizeof(attr));
176 if (!ret)
177 *info_len = attr.info.info_len;
178
179 return ret;
180 }
181
182 void bpf_dump_prog_info(FILE *f, uint32_t id)
183 {
184 struct bpf_prog_info info = {};
185 uint32_t len = sizeof(info);
186 int fd, ret;
187
188 fprintf(f, "id %u ", id);
189
190 fd = bpf_prog_fd_by_id(id);
191 if (fd < 0)
192 return;
193
194 ret = bpf_prog_info_by_fd(fd, &info, &len);
195 if (!ret && len) {
196 if (info.jited_prog_len)
197 fprintf(f, "jited ");
198 }
199
200 close(fd);
201 }
202
/*
 * Parse the "<count><separator><ops...>" classic-BPF header either
 * directly from @arg or from the first line of the file named by @arg.
 *
 * On success *bpf_string points at the ops string, *bpf_len holds the
 * instruction count parsed from its prefix, and *need_release tells the
 * caller whether *bpf_string is heap-allocated and must be freed.
 * Returns 0 on success or a negative errno-style code.
 */
static int bpf_parse_string(char *arg, bool from_file, __u16 *bpf_len,
			    char **bpf_string, bool *need_release,
			    const char separator)
{
	char sp;

	if (from_file) {
		size_t tmp_len, op_len = sizeof("65535 255 255 4294967295,");
		char *tmp_string, *last;
		FILE *fp;

		/* Worst case: "4096," prefix plus BPF_MAXINSNS maximal ops. */
		tmp_len = sizeof("4096,") + BPF_MAXINSNS * op_len;
		tmp_string = calloc(1, tmp_len);
		if (tmp_string == NULL)
			return -ENOMEM;

		fp = fopen(arg, "r");
		if (fp == NULL) {
			perror("Cannot fopen");
			free(tmp_string);
			return -ENOENT;
		}

		if (!fgets(tmp_string, tmp_len, fp)) {
			free(tmp_string);
			fclose(fp);
			return -EIO;
		}

		fclose(fp);

		/* Strip a trailing newline. Guard against an empty read
		 * (e.g. a file starting with a NUL byte), which previously
		 * indexed one byte before the buffer.
		 */
		if (tmp_string[0]) {
			last = &tmp_string[strlen(tmp_string) - 1];
			if (*last == '\n')
				*last = 0;
		}

		*need_release = true;
		*bpf_string = tmp_string;
	} else {
		*need_release = false;
		*bpf_string = arg;
	}

	/* The string must begin with "<count><separator>". */
	if (sscanf(*bpf_string, "%hu%c", bpf_len, &sp) != 2 ||
	    sp != separator) {
		if (*need_release)
			free(*bpf_string);
		return -EINVAL;
	}

	return 0;
}
254
/* Parse classic BPF given as "<count>,<code jt jf k>,..." from argv[0]
 * (or from the file argv[0] names, when from_file). Fills @bpf_ops, for
 * which the caller must provide room for BPF_MAXINSNS entries. Returns
 * the instruction count on success, negative errno-style value on error.
 */
static int bpf_ops_parse(int argc, char **argv, struct sock_filter *bpf_ops,
			 bool from_file)
{
	char *bpf_string, *token, separator = ',';
	int ret = 0, i = 0;
	bool need_release;
	__u16 bpf_len = 0;

	if (argc < 1)
		return -EINVAL;
	if (bpf_parse_string(argv[0], from_file, &bpf_len, &bpf_string,
			     &need_release, separator))
		return -EINVAL;
	if (bpf_len == 0 || bpf_len > BPF_MAXINSNS) {
		ret = -EINVAL;
		goto out;
	}

	token = bpf_string;
	/* Each iteration advances just past the next separator; the extra
	 * (++token)[0] test terminates on a trailing separator.
	 */
	while ((token = strchr(token, separator)) && (++token)[0]) {
		if (i >= bpf_len) {
			fprintf(stderr, "Real program length exceeds encoded length parameter!\n");
			ret = -EINVAL;
			goto out;
		}

		if (sscanf(token, "%hu %hhu %hhu %u,",
			   &bpf_ops[i].code, &bpf_ops[i].jt,
			   &bpf_ops[i].jf, &bpf_ops[i].k) != 4) {
			fprintf(stderr, "Error at instruction %d!\n", i);
			ret = -EINVAL;
			goto out;
		}

		i++;
	}

	if (i != bpf_len) {
		fprintf(stderr, "Parsed program length is less than encoded length parameter!\n");
		ret = -EINVAL;
		goto out;
	}
	ret = bpf_len;
out:
	if (need_release)
		free(bpf_string);

	return ret;
}
304
/* Print cBPF ops from a netlink attribute in the same "bytecode '...'"
 * form that bpf_ops_parse() accepts; no-op for a zero length.
 */
void bpf_print_ops(FILE *f, struct rtattr *bpf_ops, __u16 len)
{
	struct sock_filter *ops = RTA_DATA(bpf_ops);
	int i;

	if (!len)
		return;

	fprintf(f, "bytecode \'%u,", len);

	/* Every op but the last is followed by ','; the last closes the
	 * quote instead.
	 */
	for (i = 0; i < len; i++)
		fprintf(f, "%hu %hhu %hhu %u%c", ops[i].code, ops[i].jt,
			ops[i].jf, ops[i].k, i == len - 1 ? '\'' : ',');
}
322
323 static void bpf_map_pin_report(const struct bpf_elf_map *pin,
324 const struct bpf_elf_map *obj)
325 {
326 fprintf(stderr, "Map specification differs from pinned file!\n");
327
328 if (obj->type != pin->type)
329 fprintf(stderr, " - Type: %u (obj) != %u (pin)\n",
330 obj->type, pin->type);
331 if (obj->size_key != pin->size_key)
332 fprintf(stderr, " - Size key: %u (obj) != %u (pin)\n",
333 obj->size_key, pin->size_key);
334 if (obj->size_value != pin->size_value)
335 fprintf(stderr, " - Size value: %u (obj) != %u (pin)\n",
336 obj->size_value, pin->size_value);
337 if (obj->max_elem != pin->max_elem)
338 fprintf(stderr, " - Max elems: %u (obj) != %u (pin)\n",
339 obj->max_elem, pin->max_elem);
340 if (obj->flags != pin->flags)
341 fprintf(stderr, " - Flags: %#x (obj) != %#x (pin)\n",
342 obj->flags, pin->flags);
343
344 fprintf(stderr, "\n");
345 }
346
/* Compare a pinned map's properties, read from /proc/<pid>/fdinfo/<fd>,
 * against the expectation in @map; only the first @length bytes of
 * struct bpf_elf_map are compared. Returns 0 on match (or when the
 * kernel exposes no eBPF fdinfo at all), negative on error/mismatch.
 */
static int bpf_map_selfcheck_pinned(int fd, const struct bpf_elf_map *map,
				    int length, enum bpf_prog_type type)
{
	char file[PATH_MAX], buff[4096];
	struct bpf_elf_map tmp = {}, zero = {};
	unsigned int val, owner_type = 0;
	FILE *fp;

	snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);

	fp = fopen(file, "r");
	if (!fp) {
		fprintf(stderr, "No procfs support?!\n");
		return -EIO;
	}

	/* Scrape the eBPF-related fdinfo key/value lines. */
	while (fgets(buff, sizeof(buff), fp)) {
		if (sscanf(buff, "map_type:\t%u", &val) == 1)
			tmp.type = val;
		else if (sscanf(buff, "key_size:\t%u", &val) == 1)
			tmp.size_key = val;
		else if (sscanf(buff, "value_size:\t%u", &val) == 1)
			tmp.size_value = val;
		else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
			tmp.max_elem = val;
		else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
			tmp.flags = val;
		else if (sscanf(buff, "owner_prog_type:\t%i", &val) == 1)
			owner_type = val;
	}

	fclose(fp);

	/* The decision to reject this is on kernel side eventually, but
	 * at least give the user a chance to know what's wrong.
	 */
	if (owner_type && owner_type != type)
		fprintf(stderr, "Program array map owner types differ: %u (obj) != %u (pin)\n",
			type, owner_type);

	if (!memcmp(&tmp, map, length)) {
		return 0;
	} else {
		/* If kernel doesn't have eBPF-related fdinfo, we cannot do much,
		 * so just accept it. We know we do have an eBPF fd and in this
		 * case, everything is 0. It is guaranteed that no such map exists
		 * since map type of 0 is unloadable BPF_MAP_TYPE_UNSPEC.
		 */
		if (!memcmp(&tmp, &zero, length))
			return 0;

		bpf_map_pin_report(&tmp, map);
		return -EINVAL;
	}
}
402
/* Mount a bpf filesystem at @target. The mount point is first made
 * private; if that fails with EINVAL it is bind-mounted onto itself once
 * and the make-private retried. Returns 0 on success, -1 on failure.
 */
static int bpf_mnt_fs(const char *target)
{
	bool did_bind = false;

	for (;;) {
		if (!mount("", target, "none", MS_PRIVATE | MS_REC, NULL))
			break;

		if (errno != EINVAL || did_bind) {
			fprintf(stderr, "mount --make-private %s failed: %s\n",
				target, strerror(errno));
			return -1;
		}

		if (mount(target, target, "none", MS_BIND, NULL)) {
			fprintf(stderr, "mount --bind %s %s failed: %s\n",
				target, target, strerror(errno));
			return -1;
		}

		did_bind = true;
	}

	if (mount("bpf", target, "bpf", 0, "mode=0700")) {
		fprintf(stderr, "mount -t bpf bpf %s failed: %s\n",
			target, strerror(errno));
		return -1;
	}

	return 0;
}
431
/* Return 0 iff @mnt exists and is a filesystem with the given magic. */
static int bpf_valid_mntpt(const char *mnt, unsigned long magic)
{
	struct statfs st_fs;

	if (statfs(mnt, &st_fs) < 0 ||
	    (unsigned long)st_fs.f_type != magic)
		return -ENOENT;

	return 0;
}
443
444 static const char *bpf_find_mntpt(const char *fstype, unsigned long magic,
445 char *mnt, int len,
446 const char * const *known_mnts)
447 {
448 const char * const *ptr;
449 char type[100];
450 FILE *fp;
451
452 if (known_mnts) {
453 ptr = known_mnts;
454 while (*ptr) {
455 if (bpf_valid_mntpt(*ptr, magic) == 0) {
456 strncpy(mnt, *ptr, len - 1);
457 mnt[len - 1] = 0;
458 return mnt;
459 }
460 ptr++;
461 }
462 }
463
464 fp = fopen("/proc/mounts", "r");
465 if (fp == NULL || len != PATH_MAX)
466 return NULL;
467
468 while (fscanf(fp, "%*s %" textify(PATH_MAX) "s %99s %*s %*d %*d\n",
469 mnt, type) == 2) {
470 if (strcmp(type, fstype) == 0)
471 break;
472 }
473
474 fclose(fp);
475 if (strcmp(type, fstype) != 0)
476 return NULL;
477
478 return mnt;
479 }
480
/* Locate the tracefs mount, open its trace_pipe and copy it to stderr
 * until the user interrupts with ^C. Returns -1 if tracefs or the pipe
 * cannot be opened; otherwise it never returns normally.
 */
int bpf_trace_pipe(void)
{
	char tracefs_mnt[PATH_MAX] = TRACE_DIR_MNT;
	static const char * const tracefs_known_mnts[] = {
		TRACE_DIR_MNT,
		"/sys/kernel/debug/tracing",
		"/tracing",
		"/trace",
		0,
	};
	char tpipe[PATH_MAX];
	const char *mnt;
	int fd;

	mnt = bpf_find_mntpt("tracefs", TRACEFS_MAGIC, tracefs_mnt,
			     sizeof(tracefs_mnt), tracefs_known_mnts);
	if (!mnt) {
		fprintf(stderr, "tracefs not mounted?\n");
		return -1;
	}

	snprintf(tpipe, sizeof(tpipe), "%s/trace_pipe", mnt);

	fd = open(tpipe, O_RDONLY);
	if (fd < 0)
		return -1;

	fprintf(stderr, "Running! Hang up with ^C!\n\n");
	/* Endless copy loop; process exit happens via signal (^C). */
	while (1) {
		static char buff[4096];
		ssize_t ret;

		ret = read(fd, buff, sizeof(buff) - 1);
		if (ret > 0) {
			write(2, buff, ret);
			fflush(stderr);
		}
	}

	return 0;
}
522
523 static int bpf_gen_global(const char *bpf_sub_dir)
524 {
525 char bpf_glo_dir[PATH_MAX];
526 int ret;
527
528 snprintf(bpf_glo_dir, sizeof(bpf_glo_dir), "%s/%s/",
529 bpf_sub_dir, BPF_DIR_GLOBALS);
530
531 ret = mkdir(bpf_glo_dir, S_IRWXU);
532 if (ret && errno != EEXIST) {
533 fprintf(stderr, "mkdir %s failed: %s\n", bpf_glo_dir,
534 strerror(errno));
535 return ret;
536 }
537
538 return 0;
539 }
540
541 static int bpf_gen_master(const char *base, const char *name)
542 {
543 char bpf_sub_dir[PATH_MAX];
544 int ret;
545
546 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s/", base, name);
547
548 ret = mkdir(bpf_sub_dir, S_IRWXU);
549 if (ret && errno != EEXIST) {
550 fprintf(stderr, "mkdir %s failed: %s\n", bpf_sub_dir,
551 strerror(errno));
552 return ret;
553 }
554
555 return bpf_gen_global(bpf_sub_dir);
556 }
557
/* Fallback when symlink(2) is not permitted on the bpf fs: create the
 * slave directory and bind-mount the master onto it. The directory is
 * removed again if the bind mount fails.
 */
static int bpf_slave_via_bind_mnt(const char *full_name,
				  const char *full_link)
{
	int err;

	err = mkdir(full_name, S_IRWXU);
	if (err) {
		/* Caller only takes this path when the entry is absent. */
		assert(errno != EEXIST);
		fprintf(stderr, "mkdir %s failed: %s\n", full_name,
			strerror(errno));
		return err;
	}

	err = mount(full_link, full_name, "none", MS_BIND, NULL);
	if (err) {
		rmdir(full_name);
		fprintf(stderr, "mount --bind %s %s failed: %s\n",
			full_link, full_name, strerror(errno));
	}

	return err;
}
580
581 static int bpf_gen_slave(const char *base, const char *name,
582 const char *link)
583 {
584 char bpf_lnk_dir[PATH_MAX];
585 char bpf_sub_dir[PATH_MAX];
586 struct stat sb = {};
587 int ret;
588
589 snprintf(bpf_lnk_dir, sizeof(bpf_lnk_dir), "%s%s/", base, link);
590 snprintf(bpf_sub_dir, sizeof(bpf_sub_dir), "%s%s", base, name);
591
592 ret = symlink(bpf_lnk_dir, bpf_sub_dir);
593 if (ret) {
594 if (errno != EEXIST) {
595 if (errno != EPERM) {
596 fprintf(stderr, "symlink %s failed: %s\n",
597 bpf_sub_dir, strerror(errno));
598 return ret;
599 }
600
601 return bpf_slave_via_bind_mnt(bpf_sub_dir,
602 bpf_lnk_dir);
603 }
604
605 ret = lstat(bpf_sub_dir, &sb);
606 if (ret) {
607 fprintf(stderr, "lstat %s failed: %s\n",
608 bpf_sub_dir, strerror(errno));
609 return ret;
610 }
611
612 if ((sb.st_mode & S_IFMT) != S_IFLNK)
613 return bpf_gen_global(bpf_sub_dir);
614 }
615
616 return 0;
617 }
618
619 static int bpf_gen_hierarchy(const char *base)
620 {
621 int ret, i;
622
623 ret = bpf_gen_master(base, bpf_prog_to_subdir(__bpf_types[0]));
624 for (i = 1; i < ARRAY_SIZE(__bpf_types) && !ret; i++)
625 ret = bpf_gen_slave(base,
626 bpf_prog_to_subdir(__bpf_types[i]),
627 bpf_prog_to_subdir(__bpf_types[0]));
628 return ret;
629 }
630
/* Return (and cache on first call) the bpf fs working directory, mounting
 * the fs and creating the per-type hierarchy if necessary. For a non-zero
 * @type the returned path includes the type's subdirectory. Returns NULL
 * when no bpf fs could be found or mounted.
 *
 * NOTE: with a non-zero type the result points into static storage
 * (bpf_tmp) that is overwritten by the next such call.
 */
static const char *bpf_get_work_dir(enum bpf_prog_type type)
{
	static char bpf_tmp[PATH_MAX] = BPF_DIR_MNT;
	static char bpf_wrk_dir[PATH_MAX];
	static const char *mnt;
	static bool bpf_mnt_cached;
	static const char * const bpf_known_mnts[] = {
		BPF_DIR_MNT,
		"/bpf",
		0,
	};
	int ret;

	if (bpf_mnt_cached) {
		const char *out = mnt;

		if (out && type) {
			snprintf(bpf_tmp, sizeof(bpf_tmp), "%s%s/",
				 out, bpf_prog_to_subdir(type));
			out = bpf_tmp;
		}
		return out;
	}

	/* Prefer an existing mount; otherwise mount at $BPF_ENV_MNT or
	 * the default location.
	 */
	mnt = bpf_find_mntpt("bpf", BPF_FS_MAGIC, bpf_tmp, sizeof(bpf_tmp),
			     bpf_known_mnts);
	if (!mnt) {
		mnt = getenv(BPF_ENV_MNT);
		if (!mnt)
			mnt = BPF_DIR_MNT;
		ret = bpf_mnt_fs(mnt);
		if (ret) {
			mnt = NULL;
			goto out;
		}
	}

	snprintf(bpf_wrk_dir, sizeof(bpf_wrk_dir), "%s/", mnt);

	ret = bpf_gen_hierarchy(bpf_wrk_dir);
	if (ret) {
		mnt = NULL;
		goto out;
	}

	mnt = bpf_wrk_dir;
out:
	/* Cache the outcome either way, including failure (mnt == NULL). */
	bpf_mnt_cached = true;
	return mnt;
}
681
682 static int bpf_obj_get(const char *pathname, enum bpf_prog_type type)
683 {
684 union bpf_attr attr = {};
685 char tmp[PATH_MAX];
686
687 if (strlen(pathname) > 2 && pathname[0] == 'm' &&
688 pathname[1] == ':' && bpf_get_work_dir(type)) {
689 snprintf(tmp, sizeof(tmp), "%s/%s",
690 bpf_get_work_dir(type), pathname + 2);
691 pathname = tmp;
692 }
693
694 attr.pathname = bpf_ptr_to_u64(pathname);
695
696 return bpf(BPF_OBJ_GET, &attr, sizeof(attr));
697 }
698
699 static int bpf_obj_pinned(const char *pathname, enum bpf_prog_type type)
700 {
701 int prog_fd = bpf_obj_get(pathname, type);
702
703 if (prog_fd < 0)
704 fprintf(stderr, "Couldn\'t retrieve pinned program \'%s\': %s\n",
705 pathname, strerror(errno));
706 return prog_fd;
707 }
708
/* Input modes accepted on the command line: classic BPF given inline
 * (CBPF_BYTECODE) or in a file (CBPF_FILE), and eBPF loaded from an ELF
 * object (EBPF_OBJECT) or fetched from a pinned bpf fs path (EBPF_PINNED).
 */
enum bpf_mode {
	CBPF_BYTECODE,
	CBPF_FILE,
	EBPF_OBJECT,
	EBPF_PINNED,
	BPF_MODE_MAX,
};
716
/* Parse the command line for one BPF program: pick the input mode
 * (restricted by @opt_tbl), then the optional type/section/export/verbose
 * keywords, and finally parse or load the program. Returns the cBPF
 * instruction count or an eBPF prog fd on success, negative on error.
 * Updates cfg->argc/argv to the remaining arguments and fills
 * cfg->object/section/uds.
 *
 * NOTE: NEXT_ARG()/NEXT_ARG_FWD()/PREV_ARG() mutate the local argc/argv,
 * so statement order below is significant.
 */
static int bpf_parse(enum bpf_prog_type *type, enum bpf_mode *mode,
		     struct bpf_cfg_in *cfg, const bool *opt_tbl)
{
	const char *file, *section, *uds_name;
	bool verbose = false;
	int i, ret, argc;
	char **argv;

	argv = cfg->argv;
	argc = cfg->argc;

	/* First word selects the mode, if enabled in opt_tbl. */
	if (opt_tbl[CBPF_BYTECODE] &&
	    (matches(*argv, "bytecode") == 0 ||
	     strcmp(*argv, "bc") == 0)) {
		*mode = CBPF_BYTECODE;
	} else if (opt_tbl[CBPF_FILE] &&
		   (matches(*argv, "bytecode-file") == 0 ||
		    strcmp(*argv, "bcf") == 0)) {
		*mode = CBPF_FILE;
	} else if (opt_tbl[EBPF_OBJECT] &&
		   (matches(*argv, "object-file") == 0 ||
		    strcmp(*argv, "obj") == 0)) {
		*mode = EBPF_OBJECT;
	} else if (opt_tbl[EBPF_PINNED] &&
		   (matches(*argv, "object-pinned") == 0 ||
		    matches(*argv, "pinned") == 0 ||
		    matches(*argv, "fd") == 0)) {
		*mode = EBPF_PINNED;
	} else {
		fprintf(stderr, "What mode is \"%s\"?\n", *argv);
		return -1;
	}

	NEXT_ARG();
	file = section = uds_name = NULL;
	if (*mode == EBPF_OBJECT || *mode == EBPF_PINNED) {
		file = *argv;
		NEXT_ARG_FWD();

		/* Optional "type <name>"; defaults to sched_cls when the
		 * caller passed BPF_PROG_TYPE_UNSPEC.
		 */
		if (*type == BPF_PROG_TYPE_UNSPEC) {
			if (argc > 0 && matches(*argv, "type") == 0) {
				NEXT_ARG();
				for (i = 0; i < ARRAY_SIZE(__bpf_prog_meta);
				     i++) {
					if (!__bpf_prog_meta[i].type)
						continue;
					if (!matches(*argv,
						     __bpf_prog_meta[i].type)) {
						*type = i;
						break;
					}
				}

				if (*type == BPF_PROG_TYPE_UNSPEC) {
					fprintf(stderr, "What type is \"%s\"?\n",
						*argv);
					return -1;
				}
				NEXT_ARG_FWD();
			} else {
				*type = BPF_PROG_TYPE_SCHED_CLS;
			}
		}

		/* Optional "section <name>"; defaults per program type. */
		section = bpf_prog_to_default_section(*type);
		if (argc > 0 && matches(*argv, "section") == 0) {
			NEXT_ARG();
			section = *argv;
			NEXT_ARG_FWD();
		}

		/* Optional UDS export target; $BPF_ENV_UDS takes priority
		 * over an "export" argument.
		 */
		if (__bpf_prog_meta[*type].may_uds_export) {
			uds_name = getenv(BPF_ENV_UDS);
			if (argc > 0 && !uds_name &&
			    matches(*argv, "export") == 0) {
				NEXT_ARG();
				uds_name = *argv;
				NEXT_ARG_FWD();
			}
		}

		if (argc > 0 && matches(*argv, "verbose") == 0) {
			verbose = true;
			NEXT_ARG_FWD();
		}

		PREV_ARG();
	}

	if (*mode == CBPF_BYTECODE || *mode == CBPF_FILE)
		ret = bpf_ops_parse(argc, argv, cfg->ops, *mode == CBPF_FILE);
	else if (*mode == EBPF_OBJECT)
		ret = bpf_obj_open(file, *type, section, verbose);
	else if (*mode == EBPF_PINNED)
		ret = bpf_obj_pinned(file, *type);
	else
		return -1;

	/* Hand the consumed argument state back to the caller. */
	cfg->object = file;
	cfg->section = section;
	cfg->uds = uds_name;
	cfg->argc = argc;
	cfg->argv = argv;

	return ret;
}
823
824 static int bpf_parse_opt_tbl(enum bpf_prog_type type, struct bpf_cfg_in *cfg,
825 const struct bpf_cfg_ops *ops, void *nl,
826 const bool *opt_tbl)
827 {
828 struct sock_filter opcodes[BPF_MAXINSNS];
829 char annotation[256];
830 enum bpf_mode mode;
831 int ret;
832
833 cfg->ops = opcodes;
834 ret = bpf_parse(&type, &mode, cfg, opt_tbl);
835 cfg->ops = NULL;
836 if (ret < 0)
837 return ret;
838
839 if (mode == CBPF_BYTECODE || mode == CBPF_FILE)
840 ops->cbpf_cb(nl, opcodes, ret);
841 if (mode == EBPF_OBJECT || mode == EBPF_PINNED) {
842 snprintf(annotation, sizeof(annotation), "%s:[%s]",
843 basename(cfg->object), mode == EBPF_PINNED ?
844 "*fsobj" : cfg->section);
845 ops->ebpf_cb(nl, ret, annotation);
846 }
847
848 return 0;
849 }
850
851 int bpf_parse_common(enum bpf_prog_type type, struct bpf_cfg_in *cfg,
852 const struct bpf_cfg_ops *ops, void *nl)
853 {
854 bool opt_tbl[BPF_MODE_MAX] = {};
855
856 if (ops->cbpf_cb) {
857 opt_tbl[CBPF_BYTECODE] = true;
858 opt_tbl[CBPF_FILE] = true;
859 }
860
861 if (ops->ebpf_cb) {
862 opt_tbl[EBPF_OBJECT] = true;
863 opt_tbl[EBPF_PINNED] = true;
864 }
865
866 return bpf_parse_opt_tbl(type, cfg, ops, nl, opt_tbl);
867 }
868
/* Graft an eBPF program into a pinned PROG_ARRAY map: parse/load the
 * program from argv, determine the array index from @key or from the
 * section name ("<sec>/<idx>"), sanity-check the pinned map's properties
 * and write the new prog fd into that slot. Returns 0 on success,
 * negative on failure.
 */
int bpf_graft_map(const char *map_path, uint32_t *key, int argc, char **argv)
{
	enum bpf_prog_type type = BPF_PROG_TYPE_UNSPEC;
	const bool opt_tbl[BPF_MODE_MAX] = {
		[EBPF_OBJECT] = true,
		[EBPF_PINNED] = true,
	};
	/* Expected shape of the target map (int keys/values). */
	const struct bpf_elf_map test = {
		.type = BPF_MAP_TYPE_PROG_ARRAY,
		.size_key = sizeof(int),
		.size_value = sizeof(int),
	};
	struct bpf_cfg_in cfg = {
		.argc = argc,
		.argv = argv,
	};
	int ret, prog_fd, map_fd;
	enum bpf_mode mode;
	uint32_t map_key;

	prog_fd = bpf_parse(&type, &mode, &cfg, opt_tbl);
	if (prog_fd < 0)
		return prog_fd;
	if (key) {
		map_key = *key;
	} else {
		/* No explicit key: take the index from "<sec>/<idx>". */
		ret = sscanf(cfg.section, "%*i/%i", &map_key);
		if (ret != 1) {
			fprintf(stderr, "Couldn\'t infer map key from section name! Please provide \'key\' argument!\n");
			ret = -EINVAL;
			goto out_prog;
		}
	}

	map_fd = bpf_obj_get(map_path, type);
	if (map_fd < 0) {
		fprintf(stderr, "Couldn\'t retrieve pinned map \'%s\': %s\n",
			map_path, strerror(errno));
		ret = map_fd;
		goto out_prog;
	}

	/* Compare only up to max_elem: the array length may differ. */
	ret = bpf_map_selfcheck_pinned(map_fd, &test,
				       offsetof(struct bpf_elf_map, max_elem),
				       type);
	if (ret < 0) {
		fprintf(stderr, "Map \'%s\' self-check failed!\n", map_path);
		goto out_map;
	}

	ret = bpf_map_update(map_fd, &map_key, &prog_fd, BPF_ANY);
	if (ret < 0)
		fprintf(stderr, "Map update failed: %s\n", strerror(errno));
out_map:
	close(map_fd);
out_prog:
	close(prog_fd);
	return ret;
}
928
929 int bpf_prog_attach_fd(int prog_fd, int target_fd, enum bpf_attach_type type)
930 {
931 union bpf_attr attr = {};
932
933 attr.target_fd = target_fd;
934 attr.attach_bpf_fd = prog_fd;
935 attr.attach_type = type;
936
937 return bpf(BPF_PROG_ATTACH, &attr, sizeof(attr));
938 }
939
940 int bpf_prog_detach_fd(int target_fd, enum bpf_attach_type type)
941 {
942 union bpf_attr attr = {};
943
944 attr.target_fd = target_fd;
945 attr.attach_type = type;
946
947 return bpf(BPF_PROG_DETACH, &attr, sizeof(attr));
948 }
949
950 int bpf_prog_load(enum bpf_prog_type type, const struct bpf_insn *insns,
951 size_t size_insns, const char *license, char *log,
952 size_t size_log)
953 {
954 union bpf_attr attr = {};
955
956 attr.prog_type = type;
957 attr.insns = bpf_ptr_to_u64(insns);
958 attr.insn_cnt = size_insns / sizeof(struct bpf_insn);
959 attr.license = bpf_ptr_to_u64(license);
960
961 if (size_log > 0) {
962 attr.log_buf = bpf_ptr_to_u64(log);
963 attr.log_size = size_log;
964 attr.log_level = 1;
965 }
966
967 return bpf(BPF_PROG_LOAD, &attr, sizeof(attr));
968 }
969
970 #ifdef HAVE_ELF
/* One program extracted from an ELF section, ready for bpf_prog_load(). */
struct bpf_elf_prog {
	enum bpf_prog_type type;
	const struct bpf_insn *insns;
	size_t size;			/* size of insns in bytes */
	const char *license;
};

/* Hash-table node mapping a custom pinning id to its subpath. */
struct bpf_hash_entry {
	unsigned int pinning;
	const char *subpath;
	struct bpf_hash_entry *next;
};

/* State carried through loading one ELF object. */
struct bpf_elf_ctx {
	Elf *elf_fd;
	GElf_Ehdr elf_hdr;
	Elf_Data *sym_tab;		/* symbol table data */
	Elf_Data *str_tab;		/* string table data */
	int obj_fd;			/* fd of the opened object file */
	int map_fds[ELF_MAX_MAPS];	/* created/retrieved map fds */
	struct bpf_elf_map maps[ELF_MAX_MAPS];
	int sym_num;
	int map_num;
	int map_len;
	bool *sec_done;			/* per-section "processed" flags */
	int sec_maps;			/* NOTE(review): presumably the maps
					 * section index — confirm below */
	char license[ELF_MAX_LICENSE_LEN];
	enum bpf_prog_type type;
	bool verbose;
	struct bpf_elf_st stat;
	struct bpf_hash_entry *ht[256];	/* custom pinning lookup table */
	char *log;			/* verifier log buffer */
	size_t log_size;
};

/* One ELF section: header, payload and name. */
struct bpf_elf_sec_data {
	GElf_Shdr sec_hdr;
	Elf_Data *sec_data;
	const char *sec_name;
};

/* Map info handed out to callers iterating over an object's maps. */
struct bpf_map_data {
	int *fds;
	const char *obj;
	struct bpf_elf_st *st;
	struct bpf_elf_map *ent;
};
1018
/* Print a printf-style message to stderr, then dump the accumulated
 * verifier log (truncated to the last BPF_MAX_LOG bytes unless verbose)
 * and reset the log buffer for the next attempt.
 */
static __check_format_string(2, 3) void
bpf_dump_error(struct bpf_elf_ctx *ctx, const char *format, ...)
{
	va_list vl;

	va_start(vl, format);
	vfprintf(stderr, format, vl);
	va_end(vl);

	if (ctx->log && ctx->log[0]) {
		if (ctx->verbose) {
			fprintf(stderr, "%s\n", ctx->log);
		} else {
			unsigned int off = 0, len = strlen(ctx->log);

			/* Only show the tail in non-verbose mode. */
			if (len > BPF_MAX_LOG) {
				off = len - BPF_MAX_LOG;
				fprintf(stderr, "Skipped %u bytes, use \'verb\' option for the full verbose log.\n[...]\n",
					off);
			}
			fprintf(stderr, "%s\n", ctx->log + off);
		}

		memset(ctx->log, 0, ctx->log_size);
	}
}
1045
1046 static int bpf_log_realloc(struct bpf_elf_ctx *ctx)
1047 {
1048 const size_t log_max = UINT_MAX >> 8;
1049 size_t log_size = ctx->log_size;
1050 void *ptr;
1051
1052 if (!ctx->log) {
1053 log_size = 65536;
1054 } else if (log_size < log_max) {
1055 log_size <<= 1;
1056 if (log_size > log_max)
1057 log_size = log_max;
1058 } else {
1059 return -EINVAL;
1060 }
1061
1062 ptr = realloc(ctx->log, log_size);
1063 if (!ptr)
1064 return -ENOMEM;
1065
1066 ctx->log = ptr;
1067 ctx->log_size = log_size;
1068
1069 return 0;
1070 }
1071
1072 static int bpf_map_create(enum bpf_map_type type, uint32_t size_key,
1073 uint32_t size_value, uint32_t max_elem,
1074 uint32_t flags, int inner_fd)
1075 {
1076 union bpf_attr attr = {};
1077
1078 attr.map_type = type;
1079 attr.key_size = size_key;
1080 attr.value_size = inner_fd ? sizeof(int) : size_value;
1081 attr.max_entries = max_elem;
1082 attr.map_flags = flags;
1083 attr.inner_map_fd = inner_fd;
1084
1085 return bpf(BPF_MAP_CREATE, &attr, sizeof(attr));
1086 }
1087
1088 static int bpf_obj_pin(int fd, const char *pathname)
1089 {
1090 union bpf_attr attr = {};
1091
1092 attr.pathname = bpf_ptr_to_u64(pathname);
1093 attr.bpf_fd = fd;
1094
1095 return bpf(BPF_OBJ_PIN, &attr, sizeof(attr));
1096 }
1097
/*
 * Compute the SHA-1 digest of the file @object using the kernel's AF_ALG
 * interface. @out must hold exactly @len == 20 bytes. Returns 0 on
 * success, or a negative value / -1 on failure.
 */
static int bpf_obj_hash(const char *object, uint8_t *out, size_t len)
{
	struct sockaddr_alg alg = {
		.salg_family	= AF_ALG,
		.salg_type	= "hash",
		.salg_name	= "sha1",
	};
	int ret, cfd, ofd, ffd;
	struct stat stbuff;
	ssize_t size;

	if (!object || len != 20)
		return -EINVAL;

	cfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	if (cfd < 0) {
		fprintf(stderr, "Cannot get AF_ALG socket: %s\n",
			strerror(errno));
		return cfd;
	}

	ret = bind(cfd, (struct sockaddr *)&alg, sizeof(alg));
	if (ret < 0) {
		fprintf(stderr, "Error binding socket: %s\n", strerror(errno));
		goto out_cfd;
	}

	ofd = accept(cfd, NULL, 0);
	if (ofd < 0) {
		fprintf(stderr, "Error accepting socket: %s\n",
			strerror(errno));
		ret = ofd;
		goto out_cfd;
	}

	ffd = open(object, O_RDONLY);
	if (ffd < 0) {
		fprintf(stderr, "Error opening object %s: %s\n",
			object, strerror(errno));
		ret = ffd;
		goto out_ofd;
	}

	ret = fstat(ffd, &stbuff);
	if (ret < 0) {
		fprintf(stderr, "Error doing fstat: %s\n",
			strerror(errno));
		goto out_ffd;
	}

	size = sendfile(ofd, ffd, NULL, stbuff.st_size);
	if (size != stbuff.st_size) {
		/* st_size is off_t, not size_t: cast explicitly so the
		 * %zu conversion is well-defined on all ABIs.
		 */
		fprintf(stderr, "Error from sendfile (%zd vs %zu bytes): %s\n",
			size, (size_t)stbuff.st_size, strerror(errno));
		ret = -1;
		goto out_ffd;
	}

	size = read(ofd, out, len);
	if (size != (ssize_t)len) {
		fprintf(stderr, "Error from read (%zd vs %zu bytes): %s\n",
			size, len, strerror(errno));
		ret = -1;
	} else {
		ret = 0;
	}
out_ffd:
	close(ffd);
out_ofd:
	close(ofd);
out_cfd:
	close(cfd);
	return ret;
}
1172
/* Return the hex SHA-1 of the object file as a per-process "uid" for
 * object-namespace pinning; computed once and cached (later calls may
 * pass NULL for @pathname). Returns NULL if hashing failed.
 */
static const char *bpf_get_obj_uid(const char *pathname)
{
	static char bpf_uid[64];
	static bool bpf_uid_cached;
	uint8_t sha[20];

	if (!bpf_uid_cached) {
		if (bpf_obj_hash(pathname, sha, sizeof(sha))) {
			fprintf(stderr, "Object hashing failed!\n");
			return NULL;
		}

		hexstring_n2a(sha, sizeof(sha), bpf_uid, sizeof(bpf_uid));
		bpf_uid_cached = true;
	}

	return bpf_uid;
}
1194
1195 static int bpf_init_env(const char *pathname)
1196 {
1197 struct rlimit limit = {
1198 .rlim_cur = RLIM_INFINITY,
1199 .rlim_max = RLIM_INFINITY,
1200 };
1201
1202 /* Don't bother in case we fail! */
1203 setrlimit(RLIMIT_MEMLOCK, &limit);
1204
1205 if (!bpf_get_work_dir(BPF_PROG_TYPE_UNSPEC)) {
1206 fprintf(stderr, "Continuing without mounted eBPF fs. Too old kernel?\n");
1207 return 0;
1208 }
1209
1210 if (!bpf_get_obj_uid(pathname))
1211 return -1;
1212
1213 return 0;
1214 }
1215
1216 static const char *bpf_custom_pinning(const struct bpf_elf_ctx *ctx,
1217 uint32_t pinning)
1218 {
1219 struct bpf_hash_entry *entry;
1220
1221 entry = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
1222 while (entry && entry->pinning != pinning)
1223 entry = entry->next;
1224
1225 return entry ? entry->subpath : NULL;
1226 }
1227
1228 static bool bpf_no_pinning(const struct bpf_elf_ctx *ctx,
1229 uint32_t pinning)
1230 {
1231 switch (pinning) {
1232 case PIN_OBJECT_NS:
1233 case PIN_GLOBAL_NS:
1234 return false;
1235 case PIN_NONE:
1236 return true;
1237 default:
1238 return !bpf_custom_pinning(ctx, pinning);
1239 }
1240 }
1241
1242 static void bpf_make_pathname(char *pathname, size_t len, const char *name,
1243 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1244 {
1245 switch (pinning) {
1246 case PIN_OBJECT_NS:
1247 snprintf(pathname, len, "%s/%s/%s",
1248 bpf_get_work_dir(ctx->type),
1249 bpf_get_obj_uid(NULL), name);
1250 break;
1251 case PIN_GLOBAL_NS:
1252 snprintf(pathname, len, "%s/%s/%s",
1253 bpf_get_work_dir(ctx->type),
1254 BPF_DIR_GLOBALS, name);
1255 break;
1256 default:
1257 snprintf(pathname, len, "%s/../%s/%s",
1258 bpf_get_work_dir(ctx->type),
1259 bpf_custom_pinning(ctx, pinning), name);
1260 break;
1261 }
1262 }
1263
1264 static int bpf_probe_pinned(const char *name, const struct bpf_elf_ctx *ctx,
1265 uint32_t pinning)
1266 {
1267 char pathname[PATH_MAX];
1268
1269 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1270 return 0;
1271
1272 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1273 return bpf_obj_get(pathname, ctx->type);
1274 }
1275
1276 static int bpf_make_obj_path(const struct bpf_elf_ctx *ctx)
1277 {
1278 char tmp[PATH_MAX];
1279 int ret;
1280
1281 snprintf(tmp, sizeof(tmp), "%s/%s", bpf_get_work_dir(ctx->type),
1282 bpf_get_obj_uid(NULL));
1283
1284 ret = mkdir(tmp, S_IRWXU);
1285 if (ret && errno != EEXIST) {
1286 fprintf(stderr, "mkdir %s failed: %s\n", tmp, strerror(errno));
1287 return ret;
1288 }
1289
1290 return 0;
1291 }
1292
1293 static int bpf_make_custom_path(const struct bpf_elf_ctx *ctx,
1294 const char *todo)
1295 {
1296 char tmp[PATH_MAX], rem[PATH_MAX], *sub;
1297 int ret;
1298
1299 snprintf(tmp, sizeof(tmp), "%s/../", bpf_get_work_dir(ctx->type));
1300 snprintf(rem, sizeof(rem), "%s/", todo);
1301 sub = strtok(rem, "/");
1302
1303 while (sub) {
1304 if (strlen(tmp) + strlen(sub) + 2 > PATH_MAX)
1305 return -EINVAL;
1306
1307 strcat(tmp, sub);
1308 strcat(tmp, "/");
1309
1310 ret = mkdir(tmp, S_IRWXU);
1311 if (ret && errno != EEXIST) {
1312 fprintf(stderr, "mkdir %s failed: %s\n", tmp,
1313 strerror(errno));
1314 return ret;
1315 }
1316
1317 sub = strtok(NULL, "/");
1318 }
1319
1320 return 0;
1321 }
1322
1323 static int bpf_place_pinned(int fd, const char *name,
1324 const struct bpf_elf_ctx *ctx, uint32_t pinning)
1325 {
1326 char pathname[PATH_MAX];
1327 const char *tmp;
1328 int ret = 0;
1329
1330 if (bpf_no_pinning(ctx, pinning) || !bpf_get_work_dir(ctx->type))
1331 return 0;
1332
1333 if (pinning == PIN_OBJECT_NS)
1334 ret = bpf_make_obj_path(ctx);
1335 else if ((tmp = bpf_custom_pinning(ctx, pinning)))
1336 ret = bpf_make_custom_path(ctx, tmp);
1337 if (ret < 0)
1338 return ret;
1339
1340 bpf_make_pathname(pathname, sizeof(pathname), name, ctx, pinning);
1341 return bpf_obj_pin(fd, pathname);
1342 }
1343
1344 static void bpf_prog_report(int fd, const char *section,
1345 const struct bpf_elf_prog *prog,
1346 struct bpf_elf_ctx *ctx)
1347 {
1348 unsigned int insns = prog->size / sizeof(struct bpf_insn);
1349
1350 fprintf(stderr, "\nProg section \'%s\' %s%s (%d)!\n", section,
1351 fd < 0 ? "rejected: " : "loaded",
1352 fd < 0 ? strerror(errno) : "",
1353 fd < 0 ? errno : fd);
1354
1355 fprintf(stderr, " - Type: %u\n", prog->type);
1356 fprintf(stderr, " - Instructions: %u (%u over limit)\n",
1357 insns, insns > BPF_MAXINSNS ? insns - BPF_MAXINSNS : 0);
1358 fprintf(stderr, " - License: %s\n\n", prog->license);
1359
1360 bpf_dump_error(ctx, "Verifier analysis:\n\n");
1361 }
1362
/* Load a single program section into the kernel via bpf(BPF_PROG_LOAD).
 *
 * The verifier log is captured into ctx->log on failure (or always in
 * verbose mode). If the load failed because the log buffer was too
 * small (ENOSPC, or no buffer allocated yet), the buffer is enlarged
 * and the load retried up to 10 times so the user still gets a
 * debuggable error dump.
 *
 * Returns the program fd on success, a negative value otherwise.
 */
static int bpf_prog_attach(const char *section,
			   const struct bpf_elf_prog *prog,
			   struct bpf_elf_ctx *ctx)
{
	int tries = 0, fd;
retry:
	/* Clear errno so the ENOSPC check below reflects this attempt. */
	errno = 0;
	fd = bpf_prog_load(prog->type, prog->insns, prog->size,
			   prog->license, ctx->log, ctx->log_size);
	if (fd < 0 || ctx->verbose) {
		/* The verifier log is pretty chatty, sometimes so chatty
		 * on larger programs, that we could fail to dump everything
		 * into our buffer. Still, try to give a debuggable error
		 * log for the user, so enlarge it and re-fail.
		 */
		if (fd < 0 && (errno == ENOSPC || !ctx->log_size)) {
			if (tries++ < 10 && !bpf_log_realloc(ctx))
				goto retry;

			fprintf(stderr, "Log buffer too small to dump verifier log %zu bytes (%d tries)!\n",
				ctx->log_size, tries);
			return fd;
		}

		/* Report rejected/loaded status plus verifier analysis. */
		bpf_prog_report(fd, section, prog, ctx);
	}

	return fd;
}
1392
1393 static void bpf_map_report(int fd, const char *name,
1394 const struct bpf_elf_map *map,
1395 struct bpf_elf_ctx *ctx, int inner_fd)
1396 {
1397 fprintf(stderr, "Map object \'%s\' %s%s (%d)!\n", name,
1398 fd < 0 ? "rejected: " : "loaded",
1399 fd < 0 ? strerror(errno) : "",
1400 fd < 0 ? errno : fd);
1401
1402 fprintf(stderr, " - Type: %u\n", map->type);
1403 fprintf(stderr, " - Identifier: %u\n", map->id);
1404 fprintf(stderr, " - Pinning: %u\n", map->pinning);
1405 fprintf(stderr, " - Size key: %u\n", map->size_key);
1406 fprintf(stderr, " - Size value: %u\n",
1407 inner_fd ? (int)sizeof(int) : map->size_value);
1408 fprintf(stderr, " - Max elems: %u\n", map->max_elem);
1409 fprintf(stderr, " - Flags: %#x\n\n", map->flags);
1410 }
1411
1412 static int bpf_find_map_id(const struct bpf_elf_ctx *ctx, uint32_t id)
1413 {
1414 int i;
1415
1416 for (i = 0; i < ctx->map_num; i++) {
1417 if (ctx->maps[i].id != id)
1418 continue;
1419 if (ctx->map_fds[i] < 0)
1420 return -EINVAL;
1421
1422 return ctx->map_fds[i];
1423 }
1424
1425 return -ENOENT;
1426 }
1427
1428 static int bpf_derive_elf_map_from_fdinfo(int fd, struct bpf_elf_map *map)
1429 {
1430 char file[PATH_MAX], buff[4096];
1431 unsigned int val;
1432 FILE *fp;
1433
1434 snprintf(file, sizeof(file), "/proc/%d/fdinfo/%d", getpid(), fd);
1435
1436 memset(map, 0, sizeof(*map));
1437
1438 fp = fopen(file, "r");
1439 if (!fp) {
1440 fprintf(stderr, "No procfs support?!\n");
1441 return -EIO;
1442 }
1443
1444 while (fgets(buff, sizeof(buff), fp)) {
1445 if (sscanf(buff, "map_type:\t%u", &val) == 1)
1446 map->type = val;
1447 else if (sscanf(buff, "key_size:\t%u", &val) == 1)
1448 map->size_key = val;
1449 else if (sscanf(buff, "value_size:\t%u", &val) == 1)
1450 map->size_value = val;
1451 else if (sscanf(buff, "max_entries:\t%u", &val) == 1)
1452 map->max_elem = val;
1453 else if (sscanf(buff, "map_flags:\t%i", &val) == 1)
1454 map->flags = val;
1455 }
1456
1457 fclose(fp);
1458 return 0;
1459 }
1460
1461 static void bpf_report_map_in_map(int outer_fd, int inner_fd, uint32_t idx)
1462 {
1463 struct bpf_elf_map outer_map;
1464 int ret;
1465
1466 fprintf(stderr, "Cannot insert map into map! ");
1467
1468 ret = bpf_derive_elf_map_from_fdinfo(outer_fd, &outer_map);
1469 if (!ret) {
1470 if (idx >= outer_map.max_elem &&
1471 outer_map.type == BPF_MAP_TYPE_ARRAY_OF_MAPS) {
1472 fprintf(stderr, "Outer map has %u elements, index %u is invalid!\n",
1473 outer_map.max_elem, idx);
1474 return;
1475 }
1476 }
1477
1478 fprintf(stderr, "Different map specs used for outer and inner map?\n");
1479 }
1480
1481 static bool bpf_is_map_in_map_type(const struct bpf_elf_map *map)
1482 {
1483 return map->type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
1484 map->type == BPF_MAP_TYPE_HASH_OF_MAPS;
1485 }
1486
/* Create (or re-use) the kernel object for one ELF map.
 *
 * Resolution order:
 *   1. If a pinned object already exists in the bpf fs, self-check it
 *      against the ELF spec and re-use its fd.
 *   2. First pass (have_map_in_map != NULL): map-in-map outer maps with
 *      a valid inner_id are only counted and deferred (return 0), since
 *      the inner map fd must be created first.
 *   3. Second pass (have_map_in_map == NULL): resolve the inner map fd
 *      by its ELF id so the outer map can be created against it.
 *   4. Otherwise create the map, report in verbose/error case, and pin
 *      it if requested.
 *
 * Returns the map fd (> 0), 0 for a deferred map-in-map, or a negative
 * error code.
 */
static int bpf_map_attach(const char *name, const struct bpf_elf_map *map,
			  struct bpf_elf_ctx *ctx, int *have_map_in_map)
{
	int fd, ret, map_inner_fd = 0;

	/* Try to re-use an already pinned object first. */
	fd = bpf_probe_pinned(name, ctx, map->pinning);
	if (fd > 0) {
		ret = bpf_map_selfcheck_pinned(fd, map,
					       offsetof(struct bpf_elf_map,
							id), ctx->type);
		if (ret < 0) {
			close(fd);
			fprintf(stderr, "Map \'%s\' self-check failed!\n",
				name);
			return ret;
		}
		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' loaded as pinned!\n",
				name);
		return fd;
	}

	/* First pass: defer outer maps until inner map fds are known. */
	if (have_map_in_map && bpf_is_map_in_map_type(map)) {
		(*have_map_in_map)++;
		if (map->inner_id)
			return 0;
		fprintf(stderr, "Map \'%s\' cannot be created since no inner map ID defined!\n",
			name);
		return -EINVAL;
	}

	/* Second pass: look up the inner map's fd by its ELF map id. */
	if (!have_map_in_map && bpf_is_map_in_map_type(map)) {
		map_inner_fd = bpf_find_map_id(ctx, map->inner_id);
		if (map_inner_fd < 0) {
			fprintf(stderr, "Map \'%s\' cannot be loaded. Inner map with ID %u not found!\n",
				name, map->inner_id);
			return -EINVAL;
		}
	}

	errno = 0;
	fd = bpf_map_create(map->type, map->size_key, map->size_value,
			    map->max_elem, map->flags, map_inner_fd);
	if (fd < 0 || ctx->verbose) {
		bpf_map_report(fd, name, map, ctx, map_inner_fd);
		if (fd < 0)
			return fd;
	}

	/* EEXIST on pinning is tolerated: someone else may have pinned
	 * the same object in the meantime.
	 */
	ret = bpf_place_pinned(fd, name, ctx, map->pinning);
	if (ret < 0 && errno != EEXIST) {
		fprintf(stderr, "Could not pin %s map: %s\n", name,
			strerror(errno));
		close(fd);
		return ret;
	}

	return fd;
}
1546
1547 static const char *bpf_str_tab_name(const struct bpf_elf_ctx *ctx,
1548 const GElf_Sym *sym)
1549 {
1550 return ctx->str_tab->d_buf + sym->st_name;
1551 }
1552
1553 static const char *bpf_map_fetch_name(struct bpf_elf_ctx *ctx, int which)
1554 {
1555 GElf_Sym sym;
1556 int i;
1557
1558 for (i = 0; i < ctx->sym_num; i++) {
1559 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1560 continue;
1561
1562 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1563 GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
1564 sym.st_shndx != ctx->sec_maps ||
1565 sym.st_value / ctx->map_len != which)
1566 continue;
1567
1568 return bpf_str_tab_name(ctx, &sym);
1569 }
1570
1571 return NULL;
1572 }
1573
/* Create all maps described in the ELF maps section, in three passes:
 *
 *   Pass 1: attach every map; map-in-map outer maps referencing an
 *           inner map by id are deferred (their fd slot becomes -1).
 *   Pass 2: attach the deferred outer maps now that the inner map fds
 *           exist.
 *   Pass 3: for every map acting as an inner map (has an id, no
 *           inner_id of its own, and a valid inner_idx), insert its fd
 *           at slot inner_idx into each outer map whose inner_id
 *           matches.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int bpf_maps_attach_all(struct bpf_elf_ctx *ctx)
{
	int i, j, ret, fd, inner_fd, inner_idx, have_map_in_map = 0;
	const char *map_name;

	for (i = 0; i < ctx->map_num; i++) {
		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, &ctx->maps[i], ctx,
				    &have_map_in_map);
		if (fd < 0)
			return fd;

		/* fd == 0 marks a deferred outer map; record as -1. */
		ctx->map_fds[i] = !fd ? -1 : fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		if (ctx->map_fds[i] >= 0)
			continue;

		map_name = bpf_map_fetch_name(ctx, i);
		if (!map_name)
			return -EIO;

		fd = bpf_map_attach(map_name, &ctx->maps[i], ctx,
				    NULL);
		if (fd < 0)
			return fd;

		ctx->map_fds[i] = fd;
	}

	for (i = 0; have_map_in_map && i < ctx->map_num; i++) {
		/* Only maps that are themselves inner maps. */
		if (!ctx->maps[i].id ||
		    ctx->maps[i].inner_id ||
		    ctx->maps[i].inner_idx == -1)
			continue;

		inner_fd = ctx->map_fds[i];
		inner_idx = ctx->maps[i].inner_idx;

		for (j = 0; j < ctx->map_num; j++) {
			if (!bpf_is_map_in_map_type(&ctx->maps[j]))
				continue;
			if (ctx->maps[j].inner_id != ctx->maps[i].id)
				continue;

			ret = bpf_map_update(ctx->map_fds[j], &inner_idx,
					     &inner_fd, BPF_ANY);
			if (ret < 0) {
				bpf_report_map_in_map(ctx->map_fds[j],
						      inner_fd, inner_idx);
				return ret;
			}
		}
	}

	return 0;
}
1635
1636 static int bpf_map_num_sym(struct bpf_elf_ctx *ctx)
1637 {
1638 int i, num = 0;
1639 GElf_Sym sym;
1640
1641 for (i = 0; i < ctx->sym_num; i++) {
1642 if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
1643 continue;
1644
1645 if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
1646 GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
1647 sym.st_shndx != ctx->sec_maps)
1648 continue;
1649 num++;
1650 }
1651
1652 return num;
1653 }
1654
1655 static int bpf_fill_section_data(struct bpf_elf_ctx *ctx, int section,
1656 struct bpf_elf_sec_data *data)
1657 {
1658 Elf_Data *sec_edata;
1659 GElf_Shdr sec_hdr;
1660 Elf_Scn *sec_fd;
1661 char *sec_name;
1662
1663 memset(data, 0, sizeof(*data));
1664
1665 sec_fd = elf_getscn(ctx->elf_fd, section);
1666 if (!sec_fd)
1667 return -EINVAL;
1668 if (gelf_getshdr(sec_fd, &sec_hdr) != &sec_hdr)
1669 return -EIO;
1670
1671 sec_name = elf_strptr(ctx->elf_fd, ctx->elf_hdr.e_shstrndx,
1672 sec_hdr.sh_name);
1673 if (!sec_name || !sec_hdr.sh_size)
1674 return -ENOENT;
1675
1676 sec_edata = elf_getdata(sec_fd, NULL);
1677 if (!sec_edata || elf_getdata(sec_fd, sec_edata))
1678 return -EIO;
1679
1680 memcpy(&data->sec_hdr, &sec_hdr, sizeof(sec_hdr));
1681
1682 data->sec_name = sec_name;
1683 data->sec_data = sec_edata;
1684 return 0;
1685 }
1686
/* Minimal, historic layout of struct bpf_elf_map: the smallest map spec
 * the fixup code in bpf_fetch_maps_end() still accepts from older
 * object files. Members missing relative to the full struct are
 * zero-filled during fixup.
 */
struct bpf_elf_map_min {
	__u32 type;		/* BPF_MAP_TYPE_* */
	__u32 size_key;		/* key size in bytes */
	__u32 size_value;	/* value size in bytes */
	__u32 max_elem;		/* maximum number of elements */
};
1693
1694 static int bpf_fetch_maps_begin(struct bpf_elf_ctx *ctx, int section,
1695 struct bpf_elf_sec_data *data)
1696 {
1697 ctx->map_num = data->sec_data->d_size;
1698 ctx->sec_maps = section;
1699 ctx->sec_done[section] = true;
1700
1701 if (ctx->map_num > sizeof(ctx->maps)) {
1702 fprintf(stderr, "Too many BPF maps in ELF section!\n");
1703 return -ENOMEM;
1704 }
1705
1706 memcpy(ctx->maps, data->sec_data->d_buf, ctx->map_num);
1707 return 0;
1708 }
1709
/* Verify that the maps section symbols form a gapless array of entries
 * of size ctx->map_len: for every expected offset (0, map_len,
 * 2 * map_len, ... up to 'end') a symbol with exactly that st_value
 * must exist. Symbol order need not be linear, hence the full inner
 * rescan per offset.
 *
 * Returns 0 if every slot is covered, -1 otherwise.
 */
static int bpf_map_verify_all_offs(struct bpf_elf_ctx *ctx, int end)
{
	GElf_Sym sym;
	int off, i;

	for (off = 0; off < end; off += ctx->map_len) {
		/* Order doesn't need to be linear here, hence we walk
		 * the table again.
		 */
		for (i = 0; i < ctx->sym_num; i++) {
			if (gelf_getsym(ctx->sym_tab, i, &sym) != &sym)
				continue;
			if (GELF_ST_BIND(sym.st_info) != STB_GLOBAL ||
			    GELF_ST_TYPE(sym.st_info) != STT_NOTYPE ||
			    sym.st_shndx != ctx->sec_maps)
				continue;
			if (sym.st_value == off)
				break;
			/* Last symbol checked with no match: gap found. */
			if (i == ctx->sym_num - 1)
				return -1;
		}
	}

	return off == end ? 0 : -1;
}
1735
1736 static int bpf_fetch_maps_end(struct bpf_elf_ctx *ctx)
1737 {
1738 struct bpf_elf_map fixup[ARRAY_SIZE(ctx->maps)] = {};
1739 int i, sym_num = bpf_map_num_sym(ctx);
1740 __u8 *buff;
1741
1742 if (sym_num == 0 || sym_num > ARRAY_SIZE(ctx->maps)) {
1743 fprintf(stderr, "%u maps not supported in current map section!\n",
1744 sym_num);
1745 return -EINVAL;
1746 }
1747
1748 if (ctx->map_num % sym_num != 0 ||
1749 ctx->map_num % sizeof(__u32) != 0) {
1750 fprintf(stderr, "Number BPF map symbols are not multiple of struct bpf_elf_map!\n");
1751 return -EINVAL;
1752 }
1753
1754 ctx->map_len = ctx->map_num / sym_num;
1755 if (bpf_map_verify_all_offs(ctx, ctx->map_num)) {
1756 fprintf(stderr, "Different struct bpf_elf_map in use!\n");
1757 return -EINVAL;
1758 }
1759
1760 if (ctx->map_len == sizeof(struct bpf_elf_map)) {
1761 ctx->map_num = sym_num;
1762 return 0;
1763 } else if (ctx->map_len > sizeof(struct bpf_elf_map)) {
1764 fprintf(stderr, "struct bpf_elf_map not supported, coming from future version?\n");
1765 return -EINVAL;
1766 } else if (ctx->map_len < sizeof(struct bpf_elf_map_min)) {
1767 fprintf(stderr, "struct bpf_elf_map too small, not supported!\n");
1768 return -EINVAL;
1769 }
1770
1771 ctx->map_num = sym_num;
1772 for (i = 0, buff = (void *)ctx->maps; i < ctx->map_num;
1773 i++, buff += ctx->map_len) {
1774 /* The fixup leaves the rest of the members as zero, which
1775 * is fine currently, but option exist to set some other
1776 * default value as well when needed in future.
1777 */
1778 memcpy(&fixup[i], buff, ctx->map_len);
1779 }
1780
1781 memcpy(ctx->maps, fixup, sizeof(fixup));
1782
1783 printf("Note: %zu bytes struct bpf_elf_map fixup performed due to size mismatch!\n",
1784 sizeof(struct bpf_elf_map) - ctx->map_len);
1785 return 0;
1786 }
1787
1788 static int bpf_fetch_license(struct bpf_elf_ctx *ctx, int section,
1789 struct bpf_elf_sec_data *data)
1790 {
1791 if (data->sec_data->d_size > sizeof(ctx->license))
1792 return -ENOMEM;
1793
1794 memcpy(ctx->license, data->sec_data->d_buf, data->sec_data->d_size);
1795 ctx->sec_done[section] = true;
1796 return 0;
1797 }
1798
1799 static int bpf_fetch_symtab(struct bpf_elf_ctx *ctx, int section,
1800 struct bpf_elf_sec_data *data)
1801 {
1802 ctx->sym_tab = data->sec_data;
1803 ctx->sym_num = data->sec_hdr.sh_size / data->sec_hdr.sh_entsize;
1804 ctx->sec_done[section] = true;
1805 return 0;
1806 }
1807
1808 static int bpf_fetch_strtab(struct bpf_elf_ctx *ctx, int section,
1809 struct bpf_elf_sec_data *data)
1810 {
1811 ctx->str_tab = data->sec_data;
1812 ctx->sec_done[section] = true;
1813 return 0;
1814 }
1815
1816 static bool bpf_has_map_data(const struct bpf_elf_ctx *ctx)
1817 {
1818 return ctx->sym_tab && ctx->str_tab && ctx->sec_maps;
1819 }
1820
/* First scan over all ELF sections: collect everything needed before
 * program sections can be loaded — maps section, license string,
 * symbol table and string table. When complete map data was found,
 * fix up the map structs for possibly differing struct sizes and
 * create all maps in the kernel.
 *
 * Returns >= 0 on success, a negative error code otherwise.
 */
static int bpf_fetch_ancillary(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_sec_data data;
	int i, ret = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0) /* Empty or otherwise uninteresting section. */
			continue;

		if (data.sec_hdr.sh_type == SHT_PROGBITS &&
		    !strcmp(data.sec_name, ELF_SECTION_MAPS))
			ret = bpf_fetch_maps_begin(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_PROGBITS &&
			 !strcmp(data.sec_name, ELF_SECTION_LICENSE))
			ret = bpf_fetch_license(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_SYMTAB &&
			 !strcmp(data.sec_name, ".symtab"))
			ret = bpf_fetch_symtab(ctx, i, &data);
		else if (data.sec_hdr.sh_type == SHT_STRTAB &&
			 !strcmp(data.sec_name, ".strtab"))
			ret = bpf_fetch_strtab(ctx, i, &data);
		if (ret < 0) {
			fprintf(stderr, "Error parsing section %d! Perhaps check with readelf -a?\n",
				i);
			return ret;
		}
	}

	if (bpf_has_map_data(ctx)) {
		ret = bpf_fetch_maps_end(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error fixing up map structure, incompatible struct bpf_elf_map used?\n");
			return ret;
		}

		ret = bpf_maps_attach_all(ctx);
		if (ret < 0) {
			fprintf(stderr, "Error loading maps into kernel!\n");
			return ret;
		}
	}

	return ret;
}
1866
1867 static int bpf_fetch_prog(struct bpf_elf_ctx *ctx, const char *section,
1868 bool *sseen)
1869 {
1870 struct bpf_elf_sec_data data;
1871 struct bpf_elf_prog prog;
1872 int ret, i, fd = -1;
1873
1874 for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
1875 if (ctx->sec_done[i])
1876 continue;
1877
1878 ret = bpf_fill_section_data(ctx, i, &data);
1879 if (ret < 0 ||
1880 !(data.sec_hdr.sh_type == SHT_PROGBITS &&
1881 data.sec_hdr.sh_flags & SHF_EXECINSTR &&
1882 !strcmp(data.sec_name, section)))
1883 continue;
1884
1885 *sseen = true;
1886
1887 memset(&prog, 0, sizeof(prog));
1888 prog.type = ctx->type;
1889 prog.insns = data.sec_data->d_buf;
1890 prog.size = data.sec_data->d_size;
1891 prog.license = ctx->license;
1892
1893 fd = bpf_prog_attach(section, &prog, ctx);
1894 if (fd < 0)
1895 return fd;
1896
1897 ctx->sec_done[i] = true;
1898 break;
1899 }
1900
1901 return fd;
1902 }
1903
/* Apply one relocation section to its program section in place: every
 * relocation entry must point at a ld64 (BPF_LD | BPF_IMM | BPF_DW)
 * instruction referencing a map; the instruction is patched into a
 * BPF_PSEUDO_MAP_FD load carrying the resolved map fd as immediate.
 *
 * Returns 0 on success, -EIO/-EINVAL on malformed relocation data.
 */
static int bpf_apply_relo_data(struct bpf_elf_ctx *ctx,
			       struct bpf_elf_sec_data *data_relo,
			       struct bpf_elf_sec_data *data_insn)
{
	Elf_Data *idata = data_insn->sec_data;
	GElf_Shdr *rhdr = &data_relo->sec_hdr;
	int relo_ent, relo_num = rhdr->sh_size / rhdr->sh_entsize;
	struct bpf_insn *insns = idata->d_buf;
	unsigned int num_insns = idata->d_size / sizeof(*insns);

	for (relo_ent = 0; relo_ent < relo_num; relo_ent++) {
		unsigned int ioff, rmap;
		GElf_Rel relo;
		GElf_Sym sym;

		if (gelf_getrel(data_relo->sec_data, relo_ent, &relo) != &relo)
			return -EIO;

		/* Relocation offset -> instruction index. */
		ioff = relo.r_offset / sizeof(struct bpf_insn);
		if (ioff >= num_insns ||
		    insns[ioff].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			fprintf(stderr, "ELF contains relo data for non ld64 instruction at offset %u! Compiler bug?!\n",
				ioff);
			/* A call at the relo site hints at a non-inlined
			 * helper function.
			 */
			if (ioff < num_insns &&
			    insns[ioff].code == (BPF_JMP | BPF_CALL))
				fprintf(stderr, " - Try to annotate functions with always_inline attribute!\n");
			return -EINVAL;
		}

		if (gelf_getsym(ctx->sym_tab, GELF_R_SYM(relo.r_info), &sym) != &sym)
			return -EIO;
		if (sym.st_shndx != ctx->sec_maps) {
			fprintf(stderr, "ELF contains non-map related relo data in entry %u pointing to section %u! Compiler bug?!\n",
				relo_ent, sym.st_shndx);
			return -EIO;
		}

		/* Symbol offset -> map slot -> attached map fd. */
		rmap = sym.st_value / ctx->map_len;
		if (rmap >= ARRAY_SIZE(ctx->map_fds))
			return -EINVAL;
		if (!ctx->map_fds[rmap])
			return -EINVAL;

		if (ctx->verbose)
			fprintf(stderr, "Map \'%s\' (%d) injected into prog section \'%s\' at offset %u!\n",
				bpf_str_tab_name(ctx, &sym), ctx->map_fds[rmap],
				data_insn->sec_name, ioff);

		insns[ioff].src_reg = BPF_PSEUDO_MAP_FD;
		insns[ioff].imm = ctx->map_fds[rmap];
	}

	return 0;
}
1958
/* Find the program section with the given name that has an associated
 * relocation section (SHT_REL whose sh_info points at it), apply the
 * relocations and load the program. *sseen is set once the section was
 * found; *lderr marks a hard relocation/load failure so the caller
 * does not retry the same program without relocations.
 *
 * Returns the program fd on success, a negative value otherwise.
 */
static int bpf_fetch_prog_relo(struct bpf_elf_ctx *ctx, const char *section,
			       bool *lderr, bool *sseen)
{
	struct bpf_elf_sec_data data_relo, data_insn;
	struct bpf_elf_prog prog;
	int ret, idx, i, fd = -1;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		ret = bpf_fill_section_data(ctx, i, &data_relo);
		if (ret < 0 || data_relo.sec_hdr.sh_type != SHT_REL)
			continue;

		/* sh_info of a REL section is the target section index. */
		idx = data_relo.sec_hdr.sh_info;

		ret = bpf_fill_section_data(ctx, idx, &data_insn);
		if (ret < 0 ||
		    !(data_insn.sec_hdr.sh_type == SHT_PROGBITS &&
		      data_insn.sec_hdr.sh_flags & SHF_EXECINSTR &&
		      !strcmp(data_insn.sec_name, section)))
			continue;

		*sseen = true;

		ret = bpf_apply_relo_data(ctx, &data_relo, &data_insn);
		if (ret < 0) {
			*lderr = true;
			return ret;
		}

		memset(&prog, 0, sizeof(prog));
		prog.type = ctx->type;
		prog.insns = data_insn.sec_data->d_buf;
		prog.size = data_insn.sec_data->d_size;
		prog.license = ctx->license;

		fd = bpf_prog_attach(section, &prog, ctx);
		if (fd < 0) {
			*lderr = true;
			return fd;
		}

		/* Both the relo and the program section are consumed. */
		ctx->sec_done[i] = true;
		ctx->sec_done[idx] = true;
		break;
	}

	return fd;
}
2007
/* Load the program section 'section': prefer the relocated variant when
 * map data is available, fall back to a relocation-less load unless a
 * hard load error already occurred. Returns the program fd or a
 * negative value.
 */
static int bpf_fetch_prog_sec(struct bpf_elf_ctx *ctx, const char *section)
{
	bool lderr = false, sseen = false;
	int fd = -1;

	if (bpf_has_map_data(ctx))
		fd = bpf_fetch_prog_relo(ctx, section, &lderr, &sseen);

	if (fd < 0 && !lderr)
		fd = bpf_fetch_prog(ctx, section, &sseen);

	if (fd < 0 && !sseen)
		fprintf(stderr, "Program section \'%s\' not found in ELF file!\n",
			section);

	return fd;
}
2022
2023 static int bpf_find_map_by_id(struct bpf_elf_ctx *ctx, uint32_t id)
2024 {
2025 int i;
2026
2027 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++)
2028 if (ctx->map_fds[i] && ctx->maps[i].id == id &&
2029 ctx->maps[i].type == BPF_MAP_TYPE_PROG_ARRAY)
2030 return i;
2031 return -1;
2032 }
2033
/* Handle tail-call sections named "<map_id>/<key>": load each such
 * program section and install its fd into the referenced PROG_ARRAY
 * map at the given key.
 *
 * Returns 0 on success, a negative error code otherwise.
 */
static int bpf_fill_prog_arrays(struct bpf_elf_ctx *ctx)
{
	struct bpf_elf_sec_data data;
	uint32_t map_id, key_id;
	int fd, i, ret, idx;

	for (i = 1; i < ctx->elf_hdr.e_shnum; i++) {
		if (ctx->sec_done[i])
			continue;

		ret = bpf_fill_section_data(ctx, i, &data);
		if (ret < 0)
			continue;

		/* NOTE(review): "%i" formally expects int *, map_id/key_id
		 * are uint32_t * — same width on supported ABIs, but worth
		 * confirming.
		 */
		ret = sscanf(data.sec_name, "%i/%i", &map_id, &key_id);
		if (ret != 2)
			continue;

		idx = bpf_find_map_by_id(ctx, map_id);
		if (idx < 0) /* Not a known PROG_ARRAY id. */
			continue;

		fd = bpf_fetch_prog_sec(ctx, data.sec_name);
		if (fd < 0)
			return -EIO;

		ret = bpf_map_update(ctx->map_fds[idx], &key_id,
				     &fd, BPF_ANY);
		if (ret < 0) {
			if (errno == E2BIG)
				fprintf(stderr, "Tail call key %u for map %u out of bounds?\n",
					key_id, map_id);
			return -errno;
		}

		ctx->sec_done[i] = true;
	}

	return 0;
}
2074
2075 static void bpf_save_finfo(struct bpf_elf_ctx *ctx)
2076 {
2077 struct stat st;
2078 int ret;
2079
2080 memset(&ctx->stat, 0, sizeof(ctx->stat));
2081
2082 ret = fstat(ctx->obj_fd, &st);
2083 if (ret < 0) {
2084 fprintf(stderr, "Stat of elf file failed: %s\n",
2085 strerror(errno));
2086 return;
2087 }
2088
2089 ctx->stat.st_dev = st.st_dev;
2090 ctx->stat.st_ino = st.st_ino;
2091 }
2092
2093 static int bpf_read_pin_mapping(FILE *fp, uint32_t *id, char *path)
2094 {
2095 char buff[PATH_MAX];
2096
2097 while (fgets(buff, sizeof(buff), fp)) {
2098 char *ptr = buff;
2099
2100 while (*ptr == ' ' || *ptr == '\t')
2101 ptr++;
2102
2103 if (*ptr == '#' || *ptr == '\n' || *ptr == 0)
2104 continue;
2105
2106 if (sscanf(ptr, "%i %s\n", id, path) != 2 &&
2107 sscanf(ptr, "%i %s #", id, path) != 2) {
2108 strcpy(path, ptr);
2109 return -1;
2110 }
2111
2112 return 1;
2113 }
2114
2115 return 0;
2116 }
2117
2118 static bool bpf_pinning_reserved(uint32_t pinning)
2119 {
2120 switch (pinning) {
2121 case PIN_NONE:
2122 case PIN_OBJECT_NS:
2123 case PIN_GLOBAL_NS:
2124 return true;
2125 default:
2126 return false;
2127 }
2128 }
2129
2130 static void bpf_hash_init(struct bpf_elf_ctx *ctx, const char *db_file)
2131 {
2132 struct bpf_hash_entry *entry;
2133 char subpath[PATH_MAX] = {};
2134 uint32_t pinning;
2135 FILE *fp;
2136 int ret;
2137
2138 fp = fopen(db_file, "r");
2139 if (!fp)
2140 return;
2141
2142 while ((ret = bpf_read_pin_mapping(fp, &pinning, subpath))) {
2143 if (ret == -1) {
2144 fprintf(stderr, "Database %s is corrupted at: %s\n",
2145 db_file, subpath);
2146 fclose(fp);
2147 return;
2148 }
2149
2150 if (bpf_pinning_reserved(pinning)) {
2151 fprintf(stderr, "Database %s, id %u is reserved - ignoring!\n",
2152 db_file, pinning);
2153 continue;
2154 }
2155
2156 entry = malloc(sizeof(*entry));
2157 if (!entry) {
2158 fprintf(stderr, "No memory left for db entry!\n");
2159 continue;
2160 }
2161
2162 entry->pinning = pinning;
2163 entry->subpath = strdup(subpath);
2164 if (!entry->subpath) {
2165 fprintf(stderr, "No memory left for db entry!\n");
2166 free(entry);
2167 continue;
2168 }
2169
2170 entry->next = ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)];
2171 ctx->ht[pinning & (ARRAY_SIZE(ctx->ht) - 1)] = entry;
2172 }
2173
2174 fclose(fp);
2175 }
2176
2177 static void bpf_hash_destroy(struct bpf_elf_ctx *ctx)
2178 {
2179 struct bpf_hash_entry *entry;
2180 int i;
2181
2182 for (i = 0; i < ARRAY_SIZE(ctx->ht); i++) {
2183 while ((entry = ctx->ht[i]) != NULL) {
2184 ctx->ht[i] = entry->next;
2185 free((char *)entry->subpath);
2186 free(entry);
2187 }
2188 }
2189 }
2190
2191 static int bpf_elf_check_ehdr(const struct bpf_elf_ctx *ctx)
2192 {
2193 if (ctx->elf_hdr.e_type != ET_REL ||
2194 (ctx->elf_hdr.e_machine != EM_NONE &&
2195 ctx->elf_hdr.e_machine != EM_BPF) ||
2196 ctx->elf_hdr.e_version != EV_CURRENT) {
2197 fprintf(stderr, "ELF format error, ELF file not for eBPF?\n");
2198 return -EINVAL;
2199 }
2200
2201 switch (ctx->elf_hdr.e_ident[EI_DATA]) {
2202 default:
2203 fprintf(stderr, "ELF format error, wrong endianness info?\n");
2204 return -EINVAL;
2205 case ELFDATA2LSB:
2206 if (htons(1) == 1) {
2207 fprintf(stderr,
2208 "We are big endian, eBPF object is little endian!\n");
2209 return -EIO;
2210 }
2211 break;
2212 case ELFDATA2MSB:
2213 if (htons(1) != 1) {
2214 fprintf(stderr,
2215 "We are little endian, eBPF object is big endian!\n");
2216 return -EIO;
2217 }
2218 break;
2219 }
2220
2221 return 0;
2222 }
2223
2224 static int bpf_elf_ctx_init(struct bpf_elf_ctx *ctx, const char *pathname,
2225 enum bpf_prog_type type, bool verbose)
2226 {
2227 int ret = -EINVAL;
2228
2229 if (elf_version(EV_CURRENT) == EV_NONE ||
2230 bpf_init_env(pathname))
2231 return ret;
2232
2233 memset(ctx, 0, sizeof(*ctx));
2234 ctx->verbose = verbose;
2235 ctx->type = type;
2236
2237 ctx->obj_fd = open(pathname, O_RDONLY);
2238 if (ctx->obj_fd < 0)
2239 return ctx->obj_fd;
2240
2241 ctx->elf_fd = elf_begin(ctx->obj_fd, ELF_C_READ, NULL);
2242 if (!ctx->elf_fd) {
2243 ret = -EINVAL;
2244 goto out_fd;
2245 }
2246
2247 if (elf_kind(ctx->elf_fd) != ELF_K_ELF) {
2248 ret = -EINVAL;
2249 goto out_fd;
2250 }
2251
2252 if (gelf_getehdr(ctx->elf_fd, &ctx->elf_hdr) !=
2253 &ctx->elf_hdr) {
2254 ret = -EIO;
2255 goto out_elf;
2256 }
2257
2258 ret = bpf_elf_check_ehdr(ctx);
2259 if (ret < 0)
2260 goto out_elf;
2261
2262 ctx->sec_done = calloc(ctx->elf_hdr.e_shnum,
2263 sizeof(*(ctx->sec_done)));
2264 if (!ctx->sec_done) {
2265 ret = -ENOMEM;
2266 goto out_elf;
2267 }
2268
2269 if (ctx->verbose && bpf_log_realloc(ctx)) {
2270 ret = -ENOMEM;
2271 goto out_free;
2272 }
2273
2274 bpf_save_finfo(ctx);
2275 bpf_hash_init(ctx, CONFDIR "/bpf_pinning");
2276
2277 return 0;
2278 out_free:
2279 free(ctx->sec_done);
2280 out_elf:
2281 elf_end(ctx->elf_fd);
2282 out_fd:
2283 close(ctx->obj_fd);
2284 return ret;
2285 }
2286
2287 static int bpf_maps_count(struct bpf_elf_ctx *ctx)
2288 {
2289 int i, count = 0;
2290
2291 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2292 if (!ctx->map_fds[i])
2293 break;
2294 count++;
2295 }
2296
2297 return count;
2298 }
2299
2300 static void bpf_maps_teardown(struct bpf_elf_ctx *ctx)
2301 {
2302 int i;
2303
2304 for (i = 0; i < ARRAY_SIZE(ctx->map_fds); i++) {
2305 if (ctx->map_fds[i])
2306 close(ctx->map_fds[i]);
2307 }
2308 }
2309
2310 static void bpf_elf_ctx_destroy(struct bpf_elf_ctx *ctx, bool failure)
2311 {
2312 if (failure)
2313 bpf_maps_teardown(ctx);
2314
2315 bpf_hash_destroy(ctx);
2316
2317 free(ctx->sec_done);
2318 free(ctx->log);
2319
2320 elf_end(ctx->elf_fd);
2321 close(ctx->obj_fd);
2322 }
2323
/* Single, file-scoped ELF loading context shared by bpf_obj_open() and
 * the fd send/receive helpers below.
 */
static struct bpf_elf_ctx __ctx;
2325
/* Top-level entry point: open the ELF object, fetch ancillary data
 * (maps, license, symbol/string tables), load the requested program
 * section and populate any tail-call program arrays. The context is
 * destroyed before returning; on failure the created maps (and a
 * possibly loaded program fd) are released too.
 *
 * Returns the program fd on success, a negative error code otherwise.
 */
static int bpf_obj_open(const char *pathname, enum bpf_prog_type type,
			const char *section, bool verbose)
{
	struct bpf_elf_ctx *ctx = &__ctx;
	int fd = 0, ret;

	ret = bpf_elf_ctx_init(ctx, pathname, type, verbose);
	if (ret < 0) {
		fprintf(stderr, "Cannot initialize ELF context!\n");
		return ret;
	}

	ret = bpf_fetch_ancillary(ctx);
	if (ret < 0) {
		fprintf(stderr, "Error fetching ELF ancillary data!\n");
		goto out;
	}

	fd = bpf_fetch_prog_sec(ctx, section);
	if (fd < 0) {
		fprintf(stderr, "Error fetching program/map!\n");
		ret = fd;
		goto out;
	}

	ret = bpf_fill_prog_arrays(ctx);
	if (ret < 0)
		fprintf(stderr, "Error filling program arrays!\n");
out:
	/* Tear down; on error also close created maps and the prog fd. */
	bpf_elf_ctx_destroy(ctx, ret < 0);
	if (ret < 0) {
		if (fd)
			close(fd);
		return ret;
	}

	return fd;
}
2364
/* Send all map fds plus their descriptions over a connected unix
 * domain socket, chunked into at most BPF_SCM_MAX_FDS fds per message:
 * fds travel as SCM_RIGHTS control data, descriptions in the payload.
 *
 * Returns 0 on success, sendmsg()'s result (or -1) on failure.
 */
static int
bpf_map_set_send(int fd, struct sockaddr_un *addr, unsigned int addr_len,
		 const struct bpf_map_data *aux, unsigned int entries)
{
	struct bpf_map_set_msg msg = {
		.aux.uds_ver = BPF_SCM_AUX_VER,
		.aux.num_ent = entries,
	};
	int *cmsg_buf, min_fd;
	char *amsg_buf;
	int i;

	/* NOTE(review): strncpy() leaves obj_name unterminated when
	 * aux->obj is >= sizeof(obj_name) — presumably the receiver
	 * bounds it; confirm.
	 */
	strncpy(msg.aux.obj_name, aux->obj, sizeof(msg.aux.obj_name));
	memcpy(&msg.aux.obj_st, aux->st, sizeof(msg.aux.obj_st));

	cmsg_buf = bpf_map_set_init(&msg, addr, addr_len);
	amsg_buf = (char *)msg.aux.ent;

	for (i = 0; i < entries; i += min_fd) {
		int ret;

		/* Chunk size: at most BPF_SCM_MAX_FDS fds per message. */
		min_fd = min(BPF_SCM_MAX_FDS * 1U, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		memcpy(cmsg_buf, &aux->fds[i], sizeof(aux->fds[0]) * min_fd);
		memcpy(amsg_buf, &aux->ent[i], sizeof(aux->ent[0]) * min_fd);

		ret = sendmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;
	}

	return 0;
}
2399
/* Receive map fds plus their descriptions from a unix domain socket.
 * The aux header of the first message announces the total entry count
 * (aux->num_ent); messages are read until that many entries (capped at
 * 'entries') have arrived. fds come in as SCM_RIGHTS control data.
 *
 * Returns 0 on success, a negative error code or recvmsg() result
 * otherwise.
 */
static int
bpf_map_set_recv(int fd, int *fds, struct bpf_map_aux *aux,
		 unsigned int entries)
{
	struct bpf_map_set_msg msg;
	int *cmsg_buf, min_fd;
	char *amsg_buf, *mmsg_buf;
	unsigned int needed = 1;
	int i;

	cmsg_buf = bpf_map_set_init(&msg, NULL, 0);
	amsg_buf = (char *)msg.aux.ent;
	mmsg_buf = (char *)&msg.aux;

	for (i = 0; i < min(entries, needed); i += min_fd) {
		struct cmsghdr *cmsg;
		int ret;

		min_fd = min(entries, entries - i);
		bpf_map_set_init_single(&msg, min_fd);

		ret = recvmsg(fd, &msg.hdr, 0);
		if (ret <= 0)
			return ret ? : -1;

		/* Validate control data and protocol version. */
		cmsg = CMSG_FIRSTHDR(&msg.hdr);
		if (!cmsg || cmsg->cmsg_type != SCM_RIGHTS)
			return -EINVAL;
		if (msg.hdr.msg_flags & MSG_CTRUNC)
			return -EIO;
		if (msg.aux.uds_ver != BPF_SCM_AUX_VER)
			return -ENOSYS;

		/* Number of fds actually carried by this message. */
		min_fd = (cmsg->cmsg_len - sizeof(*cmsg)) / sizeof(fd);
		if (min_fd > entries || min_fd <= 0)
			return -EINVAL;

		memcpy(&fds[i], cmsg_buf, sizeof(fds[0]) * min_fd);
		memcpy(&aux->ent[i], amsg_buf, sizeof(aux->ent[0]) * min_fd);
		memcpy(aux, mmsg_buf, offsetof(struct bpf_map_aux, ent));

		needed = aux->num_ent;
	}

	return 0;
}
2446
2447 int bpf_send_map_fds(const char *path, const char *obj)
2448 {
2449 struct bpf_elf_ctx *ctx = &__ctx;
2450 struct sockaddr_un addr = { .sun_family = AF_UNIX };
2451 struct bpf_map_data bpf_aux = {
2452 .fds = ctx->map_fds,
2453 .ent = ctx->maps,
2454 .st = &ctx->stat,
2455 .obj = obj,
2456 };
2457 int fd, ret;
2458
2459 fd = socket(AF_UNIX, SOCK_DGRAM, 0);
2460 if (fd < 0) {
2461 fprintf(stderr, "Cannot open socket: %s\n",
2462 strerror(errno));
2463 return -1;
2464 }
2465
2466 strncpy(addr.sun_path, path, sizeof(addr.sun_path));
2467
2468 ret = connect(fd, (struct sockaddr *)&addr, sizeof(addr));
2469 if (ret < 0) {
2470 fprintf(stderr, "Cannot connect to %s: %s\n",
2471 path, strerror(errno));
2472 return -1;
2473 }
2474
2475 ret = bpf_map_set_send(fd, &addr, sizeof(addr), &bpf_aux,
2476 bpf_maps_count(ctx));
2477 if (ret < 0)
2478 fprintf(stderr, "Cannot send fds to %s: %s\n",
2479 path, strerror(errno));
2480
2481 bpf_maps_teardown(ctx);
2482 close(fd);
2483 return ret;
2484 }
2485
/* Receive map fds plus their descriptions on a freshly bound unix
 * domain socket at 'path'; the socket path is unlinked again before
 * returning.
 *
 * Fix: close the socket fd on the bind() error path instead of leaking
 * it.
 *
 * Returns 0 on success, -1 (or a negative receive error) otherwise.
 */
int bpf_recv_map_fds(const char *path, int *fds, struct bpf_map_aux *aux,
		     unsigned int entries)
{
	struct sockaddr_un addr = { .sun_family = AF_UNIX };
	int fd, ret;

	fd = socket(AF_UNIX, SOCK_DGRAM, 0);
	if (fd < 0) {
		fprintf(stderr, "Cannot open socket: %s\n",
			strerror(errno));
		return -1;
	}

	strncpy(addr.sun_path, path, sizeof(addr.sun_path));

	ret = bind(fd, (struct sockaddr *)&addr, sizeof(addr));
	if (ret < 0) {
		fprintf(stderr, "Cannot bind to socket: %s\n",
			strerror(errno));
		close(fd);
		return -1;
	}

	ret = bpf_map_set_recv(fd, fds, aux, entries);
	if (ret < 0)
		fprintf(stderr, "Cannot recv fds from %s: %s\n",
			path, strerror(errno));

	unlink(addr.sun_path);
	close(fd);
	return ret;
}
2517 #endif /* HAVE_ELF */