#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <libelf.h>
#include <gelf.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <poll.h>
#include <ctype.h>
#include <assert.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"

#define DEBUGFS "/sys/kernel/debug/tracing/"
32 static char license
[128];
33 static int kern_version
;
34 static bool processed_sec
[128];
35 char bpf_log_buf
[BPF_LOG_BUF_SIZE
];
37 int prog_fd
[MAX_PROGS
];
38 int event_fd
[MAX_PROGS
];
40 int prog_array_fd
= -1;
42 struct bpf_map_data map_data
[MAX_MAPS
];
43 int map_data_count
= 0;
45 static int populate_prog_array(const char *event
, int prog_fd
)
47 int ind
= atoi(event
), err
;
49 err
= bpf_map_update_elem(prog_array_fd
, &ind
, &prog_fd
, BPF_ANY
);
51 printf("failed to store prog_fd in prog_array\n");
57 static int load_and_attach(const char *event
, struct bpf_insn
*prog
, int size
)
59 bool is_socket
= strncmp(event
, "socket", 6) == 0;
60 bool is_kprobe
= strncmp(event
, "kprobe/", 7) == 0;
61 bool is_kretprobe
= strncmp(event
, "kretprobe/", 10) == 0;
62 bool is_tracepoint
= strncmp(event
, "tracepoint/", 11) == 0;
63 bool is_xdp
= strncmp(event
, "xdp", 3) == 0;
64 bool is_perf_event
= strncmp(event
, "perf_event", 10) == 0;
65 bool is_cgroup_skb
= strncmp(event
, "cgroup/skb", 10) == 0;
66 bool is_cgroup_sk
= strncmp(event
, "cgroup/sock", 11) == 0;
67 size_t insns_cnt
= size
/ sizeof(struct bpf_insn
);
68 enum bpf_prog_type prog_type
;
71 struct perf_event_attr attr
= {};
73 attr
.type
= PERF_TYPE_TRACEPOINT
;
74 attr
.sample_type
= PERF_SAMPLE_RAW
;
75 attr
.sample_period
= 1;
76 attr
.wakeup_events
= 1;
79 prog_type
= BPF_PROG_TYPE_SOCKET_FILTER
;
80 } else if (is_kprobe
|| is_kretprobe
) {
81 prog_type
= BPF_PROG_TYPE_KPROBE
;
82 } else if (is_tracepoint
) {
83 prog_type
= BPF_PROG_TYPE_TRACEPOINT
;
85 prog_type
= BPF_PROG_TYPE_XDP
;
86 } else if (is_perf_event
) {
87 prog_type
= BPF_PROG_TYPE_PERF_EVENT
;
88 } else if (is_cgroup_skb
) {
89 prog_type
= BPF_PROG_TYPE_CGROUP_SKB
;
90 } else if (is_cgroup_sk
) {
91 prog_type
= BPF_PROG_TYPE_CGROUP_SOCK
;
93 printf("Unknown event '%s'\n", event
);
97 fd
= bpf_load_program(prog_type
, prog
, insns_cnt
, license
, kern_version
,
98 bpf_log_buf
, BPF_LOG_BUF_SIZE
);
100 printf("bpf_load_program() err=%d\n%s", errno
, bpf_log_buf
);
104 prog_fd
[prog_cnt
++] = fd
;
106 if (is_xdp
|| is_perf_event
|| is_cgroup_skb
|| is_cgroup_sk
)
114 if (!isdigit(*event
)) {
115 printf("invalid prog number\n");
118 return populate_prog_array(event
, fd
);
121 if (is_kprobe
|| is_kretprobe
) {
128 printf("event name cannot be empty\n");
133 return populate_prog_array(event
, fd
);
135 snprintf(buf
, sizeof(buf
),
136 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
137 is_kprobe
? 'p' : 'r', event
, event
);
140 printf("failed to create kprobe '%s' error '%s'\n",
141 event
, strerror(errno
));
145 strcpy(buf
, DEBUGFS
);
146 strcat(buf
, "events/kprobes/");
149 } else if (is_tracepoint
) {
153 printf("event name cannot be empty\n");
156 strcpy(buf
, DEBUGFS
);
157 strcat(buf
, "events/");
162 efd
= open(buf
, O_RDONLY
, 0);
164 printf("failed to open event %s\n", event
);
168 err
= read(efd
, buf
, sizeof(buf
));
169 if (err
< 0 || err
>= sizeof(buf
)) {
170 printf("read from '%s' failed '%s'\n", event
, strerror(errno
));
180 efd
= sys_perf_event_open(&attr
, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
182 printf("event %d fd %d err %s\n", id
, efd
, strerror(errno
));
185 event_fd
[prog_cnt
- 1] = efd
;
186 ioctl(efd
, PERF_EVENT_IOC_ENABLE
, 0);
187 ioctl(efd
, PERF_EVENT_IOC_SET_BPF
, fd
);
192 static int load_maps(struct bpf_map_data
*maps
, int nr_maps
,
193 fixup_map_cb fixup_map
)
197 for (i
= 0; i
< nr_maps
; i
++) {
199 fixup_map(&maps
[i
], i
);
200 /* Allow userspace to assign map FD prior to creation */
201 if (maps
[i
].fd
!= -1) {
202 map_fd
[i
] = maps
[i
].fd
;
207 if (maps
[i
].def
.type
== BPF_MAP_TYPE_ARRAY_OF_MAPS
||
208 maps
[i
].def
.type
== BPF_MAP_TYPE_HASH_OF_MAPS
) {
209 int inner_map_fd
= map_fd
[maps
[i
].def
.inner_map_idx
];
211 map_fd
[i
] = bpf_create_map_in_map(maps
[i
].def
.type
,
212 maps
[i
].def
.key_size
,
214 maps
[i
].def
.max_entries
,
215 maps
[i
].def
.map_flags
);
217 map_fd
[i
] = bpf_create_map(maps
[i
].def
.type
,
218 maps
[i
].def
.key_size
,
219 maps
[i
].def
.value_size
,
220 maps
[i
].def
.max_entries
,
221 maps
[i
].def
.map_flags
);
224 printf("failed to create a map: %d %s\n",
225 errno
, strerror(errno
));
228 maps
[i
].fd
= map_fd
[i
];
230 if (maps
[i
].def
.type
== BPF_MAP_TYPE_PROG_ARRAY
)
231 prog_array_fd
= map_fd
[i
];
236 static int get_sec(Elf
*elf
, int i
, GElf_Ehdr
*ehdr
, char **shname
,
237 GElf_Shdr
*shdr
, Elf_Data
**data
)
241 scn
= elf_getscn(elf
, i
);
245 if (gelf_getshdr(scn
, shdr
) != shdr
)
248 *shname
= elf_strptr(elf
, ehdr
->e_shstrndx
, shdr
->sh_name
);
249 if (!*shname
|| !shdr
->sh_size
)
252 *data
= elf_getdata(scn
, 0);
253 if (!*data
|| elf_getdata(scn
, *data
) != NULL
)
259 static int parse_relo_and_apply(Elf_Data
*data
, Elf_Data
*symbols
,
260 GElf_Shdr
*shdr
, struct bpf_insn
*insn
,
261 struct bpf_map_data
*maps
, int nr_maps
)
265 nrels
= shdr
->sh_size
/ shdr
->sh_entsize
;
267 for (i
= 0; i
< nrels
; i
++) {
270 unsigned int insn_idx
;
274 gelf_getrel(data
, i
, &rel
);
276 insn_idx
= rel
.r_offset
/ sizeof(struct bpf_insn
);
278 gelf_getsym(symbols
, GELF_R_SYM(rel
.r_info
), &sym
);
280 if (insn
[insn_idx
].code
!= (BPF_LD
| BPF_IMM
| BPF_DW
)) {
281 printf("invalid relo for insn[%d].code 0x%x\n",
282 insn_idx
, insn
[insn_idx
].code
);
285 insn
[insn_idx
].src_reg
= BPF_PSEUDO_MAP_FD
;
287 /* Match FD relocation against recorded map_data[] offset */
288 for (map_idx
= 0; map_idx
< nr_maps
; map_idx
++) {
289 if (maps
[map_idx
].elf_offset
== sym
.st_value
) {
295 insn
[insn_idx
].imm
= maps
[map_idx
].fd
;
297 printf("invalid relo for insn[%d] no map_data match\n",
306 static int cmp_symbols(const void *l
, const void *r
)
308 const GElf_Sym
*lsym
= (const GElf_Sym
*)l
;
309 const GElf_Sym
*rsym
= (const GElf_Sym
*)r
;
311 if (lsym
->st_value
< rsym
->st_value
)
313 else if (lsym
->st_value
> rsym
->st_value
)
319 static int load_elf_maps_section(struct bpf_map_data
*maps
, int maps_shndx
,
320 Elf
*elf
, Elf_Data
*symbols
, int strtabidx
)
322 int map_sz_elf
, map_sz_copy
;
323 bool validate_zero
= false;
335 /* Get data for maps section via elf index */
336 scn
= elf_getscn(elf
, maps_shndx
);
338 data_maps
= elf_getdata(scn
, NULL
);
339 if (!scn
|| !data_maps
) {
340 printf("Failed to get Elf_Data from maps section %d\n",
345 /* For each map get corrosponding symbol table entry */
346 sym
= calloc(MAX_MAPS
+1, sizeof(GElf_Sym
));
347 for (i
= 0, nr_maps
= 0; i
< symbols
->d_size
/ sizeof(GElf_Sym
); i
++) {
348 assert(nr_maps
< MAX_MAPS
+1);
349 if (!gelf_getsym(symbols
, i
, &sym
[nr_maps
]))
351 if (sym
[nr_maps
].st_shndx
!= maps_shndx
)
353 /* Only increment iif maps section */
357 /* Align to map_fd[] order, via sort on offset in sym.st_value */
358 qsort(sym
, nr_maps
, sizeof(GElf_Sym
), cmp_symbols
);
360 /* Keeping compatible with ELF maps section changes
361 * ------------------------------------------------
362 * The program size of struct bpf_map_def is known by loader
363 * code, but struct stored in ELF file can be different.
365 * Unfortunately sym[i].st_size is zero. To calculate the
366 * struct size stored in the ELF file, assume all struct have
367 * the same size, and simply divide with number of map
370 map_sz_elf
= data_maps
->d_size
/ nr_maps
;
371 map_sz_copy
= sizeof(struct bpf_map_def
);
372 if (map_sz_elf
< map_sz_copy
) {
374 * Backward compat, loading older ELF file with
375 * smaller struct, keeping remaining bytes zero.
377 map_sz_copy
= map_sz_elf
;
378 } else if (map_sz_elf
> map_sz_copy
) {
380 * Forward compat, loading newer ELF file with larger
381 * struct with unknown features. Assume zero means
382 * feature not used. Thus, validate rest of struct
385 validate_zero
= true;
388 /* Memcpy relevant part of ELF maps data to loader maps */
389 for (i
= 0; i
< nr_maps
; i
++) {
390 unsigned char *addr
, *end
;
391 struct bpf_map_def
*def
;
392 const char *map_name
;
395 map_name
= elf_strptr(elf
, strtabidx
, sym
[i
].st_name
);
396 maps
[i
].name
= strdup(map_name
);
398 printf("strdup(%s): %s(%d)\n", map_name
,
399 strerror(errno
), errno
);
404 /* Symbol value is offset into ELF maps section data area */
405 offset
= sym
[i
].st_value
;
406 def
= (struct bpf_map_def
*)(data_maps
->d_buf
+ offset
);
407 maps
[i
].elf_offset
= offset
;
408 memset(&maps
[i
].def
, 0, sizeof(struct bpf_map_def
));
409 memcpy(&maps
[i
].def
, def
, map_sz_copy
);
411 /* Verify no newer features were requested */
413 addr
= (unsigned char*) def
+ map_sz_copy
;
414 end
= (unsigned char*) def
+ map_sz_elf
;
415 for (; addr
< end
; addr
++) {
428 static int do_load_bpf_file(const char *path
, fixup_map_cb fixup_map
)
430 int fd
, i
, ret
, maps_shndx
= -1, strtabidx
= -1;
433 GElf_Shdr shdr
, shdr_prog
;
434 Elf_Data
*data
, *data_prog
, *data_maps
= NULL
, *symbols
= NULL
;
435 char *shname
, *shname_prog
;
438 /* reset global variables */
440 memset(license
, 0, sizeof(license
));
441 memset(processed_sec
, 0, sizeof(processed_sec
));
443 if (elf_version(EV_CURRENT
) == EV_NONE
)
446 fd
= open(path
, O_RDONLY
, 0);
450 elf
= elf_begin(fd
, ELF_C_READ
, NULL
);
455 if (gelf_getehdr(elf
, &ehdr
) != &ehdr
)
458 /* clear all kprobes */
459 i
= system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");
461 /* scan over all elf sections to get license and map info */
462 for (i
= 1; i
< ehdr
.e_shnum
; i
++) {
464 if (get_sec(elf
, i
, &ehdr
, &shname
, &shdr
, &data
))
467 if (0) /* helpful for llvm debugging */
468 printf("section %d:%s data %p size %zd link %d flags %d\n",
469 i
, shname
, data
->d_buf
, data
->d_size
,
470 shdr
.sh_link
, (int) shdr
.sh_flags
);
472 if (strcmp(shname
, "license") == 0) {
473 processed_sec
[i
] = true;
474 memcpy(license
, data
->d_buf
, data
->d_size
);
475 } else if (strcmp(shname
, "version") == 0) {
476 processed_sec
[i
] = true;
477 if (data
->d_size
!= sizeof(int)) {
478 printf("invalid size of version section %zd\n",
482 memcpy(&kern_version
, data
->d_buf
, sizeof(int));
483 } else if (strcmp(shname
, "maps") == 0) {
488 for (j
= 0; j
< MAX_MAPS
; j
++)
490 } else if (shdr
.sh_type
== SHT_SYMTAB
) {
491 strtabidx
= shdr
.sh_link
;
499 printf("missing SHT_SYMTAB section\n");
504 nr_maps
= load_elf_maps_section(map_data
, maps_shndx
,
505 elf
, symbols
, strtabidx
);
507 printf("Error: Failed loading ELF maps (errno:%d):%s\n",
508 nr_maps
, strerror(-nr_maps
));
512 if (load_maps(map_data
, nr_maps
, fixup_map
))
514 map_data_count
= nr_maps
;
516 processed_sec
[maps_shndx
] = true;
519 /* load programs that need map fixup (relocations) */
520 for (i
= 1; i
< ehdr
.e_shnum
; i
++) {
521 if (processed_sec
[i
])
524 if (get_sec(elf
, i
, &ehdr
, &shname
, &shdr
, &data
))
526 if (shdr
.sh_type
== SHT_REL
) {
527 struct bpf_insn
*insns
;
529 if (get_sec(elf
, shdr
.sh_info
, &ehdr
, &shname_prog
,
530 &shdr_prog
, &data_prog
))
533 if (shdr_prog
.sh_type
!= SHT_PROGBITS
||
534 !(shdr_prog
.sh_flags
& SHF_EXECINSTR
))
537 insns
= (struct bpf_insn
*) data_prog
->d_buf
;
539 processed_sec
[shdr
.sh_info
] = true;
540 processed_sec
[i
] = true;
542 if (parse_relo_and_apply(data
, symbols
, &shdr
, insns
,
546 if (memcmp(shname_prog
, "kprobe/", 7) == 0 ||
547 memcmp(shname_prog
, "kretprobe/", 10) == 0 ||
548 memcmp(shname_prog
, "tracepoint/", 11) == 0 ||
549 memcmp(shname_prog
, "xdp", 3) == 0 ||
550 memcmp(shname_prog
, "perf_event", 10) == 0 ||
551 memcmp(shname_prog
, "socket", 6) == 0 ||
552 memcmp(shname_prog
, "cgroup/", 7) == 0)
553 load_and_attach(shname_prog
, insns
, data_prog
->d_size
);
557 /* load programs that don't use maps */
558 for (i
= 1; i
< ehdr
.e_shnum
; i
++) {
560 if (processed_sec
[i
])
563 if (get_sec(elf
, i
, &ehdr
, &shname
, &shdr
, &data
))
566 if (memcmp(shname
, "kprobe/", 7) == 0 ||
567 memcmp(shname
, "kretprobe/", 10) == 0 ||
568 memcmp(shname
, "tracepoint/", 11) == 0 ||
569 memcmp(shname
, "xdp", 3) == 0 ||
570 memcmp(shname
, "perf_event", 10) == 0 ||
571 memcmp(shname
, "socket", 6) == 0 ||
572 memcmp(shname
, "cgroup/", 7) == 0)
573 load_and_attach(shname
, data
->d_buf
, data
->d_size
);
582 int load_bpf_file(char *path
)
584 return do_load_bpf_file(path
, NULL
);
587 int load_bpf_file_fixup_map(const char *path
, fixup_map_cb fixup_map
)
589 return do_load_bpf_file(path
, fixup_map
);
592 void read_trace_pipe(void)
596 trace_fd
= open(DEBUGFS
"trace_pipe", O_RDONLY
, 0);
601 static char buf
[4096];
604 sz
= read(trace_fd
, buf
, sizeof(buf
));
612 #define MAX_SYMS 300000
613 static struct ksym syms
[MAX_SYMS
];
616 static int ksym_cmp(const void *p1
, const void *p2
)
618 return ((struct ksym
*)p1
)->addr
- ((struct ksym
*)p2
)->addr
;
621 int load_kallsyms(void)
623 FILE *f
= fopen("/proc/kallsyms", "r");
624 char func
[256], buf
[256];
633 if (!fgets(buf
, sizeof(buf
), f
))
635 if (sscanf(buf
, "%p %c %s", &addr
, &symbol
, func
) != 3)
639 syms
[i
].addr
= (long) addr
;
640 syms
[i
].name
= strdup(func
);
644 qsort(syms
, sym_cnt
, sizeof(struct ksym
), ksym_cmp
);
648 struct ksym
*ksym_search(long key
)
650 int start
= 0, end
= sym_cnt
;
653 while (start
< end
) {
654 size_t mid
= start
+ (end
- start
) / 2;
656 result
= key
- syms
[mid
].addr
;
665 if (start
>= 1 && syms
[start
- 1].addr
< key
&&
666 key
< syms
[start
].addr
)
668 return &syms
[start
- 1];
670 /* out of range. return _stext */
674 int set_link_xdp_fd(int ifindex
, int fd
, __u32 flags
)
676 struct sockaddr_nl sa
;
677 int sock
, seq
= 0, len
, ret
= -1;
679 struct nlattr
*nla
, *nla_xdp
;
682 struct ifinfomsg ifinfo
;
686 struct nlmsgerr
*err
;
688 memset(&sa
, 0, sizeof(sa
));
689 sa
.nl_family
= AF_NETLINK
;
691 sock
= socket(AF_NETLINK
, SOCK_RAW
, NETLINK_ROUTE
);
693 printf("open netlink socket: %s\n", strerror(errno
));
697 if (bind(sock
, (struct sockaddr
*)&sa
, sizeof(sa
)) < 0) {
698 printf("bind to netlink: %s\n", strerror(errno
));
702 memset(&req
, 0, sizeof(req
));
703 req
.nh
.nlmsg_len
= NLMSG_LENGTH(sizeof(struct ifinfomsg
));
704 req
.nh
.nlmsg_flags
= NLM_F_REQUEST
| NLM_F_ACK
;
705 req
.nh
.nlmsg_type
= RTM_SETLINK
;
706 req
.nh
.nlmsg_pid
= 0;
707 req
.nh
.nlmsg_seq
= ++seq
;
708 req
.ifinfo
.ifi_family
= AF_UNSPEC
;
709 req
.ifinfo
.ifi_index
= ifindex
;
711 /* started nested attribute for XDP */
712 nla
= (struct nlattr
*)(((char *)&req
)
713 + NLMSG_ALIGN(req
.nh
.nlmsg_len
));
714 nla
->nla_type
= NLA_F_NESTED
| 43/*IFLA_XDP*/;
715 nla
->nla_len
= NLA_HDRLEN
;
718 nla_xdp
= (struct nlattr
*)((char *)nla
+ nla
->nla_len
);
719 nla_xdp
->nla_type
= 1/*IFLA_XDP_FD*/;
720 nla_xdp
->nla_len
= NLA_HDRLEN
+ sizeof(int);
721 memcpy((char *)nla_xdp
+ NLA_HDRLEN
, &fd
, sizeof(fd
));
722 nla
->nla_len
+= nla_xdp
->nla_len
;
724 /* if user passed in any flags, add those too */
726 nla_xdp
= (struct nlattr
*)((char *)nla
+ nla
->nla_len
);
727 nla_xdp
->nla_type
= 3/*IFLA_XDP_FLAGS*/;
728 nla_xdp
->nla_len
= NLA_HDRLEN
+ sizeof(flags
);
729 memcpy((char *)nla_xdp
+ NLA_HDRLEN
, &flags
, sizeof(flags
));
730 nla
->nla_len
+= nla_xdp
->nla_len
;
733 req
.nh
.nlmsg_len
+= NLA_ALIGN(nla
->nla_len
);
735 if (send(sock
, &req
, req
.nh
.nlmsg_len
, 0) < 0) {
736 printf("send to netlink: %s\n", strerror(errno
));
740 len
= recv(sock
, buf
, sizeof(buf
), 0);
742 printf("recv from netlink: %s\n", strerror(errno
));
746 for (nh
= (struct nlmsghdr
*)buf
; NLMSG_OK(nh
, len
);
747 nh
= NLMSG_NEXT(nh
, len
)) {
748 if (nh
->nlmsg_pid
!= getpid()) {
749 printf("Wrong pid %d, expected %d\n",
750 nh
->nlmsg_pid
, getpid());
753 if (nh
->nlmsg_seq
!= seq
) {
754 printf("Wrong seq %d, expected %d\n",
758 switch (nh
->nlmsg_type
) {
760 err
= (struct nlmsgerr
*)NLMSG_DATA(nh
);
763 printf("nlmsg error %s\n", strerror(-err
->error
));