samples/bpf/bpf_load.c
#include <stdio.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <libelf.h>
#include <gelf.h>
#include <errno.h>
#include <unistd.h>
#include <string.h>
#include <stdbool.h>
#include <stdlib.h>
#include <linux/bpf.h>
#include <linux/filter.h>
#include <linux/perf_event.h>
#include <linux/netlink.h>
#include <linux/rtnetlink.h>
#include <linux/types.h>
#include <sys/types.h>
#include <sys/socket.h>
#include <sys/syscall.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <poll.h>
#include <ctype.h>
#include <assert.h>
#include "libbpf.h"
#include "bpf_load.h"
#include "perf-sys.h"

#define DEBUGFS "/sys/kernel/debug/tracing/"

static char license[128];
static int kern_version;
static bool processed_sec[128];
char bpf_log_buf[BPF_LOG_BUF_SIZE];
int map_fd[MAX_MAPS];
int prog_fd[MAX_PROGS];
int event_fd[MAX_PROGS];
int prog_cnt;
int prog_array_fd = -1;

struct bpf_map_data map_data[MAX_MAPS];
int map_data_count = 0;

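/*
 * Store an already-loaded program fd into the global PROG_ARRAY map
 * (prog_array_fd), using the numeric suffix of the section name as the
 * array index (e.g. "socket/7" goes into slot 7).  Used for tail-call setups.
 */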
static int populate_prog_array(const char *event, int prog_fd)
{
	int ind = atoi(event), err;

	err = bpf_map_update_elem(prog_array_fd, &ind, &prog_fd, BPF_ANY);
	if (err < 0) {
		printf("failed to store prog_fd in prog_array\n");
		return -1;
	}
	return 0;
}

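/*
 * Load one BPF program and attach it according to its ELF section name
 * prefix: "socket", "kprobe/", "kretprobe/", "tracepoint/", "xdp",
 * "perf_event", "cgroup/skb" or "cgroup/sock".  For kprobes and tracepoints
 * this also creates the trace event via debugfs, opens a perf event fd for
 * it, and attaches the program with PERF_EVENT_IOC_SET_BPF.
 */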
static int load_and_attach(const char *event, struct bpf_insn *prog, int size)
{
	bool is_socket = strncmp(event, "socket", 6) == 0;
	bool is_kprobe = strncmp(event, "kprobe/", 7) == 0;
	bool is_kretprobe = strncmp(event, "kretprobe/", 10) == 0;
	bool is_tracepoint = strncmp(event, "tracepoint/", 11) == 0;
	bool is_xdp = strncmp(event, "xdp", 3) == 0;
	bool is_perf_event = strncmp(event, "perf_event", 10) == 0;
	bool is_cgroup_skb = strncmp(event, "cgroup/skb", 10) == 0;
	bool is_cgroup_sk = strncmp(event, "cgroup/sock", 11) == 0;
	size_t insns_cnt = size / sizeof(struct bpf_insn);
	enum bpf_prog_type prog_type;
	char buf[256];
	int fd, efd, err, id;
	struct perf_event_attr attr = {};

	attr.type = PERF_TYPE_TRACEPOINT;
	attr.sample_type = PERF_SAMPLE_RAW;
	attr.sample_period = 1;
	attr.wakeup_events = 1;

	if (is_socket) {
		prog_type = BPF_PROG_TYPE_SOCKET_FILTER;
	} else if (is_kprobe || is_kretprobe) {
		prog_type = BPF_PROG_TYPE_KPROBE;
	} else if (is_tracepoint) {
		prog_type = BPF_PROG_TYPE_TRACEPOINT;
	} else if (is_xdp) {
		prog_type = BPF_PROG_TYPE_XDP;
	} else if (is_perf_event) {
		prog_type = BPF_PROG_TYPE_PERF_EVENT;
	} else if (is_cgroup_skb) {
		prog_type = BPF_PROG_TYPE_CGROUP_SKB;
	} else if (is_cgroup_sk) {
		prog_type = BPF_PROG_TYPE_CGROUP_SOCK;
	} else {
		printf("Unknown event '%s'\n", event);
		return -1;
	}

	fd = bpf_load_program(prog_type, prog, insns_cnt, license, kern_version,
			      bpf_log_buf, BPF_LOG_BUF_SIZE);
	if (fd < 0) {
		printf("bpf_load_program() err=%d\n%s", errno, bpf_log_buf);
		return -1;
	}

	prog_fd[prog_cnt++] = fd;

	if (is_xdp || is_perf_event || is_cgroup_skb || is_cgroup_sk)
		return 0;

	if (is_socket) {
		event += 6;
		if (*event != '/')
			return 0;
		event++;
		if (!isdigit(*event)) {
			printf("invalid prog number\n");
			return -1;
		}
		return populate_prog_array(event, fd);
	}

	if (is_kprobe || is_kretprobe) {
		if (is_kprobe)
			event += 7;
		else
			event += 10;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}

		if (isdigit(*event))
			return populate_prog_array(event, fd);

		snprintf(buf, sizeof(buf),
			 "echo '%c:%s %s' >> /sys/kernel/debug/tracing/kprobe_events",
			 is_kprobe ? 'p' : 'r', event, event);
		err = system(buf);
		if (err < 0) {
			printf("failed to create kprobe '%s' error '%s'\n",
			       event, strerror(errno));
			return -1;
		}

		strcpy(buf, DEBUGFS);
		strcat(buf, "events/kprobes/");
		strcat(buf, event);
		strcat(buf, "/id");
	} else if (is_tracepoint) {
		event += 11;

		if (*event == 0) {
			printf("event name cannot be empty\n");
			return -1;
		}
		strcpy(buf, DEBUGFS);
		strcat(buf, "events/");
		strcat(buf, event);
		strcat(buf, "/id");
	}

	efd = open(buf, O_RDONLY, 0);
	if (efd < 0) {
		printf("failed to open event %s\n", event);
		return -1;
	}

	err = read(efd, buf, sizeof(buf));
	if (err < 0 || err >= sizeof(buf)) {
		printf("read from '%s' failed '%s'\n", event, strerror(errno));
		return -1;
	}

	close(efd);

	buf[err] = 0;
	id = atoi(buf);
	attr.config = id;

	efd = sys_perf_event_open(&attr, -1/*pid*/, 0/*cpu*/, -1/*group_fd*/, 0);
	if (efd < 0) {
		printf("event %d fd %d err %s\n", id, efd, strerror(errno));
		return -1;
	}
	event_fd[prog_cnt - 1] = efd;
	ioctl(efd, PERF_EVENT_IOC_ENABLE, 0);
	ioctl(efd, PERF_EVENT_IOC_SET_BPF, fd);

	return 0;
}

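/*
 * Create the kernel maps described in map_data[].  An optional fixup_map
 * callback lets the caller adjust a map definition, or supply an
 * already-created fd, before the map is created.  Map-in-map types are
 * created with the inner map fd looked up via inner_map_idx.
 */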
static int load_maps(struct bpf_map_data *maps, int nr_maps,
		     fixup_map_cb fixup_map)
{
	int i;

	for (i = 0; i < nr_maps; i++) {
		if (fixup_map) {
			fixup_map(&maps[i], i);
			/* Allow userspace to assign map FD prior to creation */
			if (maps[i].fd != -1) {
				map_fd[i] = maps[i].fd;
				continue;
			}
		}

		if (maps[i].def.type == BPF_MAP_TYPE_ARRAY_OF_MAPS ||
		    maps[i].def.type == BPF_MAP_TYPE_HASH_OF_MAPS) {
			int inner_map_fd = map_fd[maps[i].def.inner_map_idx];

			map_fd[i] = bpf_create_map_in_map(maps[i].def.type,
							maps[i].def.key_size,
							inner_map_fd,
							maps[i].def.max_entries,
							maps[i].def.map_flags);
		} else {
			map_fd[i] = bpf_create_map(maps[i].def.type,
						   maps[i].def.key_size,
						   maps[i].def.value_size,
						   maps[i].def.max_entries,
						   maps[i].def.map_flags);
		}
		if (map_fd[i] < 0) {
			printf("failed to create a map: %d %s\n",
			       errno, strerror(errno));
			return 1;
		}
		maps[i].fd = map_fd[i];

		if (maps[i].def.type == BPF_MAP_TYPE_PROG_ARRAY)
			prog_array_fd = map_fd[i];
	}
	return 0;
}

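/*
 * Fetch section i of the ELF file: its name, header and data.
 * Returns 0 on success, a non-zero step number on failure.
 */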
static int get_sec(Elf *elf, int i, GElf_Ehdr *ehdr, char **shname,
		   GElf_Shdr *shdr, Elf_Data **data)
{
	Elf_Scn *scn;

	scn = elf_getscn(elf, i);
	if (!scn)
		return 1;

	if (gelf_getshdr(scn, shdr) != shdr)
		return 2;

	*shname = elf_strptr(elf, ehdr->e_shstrndx, shdr->sh_name);
	if (!*shname || !shdr->sh_size)
		return 3;

	*data = elf_getdata(scn, 0);
	if (!*data || elf_getdata(scn, *data) != NULL)
		return 4;

	return 0;
}

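/*
 * Apply map relocations to a program section: every relocation entry must
 * point at a BPF_LD_IMM64 (map load) instruction, whose immediate is patched
 * with the fd of the map whose ELF offset matches the symbol value.
 */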
static int parse_relo_and_apply(Elf_Data *data, Elf_Data *symbols,
				GElf_Shdr *shdr, struct bpf_insn *insn,
				struct bpf_map_data *maps, int nr_maps)
{
	int i, nrels;

	nrels = shdr->sh_size / shdr->sh_entsize;

	for (i = 0; i < nrels; i++) {
		GElf_Sym sym;
		GElf_Rel rel;
		unsigned int insn_idx;
		bool match = false;
		int j, map_idx;

		gelf_getrel(data, i, &rel);

		insn_idx = rel.r_offset / sizeof(struct bpf_insn);

		gelf_getsym(symbols, GELF_R_SYM(rel.r_info), &sym);

		if (insn[insn_idx].code != (BPF_LD | BPF_IMM | BPF_DW)) {
			printf("invalid relo for insn[%d].code 0x%x\n",
			       insn_idx, insn[insn_idx].code);
			return 1;
		}
		insn[insn_idx].src_reg = BPF_PSEUDO_MAP_FD;

		/* Match FD relocation against recorded map_data[] offset */
		for (map_idx = 0; map_idx < nr_maps; map_idx++) {
			if (maps[map_idx].elf_offset == sym.st_value) {
				match = true;
				break;
			}
		}
		if (match) {
			insn[insn_idx].imm = maps[map_idx].fd;
		} else {
			printf("invalid relo for insn[%d] no map_data match\n",
			       insn_idx);
			return 1;
		}
	}

	return 0;
}

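/* qsort() helper: order map symbols by their offset in the maps section */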
static int cmp_symbols(const void *l, const void *r)
{
	const GElf_Sym *lsym = (const GElf_Sym *)l;
	const GElf_Sym *rsym = (const GElf_Sym *)r;

	if (lsym->st_value < rsym->st_value)
		return -1;
	else if (lsym->st_value > rsym->st_value)
		return 1;
	else
		return 0;
}

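/*
 * Parse the "maps" ELF section into map_data[].  Each symbol in the section
 * describes one struct bpf_map_def; entries are sorted by offset so they
 * line up with map_fd[].  Both older (smaller) and newer (larger)
 * bpf_map_def layouts stored in the ELF file are handled.
 */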
static int load_elf_maps_section(struct bpf_map_data *maps, int maps_shndx,
				 Elf *elf, Elf_Data *symbols, int strtabidx)
{
	int map_sz_elf, map_sz_copy;
	bool validate_zero = false;
	Elf_Data *data_maps;
	int i, nr_maps;
	GElf_Sym *sym;
	Elf_Scn *scn;
	int copy_sz;

	if (maps_shndx < 0)
		return -EINVAL;
	if (!symbols)
		return -EINVAL;

	/* Get data for maps section via elf index */
	scn = elf_getscn(elf, maps_shndx);
	if (scn)
		data_maps = elf_getdata(scn, NULL);
	if (!scn || !data_maps) {
		printf("Failed to get Elf_Data from maps section %d\n",
		       maps_shndx);
		return -EINVAL;
	}

	/* For each map get corresponding symbol table entry */
	sym = calloc(MAX_MAPS+1, sizeof(GElf_Sym));
	for (i = 0, nr_maps = 0; i < symbols->d_size / sizeof(GElf_Sym); i++) {
		assert(nr_maps < MAX_MAPS+1);
		if (!gelf_getsym(symbols, i, &sym[nr_maps]))
			continue;
		if (sym[nr_maps].st_shndx != maps_shndx)
			continue;
		/* Only increment iff symbol is in the maps section */
		nr_maps++;
	}

	/* Align to map_fd[] order, via sort on offset in sym.st_value */
	qsort(sym, nr_maps, sizeof(GElf_Sym), cmp_symbols);

	/* Keeping compatible with ELF maps section changes
	 * ------------------------------------------------
	 * The size of struct bpf_map_def is known by the loader code,
	 * but the struct stored in the ELF file can be different.
	 *
	 * Unfortunately sym[i].st_size is zero.  To calculate the
	 * struct size stored in the ELF file, assume all structs have
	 * the same size, and simply divide by the number of map
	 * symbols.
	 */
	map_sz_elf = data_maps->d_size / nr_maps;
	map_sz_copy = sizeof(struct bpf_map_def);
	if (map_sz_elf < map_sz_copy) {
		/*
		 * Backward compat, loading older ELF file with
		 * smaller struct, keeping remaining bytes zero.
		 */
		map_sz_copy = map_sz_elf;
	} else if (map_sz_elf > map_sz_copy) {
		/*
		 * Forward compat, loading newer ELF file with larger
		 * struct with unknown features. Assume zero means
		 * feature not used.  Thus, validate rest of struct
		 * data is zero.
		 */
		validate_zero = true;
	}

	/* Memcpy relevant part of ELF maps data to loader maps */
	for (i = 0; i < nr_maps; i++) {
		unsigned char *addr, *end;
		struct bpf_map_def *def;
		const char *map_name;
		size_t offset;

		map_name = elf_strptr(elf, strtabidx, sym[i].st_name);
		maps[i].name = strdup(map_name);
		if (!maps[i].name) {
			printf("strdup(%s): %s(%d)\n", map_name,
			       strerror(errno), errno);
			free(sym);
			return -errno;
		}

		/* Symbol value is offset into ELF maps section data area */
		offset = sym[i].st_value;
		def = (struct bpf_map_def *)(data_maps->d_buf + offset);
		maps[i].elf_offset = offset;
		memset(&maps[i].def, 0, sizeof(struct bpf_map_def));
		memcpy(&maps[i].def, def, map_sz_copy);

		/* Verify no newer features were requested */
		if (validate_zero) {
			addr = (unsigned char *) def + map_sz_copy;
			end = (unsigned char *) def + map_sz_elf;
			for (; addr < end; addr++) {
				if (*addr != 0) {
					free(sym);
					return -EFBIG;
				}
			}
		}
	}

	free(sym);
	return nr_maps;
}

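/*
 * Core loader: open the ELF object at 'path', pick up the license, version
 * and maps sections, create the maps, apply relocations, then load and
 * attach every recognized program section.
 */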
static int do_load_bpf_file(const char *path, fixup_map_cb fixup_map)
{
	int fd, i, ret, maps_shndx = -1, strtabidx = -1;
	Elf *elf;
	GElf_Ehdr ehdr;
	GElf_Shdr shdr, shdr_prog;
	Elf_Data *data, *data_prog, *data_maps = NULL, *symbols = NULL;
	char *shname, *shname_prog;
	int nr_maps = 0;

	/* reset global variables */
	kern_version = 0;
	memset(license, 0, sizeof(license));
	memset(processed_sec, 0, sizeof(processed_sec));

	if (elf_version(EV_CURRENT) == EV_NONE)
		return 1;

	fd = open(path, O_RDONLY, 0);
	if (fd < 0)
		return 1;

	elf = elf_begin(fd, ELF_C_READ, NULL);

	if (!elf)
		return 1;

	if (gelf_getehdr(elf, &ehdr) != &ehdr)
		return 1;

	/* clear all kprobes */
	i = system("echo \"\" > /sys/kernel/debug/tracing/kprobe_events");

	/* scan over all elf sections to get license and map info */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (0) /* helpful for llvm debugging */
			printf("section %d:%s data %p size %zd link %d flags %d\n",
			       i, shname, data->d_buf, data->d_size,
			       shdr.sh_link, (int) shdr.sh_flags);

		if (strcmp(shname, "license") == 0) {
			processed_sec[i] = true;
			memcpy(license, data->d_buf, data->d_size);
		} else if (strcmp(shname, "version") == 0) {
			processed_sec[i] = true;
			if (data->d_size != sizeof(int)) {
				printf("invalid size of version section %zd\n",
				       data->d_size);
				return 1;
			}
			memcpy(&kern_version, data->d_buf, sizeof(int));
		} else if (strcmp(shname, "maps") == 0) {
			int j;

			maps_shndx = i;
			data_maps = data;
			for (j = 0; j < MAX_MAPS; j++)
				map_data[j].fd = -1;
		} else if (shdr.sh_type == SHT_SYMTAB) {
			strtabidx = shdr.sh_link;
			symbols = data;
		}
	}

	ret = 1;

	if (!symbols) {
		printf("missing SHT_SYMTAB section\n");
		goto done;
	}

	if (data_maps) {
		nr_maps = load_elf_maps_section(map_data, maps_shndx,
						elf, symbols, strtabidx);
		if (nr_maps < 0) {
			printf("Error: Failed loading ELF maps (errno:%d):%s\n",
			       nr_maps, strerror(-nr_maps));
			ret = 1;
			goto done;
		}
		if (load_maps(map_data, nr_maps, fixup_map))
			goto done;
		map_data_count = nr_maps;

		processed_sec[maps_shndx] = true;
	}

	/* load programs that need map fixup (relocations) */
	for (i = 1; i < ehdr.e_shnum; i++) {
		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;
		if (shdr.sh_type == SHT_REL) {
			struct bpf_insn *insns;

			if (get_sec(elf, shdr.sh_info, &ehdr, &shname_prog,
				    &shdr_prog, &data_prog))
				continue;

			if (shdr_prog.sh_type != SHT_PROGBITS ||
			    !(shdr_prog.sh_flags & SHF_EXECINSTR))
				continue;

			insns = (struct bpf_insn *) data_prog->d_buf;

			processed_sec[shdr.sh_info] = true;
			processed_sec[i] = true;

			if (parse_relo_and_apply(data, symbols, &shdr, insns,
						 map_data, nr_maps))
				continue;

			if (memcmp(shname_prog, "kprobe/", 7) == 0 ||
			    memcmp(shname_prog, "kretprobe/", 10) == 0 ||
			    memcmp(shname_prog, "tracepoint/", 11) == 0 ||
			    memcmp(shname_prog, "xdp", 3) == 0 ||
			    memcmp(shname_prog, "perf_event", 10) == 0 ||
			    memcmp(shname_prog, "socket", 6) == 0 ||
			    memcmp(shname_prog, "cgroup/", 7) == 0)
				load_and_attach(shname_prog, insns, data_prog->d_size);
		}
	}

	/* load programs that don't use maps */
	for (i = 1; i < ehdr.e_shnum; i++) {

		if (processed_sec[i])
			continue;

		if (get_sec(elf, i, &ehdr, &shname, &shdr, &data))
			continue;

		if (memcmp(shname, "kprobe/", 7) == 0 ||
		    memcmp(shname, "kretprobe/", 10) == 0 ||
		    memcmp(shname, "tracepoint/", 11) == 0 ||
		    memcmp(shname, "xdp", 3) == 0 ||
		    memcmp(shname, "perf_event", 10) == 0 ||
		    memcmp(shname, "socket", 6) == 0 ||
		    memcmp(shname, "cgroup/", 7) == 0)
			load_and_attach(shname, data->d_buf, data->d_size);
	}

	ret = 0;
done:
	close(fd);
	return ret;
}

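/*
 * Public entry points.  A minimal usage sketch (the object file name below
 * is only an example):
 *
 *	if (load_bpf_file("prog_kern.o"))
 *		printf("%s", bpf_log_buf);
 *
 * On success the created map and program fds are available in map_fd[]
 * and prog_fd[].
 */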
int load_bpf_file(char *path)
{
	return do_load_bpf_file(path, NULL);
}

int load_bpf_file_fixup_map(const char *path, fixup_map_cb fixup_map)
{
	return do_load_bpf_file(path, fixup_map);
}

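/*
 * Endless loop that copies everything written to the ftrace trace_pipe
 * (e.g. by bpf_trace_printk()) to stdout.  Never returns.
 */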
void read_trace_pipe(void)
{
	int trace_fd;

	trace_fd = open(DEBUGFS "trace_pipe", O_RDONLY, 0);
	if (trace_fd < 0)
		return;

	while (1) {
		static char buf[4096];
		ssize_t sz;

		/* leave room for the terminating NUL byte */
		sz = read(trace_fd, buf, sizeof(buf) - 1);
		if (sz > 0) {
			buf[sz] = 0;
			puts(buf);
		}
	}
}

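/* Cache of /proc/kallsyms used for resolving kernel addresses to names */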
#define MAX_SYMS 300000
static struct ksym syms[MAX_SYMS];
static int sym_cnt;

static int ksym_cmp(const void *p1, const void *p2)
{
	return ((struct ksym *)p1)->addr - ((struct ksym *)p2)->addr;
}

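/*
 * Read /proc/kallsyms into syms[] and sort it by address so that
 * ksym_search() can binary-search it.
 */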
int load_kallsyms(void)
{
	FILE *f = fopen("/proc/kallsyms", "r");
	char func[256], buf[256];
	char symbol;
	void *addr;
	int i = 0;

	if (!f)
		return -ENOENT;

	while (!feof(f)) {
		if (!fgets(buf, sizeof(buf), f))
			break;
		if (sscanf(buf, "%p %c %s", &addr, &symbol, func) != 3)
			break;
		if (!addr)
			continue;
		syms[i].addr = (long) addr;
		syms[i].name = strdup(func);
		i++;
	}
	sym_cnt = i;
	qsort(syms, sym_cnt, sizeof(struct ksym), ksym_cmp);
	return 0;
}

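/*
 * Binary-search the sorted kallsyms cache for the symbol covering 'key'.
 * Falls back to the first entry (_stext) if the address is out of range.
 */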
struct ksym *ksym_search(long key)
{
	int start = 0, end = sym_cnt;
	int result;

	while (start < end) {
		size_t mid = start + (end - start) / 2;

		result = key - syms[mid].addr;
		if (result < 0)
			end = mid;
		else if (result > 0)
			start = mid + 1;
		else
			return &syms[mid];
	}

	if (start >= 1 && syms[start - 1].addr < key &&
	    key < syms[start].addr)
		/* valid ksym */
		return &syms[start - 1];

	/* out of range. return _stext */
	return &syms[0];
}

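/*
 * Attach an XDP program fd to the interface given by ifindex, by sending an
 * RTM_SETLINK netlink message carrying a nested IFLA_XDP attribute with
 * IFLA_XDP_FD and optional IFLA_XDP_FLAGS.
 */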
int set_link_xdp_fd(int ifindex, int fd, __u32 flags)
{
	struct sockaddr_nl sa;
	int sock, seq = 0, len, ret = -1;
	char buf[4096];
	struct nlattr *nla, *nla_xdp;
	struct {
		struct nlmsghdr nh;
		struct ifinfomsg ifinfo;
		char attrbuf[64];
	} req;
	struct nlmsghdr *nh;
	struct nlmsgerr *err;

	memset(&sa, 0, sizeof(sa));
	sa.nl_family = AF_NETLINK;

	sock = socket(AF_NETLINK, SOCK_RAW, NETLINK_ROUTE);
	if (sock < 0) {
		printf("open netlink socket: %s\n", strerror(errno));
		return -1;
	}

	if (bind(sock, (struct sockaddr *)&sa, sizeof(sa)) < 0) {
		printf("bind to netlink: %s\n", strerror(errno));
		goto cleanup;
	}

	memset(&req, 0, sizeof(req));
	req.nh.nlmsg_len = NLMSG_LENGTH(sizeof(struct ifinfomsg));
	req.nh.nlmsg_flags = NLM_F_REQUEST | NLM_F_ACK;
	req.nh.nlmsg_type = RTM_SETLINK;
	req.nh.nlmsg_pid = 0;
	req.nh.nlmsg_seq = ++seq;
	req.ifinfo.ifi_family = AF_UNSPEC;
	req.ifinfo.ifi_index = ifindex;

	/* start nested attribute for XDP */
	nla = (struct nlattr *)(((char *)&req)
				+ NLMSG_ALIGN(req.nh.nlmsg_len));
	nla->nla_type = NLA_F_NESTED | 43/*IFLA_XDP*/;
	nla->nla_len = NLA_HDRLEN;

	/* add XDP fd */
	nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
	nla_xdp->nla_type = 1/*IFLA_XDP_FD*/;
	nla_xdp->nla_len = NLA_HDRLEN + sizeof(int);
	memcpy((char *)nla_xdp + NLA_HDRLEN, &fd, sizeof(fd));
	nla->nla_len += nla_xdp->nla_len;

	/* if user passed in any flags, add those too */
	if (flags) {
		nla_xdp = (struct nlattr *)((char *)nla + nla->nla_len);
		nla_xdp->nla_type = 3/*IFLA_XDP_FLAGS*/;
		nla_xdp->nla_len = NLA_HDRLEN + sizeof(flags);
		memcpy((char *)nla_xdp + NLA_HDRLEN, &flags, sizeof(flags));
		nla->nla_len += nla_xdp->nla_len;
	}

	req.nh.nlmsg_len += NLA_ALIGN(nla->nla_len);

	if (send(sock, &req, req.nh.nlmsg_len, 0) < 0) {
		printf("send to netlink: %s\n", strerror(errno));
		goto cleanup;
	}

	len = recv(sock, buf, sizeof(buf), 0);
	if (len < 0) {
		printf("recv from netlink: %s\n", strerror(errno));
		goto cleanup;
	}

	for (nh = (struct nlmsghdr *)buf; NLMSG_OK(nh, len);
	     nh = NLMSG_NEXT(nh, len)) {
		if (nh->nlmsg_pid != getpid()) {
			printf("Wrong pid %d, expected %d\n",
			       nh->nlmsg_pid, getpid());
			goto cleanup;
		}
		if (nh->nlmsg_seq != seq) {
			printf("Wrong seq %d, expected %d\n",
			       nh->nlmsg_seq, seq);
			goto cleanup;
		}
		switch (nh->nlmsg_type) {
		case NLMSG_ERROR:
			err = (struct nlmsgerr *)NLMSG_DATA(nh);
			if (!err->error)
				continue;
			printf("nlmsg error %s\n", strerror(-err->error));
			goto cleanup;
		case NLMSG_DONE:
			break;
		}
	}

	ret = 0;

cleanup:
	close(sock);
	return ret;
}