// SPDX-License-Identifier: (LGPL-2.1 OR BSD-2-Clause)

/*
 * AF_XDP user-space access library.
 *
 * Copyright(c) 2018 - 2019 Intel Corporation.
 *
 * Author(s): Magnus Karlsson <magnus.karlsson@intel.com>
 */

#include <errno.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <arpa/inet.h>
#include <asm/barrier.h>
#include <linux/compiler.h>
#include <linux/ethtool.h>
#include <linux/filter.h>
#include <linux/if_ether.h>
#include <linux/if_packet.h>
#include <linux/if_xdp.h>
#include <linux/sockios.h>
#include <net/if.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <sys/socket.h>
#include <sys/types.h>

#include "bpf.h"
#include "libbpf.h"
#include "libbpf_util.h"
#include "xsk.h"

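/* Fallback definitions for systems whose headers predate AF_XDP. */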
#ifndef SOL_XDP
#define SOL_XDP 283
#endif

#ifndef AF_XDP
#define AF_XDP 44
#endif

#ifndef PF_XDP
#define PF_XDP AF_XDP
#endif

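/* Internal bookkeeping for a umem and for a socket bound on top of it.
 * The fill and completion rings belong to the umem, the Rx and Tx rings
 * to the socket. refcount counts the sockets using a umem; sharing a
 * umem between sockets is rejected for now (see xsk_socket__create()).
 */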
struct xsk_umem {
	struct xsk_ring_prod *fill;
	struct xsk_ring_cons *comp;
	char *umem_area;
	struct xsk_umem_config config;
	int fd;
	int refcount;
};

struct xsk_socket {
	struct xsk_ring_cons *rx;
	struct xsk_ring_prod *tx;
	__u64 outstanding_tx;
	struct xsk_umem *umem;
	struct xsk_socket_config config;
	int fd;
	int xsks_map;
	int ifindex;
	int prog_fd;
	int qidconf_map_fd;
	int xsks_map_fd;
	__u32 queue_id;
	char ifname[IFNAMSIZ];
};

struct xsk_nl_info {
	bool xdp_prog_attached;
	int ifindex;
	int fd;
};

/* For 32-bit systems, we need to use mmap2 as the offsets are 64-bit.
 * Unfortunately, it is not part of glibc.
 */
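/* On failure, the glibc syscall() wrapper returns -1, which the cast
 * below turns into (void *)-1, i.e. MAP_FAILED, so callers can check the
 * result exactly as they would for mmap().
 */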
static inline void *xsk_mmap(void *addr, size_t length, int prot, int flags,
			     int fd, __u64 offset)
{
#ifdef __NR_mmap2
	unsigned int page_shift = __builtin_ffs(getpagesize()) - 1;
	long ret = syscall(__NR_mmap2, addr, length, prot, flags, fd,
			   (off_t)(offset >> page_shift));

	return (void *)ret;
#else
	return mmap(addr, length, prot, flags, fd, offset);
#endif
}

int xsk_umem__fd(const struct xsk_umem *umem)
{
	return umem ? umem->fd : -EINVAL;
}

int xsk_socket__fd(const struct xsk_socket *xsk)
{
	return xsk ? xsk->fd : -EINVAL;
}

static bool xsk_page_aligned(void *buffer)
{
	unsigned long addr = (unsigned long)buffer;

	return !(addr & (getpagesize() - 1));
}

static void xsk_set_umem_config(struct xsk_umem_config *cfg,
				const struct xsk_umem_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->fill_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->comp_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->frame_size = XSK_UMEM__DEFAULT_FRAME_SIZE;
		cfg->frame_headroom = XSK_UMEM__DEFAULT_FRAME_HEADROOM;
		return;
	}

	cfg->fill_size = usr_cfg->fill_size;
	cfg->comp_size = usr_cfg->comp_size;
	cfg->frame_size = usr_cfg->frame_size;
	cfg->frame_headroom = usr_cfg->frame_headroom;
}

static void xsk_set_xdp_socket_config(struct xsk_socket_config *cfg,
				      const struct xsk_socket_config *usr_cfg)
{
	if (!usr_cfg) {
		cfg->rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS;
		cfg->tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS;
		cfg->libbpf_flags = 0;
		cfg->xdp_flags = 0;
		cfg->bind_flags = 0;
		return;
	}

	cfg->rx_size = usr_cfg->rx_size;
	cfg->tx_size = usr_cfg->tx_size;
	cfg->libbpf_flags = usr_cfg->libbpf_flags;
	cfg->xdp_flags = usr_cfg->xdp_flags;
	cfg->bind_flags = usr_cfg->bind_flags;
}
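
/* Example (illustrative only) of a user-supplied config that doubles the
 * default Rx ring and skips loading the built-in XDP program:
 *
 *	struct xsk_socket_config cfg = {
 *		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS * 2,
 *		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
 *		.libbpf_flags = XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD,
 *	};
 */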

int xsk_umem__create(struct xsk_umem **umem_ptr, void *umem_area, __u64 size,
		     struct xsk_ring_prod *fill, struct xsk_ring_cons *comp,
		     const struct xsk_umem_config *usr_config)
{
	struct xdp_mmap_offsets off;
	struct xdp_umem_reg mr;
	struct xsk_umem *umem;
	socklen_t optlen;
	void *map;
	int err;

	if (!umem_area || !umem_ptr || !fill || !comp)
		return -EFAULT;
	if (!size && !xsk_page_aligned(umem_area))
		return -EINVAL;

	umem = calloc(1, sizeof(*umem));
	if (!umem)
		return -ENOMEM;

	umem->fd = socket(AF_XDP, SOCK_RAW, 0);
	if (umem->fd < 0) {
		err = -errno;
		goto out_umem_alloc;
	}

	umem->umem_area = umem_area;
	xsk_set_umem_config(&umem->config, usr_config);

	memset(&mr, 0, sizeof(mr));	/* clear any fields we do not set explicitly */
	mr.addr = (uintptr_t)umem_area;
	mr.len = size;
	mr.chunk_size = umem->config.frame_size;
	mr.headroom = umem->config.frame_headroom;

	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_REG, &mr, sizeof(mr));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_FILL_RING,
			 &umem->config.fill_size,
			 sizeof(umem->config.fill_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}
	err = setsockopt(umem->fd, SOL_XDP, XDP_UMEM_COMPLETION_RING,
			 &umem->config.comp_size,
			 sizeof(umem->config.comp_size));
	if (err) {
		err = -errno;
		goto out_socket;
	}

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	map = xsk_mmap(NULL, off.fr.desc +
		       umem->config.fill_size * sizeof(__u64),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       umem->fd, XDP_UMEM_PGOFF_FILL_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_socket;
	}

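	/* The kernel requires all ring sizes to be powers of two, so
	 * size - 1 can serve as the ring index mask.
	 */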
	umem->fill = fill;
	fill->mask = umem->config.fill_size - 1;
	fill->size = umem->config.fill_size;
	fill->producer = map + off.fr.producer;
	fill->consumer = map + off.fr.consumer;
	fill->ring = map + off.fr.desc;
	fill->cached_cons = umem->config.fill_size;

	map = xsk_mmap(NULL,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64),
		       PROT_READ | PROT_WRITE, MAP_SHARED | MAP_POPULATE,
		       umem->fd, XDP_UMEM_PGOFF_COMPLETION_RING);
	if (map == MAP_FAILED) {
		err = -errno;
		goto out_mmap;
	}

	umem->comp = comp;
	comp->mask = umem->config.comp_size - 1;
	comp->size = umem->config.comp_size;
	comp->producer = map + off.cr.producer;
	comp->consumer = map + off.cr.consumer;
	comp->ring = map + off.cr.desc;

	*umem_ptr = umem;
	return 0;

out_mmap:
	/* Unmap the fill ring mapping itself; umem->fill points at the
	 * caller's ring struct, not at the mapped area.
	 */
	munmap(fill->ring - off.fr.desc,
	       off.fr.desc + umem->config.fill_size * sizeof(__u64));
out_socket:
	close(umem->fd);
out_umem_alloc:
	free(umem);
	return err;
}
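
/* Typical usage (illustrative sketch only; NUM_FRAMES is a hypothetical
 * application-defined constant):
 *
 *	void *bufs;
 *	struct xsk_ring_prod fq;
 *	struct xsk_ring_cons cq;
 *	struct xsk_umem *umem;
 *	int err;
 *
 *	if (posix_memalign(&bufs, getpagesize(),
 *			   NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE))
 *		exit(EXIT_FAILURE);
 *	err = xsk_umem__create(&umem, bufs,
 *			       NUM_FRAMES * XSK_UMEM__DEFAULT_FRAME_SIZE,
 *			       &fq, &cq, NULL);
 */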

static int xsk_load_xdp_prog(struct xsk_socket *xsk)
{
	char bpf_log_buf[BPF_LOG_BUF_SIZE];
	int err, prog_fd;

	/* This is the C-program:
	 * SEC("xdp_sock") int xdp_sock_prog(struct xdp_md *ctx)
	 * {
	 *     int *qidconf, index = ctx->rx_queue_index;
	 *
	 *     // A set entry here means that the corresponding queue_id
	 *     // has an active AF_XDP socket bound to it.
	 *     qidconf = bpf_map_lookup_elem(&qidconf_map, &index);
	 *     if (!qidconf)
	 *         return XDP_ABORTED;
	 *
	 *     if (*qidconf)
	 *         return bpf_redirect_map(&xsks_map, index, 0);
	 *
	 *     return XDP_PASS;
	 * }
	 */
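	/* Note: each BPF_LD_MAP_FD() below expands to two instructions,
	 * which is why the forward jumps of +8 and +5 both land on the
	 * final BPF_EXIT_INSN().
	 */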
	struct bpf_insn prog[] = {
		/* r1 = *(u32 *)(r1 + 16) */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 16),
		/* *(u32 *)(r10 - 4) = r1 */
		BPF_STX_MEM(BPF_W, BPF_REG_10, BPF_REG_1, -4),
		BPF_MOV64_REG(BPF_REG_2, BPF_REG_10),
		BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, -4),
		BPF_LD_MAP_FD(BPF_REG_1, xsk->qidconf_map_fd),
		BPF_EMIT_CALL(BPF_FUNC_map_lookup_elem),
		BPF_MOV64_REG(BPF_REG_1, BPF_REG_0),
		BPF_MOV32_IMM(BPF_REG_0, 0),
		/* if r1 == 0 goto +8 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 8),
		BPF_MOV32_IMM(BPF_REG_0, 2),
		/* r1 = *(u32 *)(r1 + 0) */
		BPF_LDX_MEM(BPF_W, BPF_REG_1, BPF_REG_1, 0),
		/* if r1 == 0 goto +5 */
		BPF_JMP_IMM(BPF_JEQ, BPF_REG_1, 0, 5),
		BPF_LD_MAP_FD(BPF_REG_1, xsk->xsks_map_fd),
		/* r2 = *(u32 *)(r10 - 4) */
		BPF_LDX_MEM(BPF_W, BPF_REG_2, BPF_REG_10, -4),
		BPF_MOV32_IMM(BPF_REG_3, 0),
		BPF_EMIT_CALL(BPF_FUNC_redirect_map),
		/* The jumps are to this instruction */
		BPF_EXIT_INSN(),
	};
	size_t insns_cnt = sizeof(prog) / sizeof(struct bpf_insn);

	prog_fd = bpf_load_program(BPF_PROG_TYPE_XDP, prog, insns_cnt,
				   "LGPL-2.1 or BSD-2-Clause", 0, bpf_log_buf,
				   BPF_LOG_BUF_SIZE);
	if (prog_fd < 0) {
		pr_warning("BPF log buffer:\n%s", bpf_log_buf);
		return prog_fd;
	}

	err = bpf_set_link_xdp_fd(xsk->ifindex, prog_fd, xsk->config.xdp_flags);
	if (err) {
		close(prog_fd);
		return err;
	}

	xsk->prog_fd = prog_fd;
	return 0;
}

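/* Query how many queues the interface supports, so that the XDP maps can
 * be sized to cover every queue id. Note that only combined channels are
 * considered; drivers that report their queues via max_rx/max_tx rather
 * than max_combined are treated as single-queue devices.
 */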
static int xsk_get_max_queues(struct xsk_socket *xsk)
{
	struct ethtool_channels channels;
	struct ifreq ifr;
	int fd, err, ret;

	fd = socket(AF_INET, SOCK_DGRAM, 0);
	if (fd < 0)
		return -errno;

	channels.cmd = ETHTOOL_GCHANNELS;
	ifr.ifr_data = (void *)&channels;
	memcpy(ifr.ifr_name, xsk->ifname, IFNAMSIZ - 1);
	ifr.ifr_name[IFNAMSIZ - 1] = '\0';
	err = ioctl(fd, SIOCETHTOOL, &ifr);
	if (err && errno != EOPNOTSUPP) {
		ret = -errno;
		goto out;
	}

	if (err || channels.max_combined == 0)
		/* If the device says it has no channels, then all traffic
		 * is sent to a single stream, so max queues = 1.
		 */
		ret = 1;
	else
		ret = channels.max_combined;

out:
	close(fd);
	return ret;
}

static int xsk_create_bpf_maps(struct xsk_socket *xsk)
{
	int max_queues;
	int fd;

	max_queues = xsk_get_max_queues(xsk);
	if (max_queues < 0)
		return max_queues;

	fd = bpf_create_map_name(BPF_MAP_TYPE_ARRAY, "qidconf_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0)
		return fd;
	xsk->qidconf_map_fd = fd;

	fd = bpf_create_map_name(BPF_MAP_TYPE_XSKMAP, "xsks_map",
				 sizeof(int), sizeof(int), max_queues, 0);
	if (fd < 0) {
		close(xsk->qidconf_map_fd);
		return fd;
	}
	xsk->xsks_map_fd = fd;

	return 0;
}

static void xsk_delete_bpf_maps(struct xsk_socket *xsk)
{
	close(xsk->qidconf_map_fd);
	close(xsk->xsks_map_fd);
}

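/* Find the qidconf_map and xsks_map among the maps referenced by the XDP
 * program attached to the interface and update the entries for our
 * queue_id. Walking the attached program's map ids (instead of using the
 * fds we may have created ourselves) also works when reusing a program
 * that was loaded by somebody else.
 */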
static int xsk_update_bpf_maps(struct xsk_socket *xsk, int qidconf_value,
			       int xsks_value)
{
	bool qidconf_map_updated = false, xsks_map_updated = false;
	struct bpf_prog_info prog_info = {};
	__u32 prog_len = sizeof(prog_info);
	struct bpf_map_info map_info;
	__u32 map_len = sizeof(map_info);
	__u32 *map_ids;
	int reset_value = 0;
	__u32 num_maps;
	unsigned int i;
	int err;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		return err;

	num_maps = prog_info.nr_map_ids;

	map_ids = calloc(prog_info.nr_map_ids, sizeof(*map_ids));
	if (!map_ids)
		return -ENOMEM;

	memset(&prog_info, 0, prog_len);
	prog_info.nr_map_ids = num_maps;
	prog_info.map_ids = (__u64)(unsigned long)map_ids;

	err = bpf_obj_get_info_by_fd(xsk->prog_fd, &prog_info, &prog_len);
	if (err)
		goto out_map_ids;

	for (i = 0; i < prog_info.nr_map_ids; i++) {
		int fd;

		fd = bpf_map_get_fd_by_id(map_ids[i]);
		if (fd < 0) {
			err = -errno;
			goto out_maps;
		}

		err = bpf_obj_get_info_by_fd(fd, &map_info, &map_len);
		if (err)
			goto out_maps;

		if (!strcmp(map_info.name, "qidconf_map")) {
			err = bpf_map_update_elem(fd, &xsk->queue_id,
						  &qidconf_value, 0);
			if (err)
				goto out_maps;
			qidconf_map_updated = true;
			xsk->qidconf_map_fd = fd;
		} else if (!strcmp(map_info.name, "xsks_map")) {
			err = bpf_map_update_elem(fd, &xsk->queue_id,
						  &xsks_value, 0);
			if (err)
				goto out_maps;
			xsks_map_updated = true;
			xsk->xsks_map_fd = fd;
		} else {
			/* Not a map we manage; don't leak its fd. */
			close(fd);
		}

		if (qidconf_map_updated && xsks_map_updated)
			break;
	}

	if (!(qidconf_map_updated && xsks_map_updated)) {
		err = -ENOENT;
		goto out_maps;
	}

	err = 0;
	goto out_success;

out_maps:
	if (qidconf_map_updated)
		(void)bpf_map_update_elem(xsk->qidconf_map_fd, &xsk->queue_id,
					  &reset_value, 0);
	if (xsks_map_updated)
		(void)bpf_map_update_elem(xsk->xsks_map_fd, &xsk->queue_id,
					  &reset_value, 0);
out_success:
	if (qidconf_map_updated)
		close(xsk->qidconf_map_fd);
	if (xsks_map_updated)
		close(xsk->xsks_map_fd);
out_map_ids:
	free(map_ids);
	return err;
}

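/* Attach the default XDP program to the interface if no program is
 * attached yet; otherwise reuse the one that is already there. In both
 * cases, register this socket for its queue_id in the XDP maps.
 */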
static int xsk_setup_xdp_prog(struct xsk_socket *xsk)
{
	bool prog_attached = false;
	__u32 prog_id = 0;
	int err;

	err = bpf_get_link_xdp_id(xsk->ifindex, &prog_id,
				  xsk->config.xdp_flags);
	if (err)
		return err;

	if (!prog_id) {
		prog_attached = true;
		err = xsk_create_bpf_maps(xsk);
		if (err)
			return err;

		err = xsk_load_xdp_prog(xsk);
		if (err)
			goto out_maps;
	} else {
		xsk->prog_fd = bpf_prog_get_fd_by_id(prog_id);
	}

	err = xsk_update_bpf_maps(xsk, true, xsk->fd);
	if (err)
		goto out_load;

	return 0;

out_load:
	if (prog_attached)
		close(xsk->prog_fd);
out_maps:
	if (prog_attached)
		xsk_delete_bpf_maps(xsk);
	return err;
}

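/* Typical call (illustrative sketch; "eth0" and queue 0 are placeholders,
 * and umem comes from a prior xsk_umem__create()):
 *
 *	struct xsk_ring_cons rx;
 *	struct xsk_ring_prod tx;
 *	struct xsk_socket *xsk;
 *	int err;
 *
 *	err = xsk_socket__create(&xsk, "eth0", 0, umem, &rx, &tx, NULL);
 */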
int xsk_socket__create(struct xsk_socket **xsk_ptr, const char *ifname,
		       __u32 queue_id, struct xsk_umem *umem,
		       struct xsk_ring_cons *rx, struct xsk_ring_prod *tx,
		       const struct xsk_socket_config *usr_config)
{
	struct sockaddr_xdp sxdp = {};
	struct xdp_mmap_offsets off;
	struct xsk_socket *xsk;
	socklen_t optlen;
	void *map;
	int err;

	if (!umem || !xsk_ptr || !rx || !tx)
		return -EFAULT;

	if (umem->refcount) {
		pr_warning("Error: shared umems not supported by libbpf.\n");
		return -EBUSY;
	}

	xsk = calloc(1, sizeof(*xsk));
	if (!xsk)
		return -ENOMEM;

	if (umem->refcount++ > 0) {
		xsk->fd = socket(AF_XDP, SOCK_RAW, 0);
		if (xsk->fd < 0) {
			err = -errno;
			goto out_xsk_alloc;
		}
	} else {
		xsk->fd = umem->fd;
	}

	xsk->outstanding_tx = 0;
	xsk->queue_id = queue_id;
	xsk->umem = umem;
	xsk->ifindex = if_nametoindex(ifname);
	if (!xsk->ifindex) {
		err = -errno;
		goto out_socket;
	}
	strncpy(xsk->ifname, ifname, IFNAMSIZ - 1);
	xsk->ifname[IFNAMSIZ - 1] = '\0';

	xsk_set_xdp_socket_config(&xsk->config, usr_config);

	if (rx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_RX_RING,
				 &xsk->config.rx_size,
				 sizeof(xsk->config.rx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}
	if (tx) {
		err = setsockopt(xsk->fd, SOL_XDP, XDP_TX_RING,
				 &xsk->config.tx_size,
				 sizeof(xsk->config.tx_size));
		if (err) {
			err = -errno;
			goto out_socket;
		}
	}

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (err) {
		err = -errno;
		goto out_socket;
	}

	if (rx) {
		map = xsk_mmap(NULL, off.rx.desc +
			       xsk->config.rx_size * sizeof(struct xdp_desc),
			       PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_POPULATE,
			       xsk->fd, XDP_PGOFF_RX_RING);
		if (map == MAP_FAILED) {
			err = -errno;
			goto out_socket;
		}

		rx->mask = xsk->config.rx_size - 1;
		rx->size = xsk->config.rx_size;
		rx->producer = map + off.rx.producer;
		rx->consumer = map + off.rx.consumer;
		rx->ring = map + off.rx.desc;
	}
	xsk->rx = rx;

	if (tx) {
		map = xsk_mmap(NULL, off.tx.desc +
			       xsk->config.tx_size * sizeof(struct xdp_desc),
			       PROT_READ | PROT_WRITE,
			       MAP_SHARED | MAP_POPULATE,
			       xsk->fd, XDP_PGOFF_TX_RING);
		if (map == MAP_FAILED) {
			err = -errno;
			goto out_mmap_rx;
		}

		tx->mask = xsk->config.tx_size - 1;
		tx->size = xsk->config.tx_size;
		tx->producer = map + off.tx.producer;
		tx->consumer = map + off.tx.consumer;
		tx->ring = map + off.tx.desc;
		tx->cached_cons = xsk->config.tx_size;
	}
	xsk->tx = tx;

	sxdp.sxdp_family = PF_XDP;
	sxdp.sxdp_ifindex = xsk->ifindex;
	sxdp.sxdp_queue_id = xsk->queue_id;
	sxdp.sxdp_flags = xsk->config.bind_flags;

	err = bind(xsk->fd, (struct sockaddr *)&sxdp, sizeof(sxdp));
	if (err) {
		err = -errno;
		goto out_mmap_tx;
	}

	if (!(xsk->config.libbpf_flags & XSK_LIBBPF_FLAGS__INHIBIT_PROG_LOAD)) {
		err = xsk_setup_xdp_prog(xsk);
		if (err)
			goto out_mmap_tx;
	}

	*xsk_ptr = xsk;
	return 0;

out_mmap_tx:
	/* The ring mappings start off.{tx,rx}.desc bytes before the desc
	 * arrays; xsk->tx/xsk->rx point at the caller's ring structs and
	 * must not be passed to munmap().
	 */
	if (tx)
		munmap(tx->ring - off.tx.desc,
		       off.tx.desc +
		       xsk->config.tx_size * sizeof(struct xdp_desc));
out_mmap_rx:
	if (rx)
		munmap(rx->ring - off.rx.desc,
		       off.rx.desc +
		       xsk->config.rx_size * sizeof(struct xdp_desc));
out_socket:
	if (--umem->refcount)
		close(xsk->fd);
out_xsk_alloc:
	free(xsk);
	return err;
}

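/* Tear-down order matters: every socket created on top of a umem must be
 * deleted first, bringing umem->refcount back to zero, before the umem
 * itself can be deleted.
 */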
int xsk_umem__delete(struct xsk_umem *umem)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!umem)
		return 0;

	if (umem->refcount)
		return -EBUSY;

	optlen = sizeof(off);
	err = getsockopt(umem->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		munmap(umem->fill->ring - off.fr.desc,
		       off.fr.desc + umem->config.fill_size * sizeof(__u64));
		munmap(umem->comp->ring - off.cr.desc,
		       off.cr.desc + umem->config.comp_size * sizeof(__u64));
	}

	close(umem->fd);
	free(umem);

	return 0;
}

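/* Deregisters the socket from the XDP maps, unmaps its rings, and closes
 * its fd unless that fd is shared with the umem.
 */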
void xsk_socket__delete(struct xsk_socket *xsk)
{
	struct xdp_mmap_offsets off;
	socklen_t optlen;
	int err;

	if (!xsk)
		return;

	(void)xsk_update_bpf_maps(xsk, 0, 0);

	optlen = sizeof(off);
	err = getsockopt(xsk->fd, SOL_XDP, XDP_MMAP_OFFSETS, &off, &optlen);
	if (!err) {
		if (xsk->rx)
			munmap(xsk->rx->ring - off.rx.desc,
			       off.rx.desc +
			       xsk->config.rx_size * sizeof(struct xdp_desc));
		if (xsk->tx)
			munmap(xsk->tx->ring - off.tx.desc,
			       off.tx.desc +
			       xsk->config.tx_size * sizeof(struct xdp_desc));
	}

	xsk->umem->refcount--;
	/* Do not close an fd that also has an associated umem connected
	 * to it.
	 */
	if (xsk->fd != xsk->umem->fd)
		close(xsk->fd);
	free(xsk);
}