/*-
 * BSD LICENSE
 *
 * Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *
 *   * Redistributions of source code must retain the above copyright
 *     notice, this list of conditions and the following disclaimer.
 *   * Redistributions in binary form must reproduce the above copyright
 *     notice, this list of conditions and the following disclaimer in
 *     the documentation and/or other materials provided with the
 *     distribution.
 *   * Neither the name of Intel Corporation nor the names of its
 *     contributors may be used to endorse or promote products derived
 *     from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <string.h>
#include <stdio.h>
#include <stdbool.h>
#include <limits.h>
#include <errno.h>
#include <inttypes.h>
#include <unistd.h>
#include <fcntl.h>
#include <linux/pci_regs.h>
#include <sys/eventfd.h>
#include <sys/socket.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <rte_log.h>
#include <rte_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>

#include "eal_filesystem.h"
#include "eal_pci_init.h"
#include "eal_vfio.h"
#include "eal_private.h"

/**
 * @file
 * PCI probing under Linux (VFIO version)
 *
 * This code determines whether a PCI device is bound to the VFIO driver
 * and, if so, initializes it (maps BARs, sets up interrupts).
 *
 * This file is only compiled if CONFIG_RTE_EAL_VFIO is set to "y".
 */

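/*
 * Typical call flow (a sketch, not enforced by this file): the EAL is
 * expected to call pci_vfio_enable() during initialization,
 * pci_vfio_map_resource() when probing a device bound to vfio-pci, and
 * pci_vfio_unmap_resource() when the device is detached.
 */
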
#ifdef VFIO_PRESENT

#define PAGE_SIZE (sysconf(_SC_PAGESIZE))
#define PAGE_MASK (~(PAGE_SIZE - 1))

static struct rte_tailq_elem rte_vfio_tailq = {
	.name = "VFIO_RESOURCE_LIST",
};
EAL_REGISTER_TAILQ(rte_vfio_tailq)

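/*
 * Helpers for reading and writing the device's PCI configuration space.
 * VFIO exposes config space as a device region, and VFIO_GET_REGION_ADDR()
 * turns the region index into the file offset expected by pread64() and
 * pwrite64() on the VFIO device fd.
 */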
int
pci_vfio_read_config(const struct rte_intr_handle *intr_handle,
		    void *buf, size_t len, off_t offs)
{
	return pread64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}

int
pci_vfio_write_config(const struct rte_intr_handle *intr_handle,
		    const void *buf, size_t len, off_t offs)
{
	return pwrite64(intr_handle->vfio_dev_fd, buf, len,
	       VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) + offs);
}

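/*
 * Get the PCI BAR number where the MSI-X table resides, along with the
 * table's offset within that BAR and its size in bytes. Walks the PCI
 * capability list in config space; returns 0 on success (leaving
 * *msix_bar untouched if the device has no MSI-X capability) and -1 on
 * config-space read errors.
 */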
static int
pci_vfio_get_msix_bar(int fd, int *msix_bar, uint32_t *msix_table_offset,
		      uint32_t *msix_table_size)
{
	int ret;
	uint32_t reg;
	uint16_t flags;
	uint8_t cap_id, cap_offset;

	/* read PCI capability pointer from config space */
	ret = pread64(fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_CAPABILITY_LIST);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
				"config space!\n");
		return -1;
	}

	/* we only need the first byte */
	cap_offset = reg & 0xFF;

	while (cap_offset) {

		/* read PCI capability ID */
		ret = pread64(fd, &reg, sizeof(reg),
				VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
				cap_offset);
		if (ret != sizeof(reg)) {
			RTE_LOG(ERR, EAL, "Cannot read capability ID from PCI "
					"config space!\n");
			return -1;
		}

		/* we only need the first byte */
		cap_id = reg & 0xFF;

		/* if we haven't reached MSI-X, check the next capability */
		if (cap_id != PCI_CAP_ID_MSIX) {
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read capability pointer from PCI "
						"config space!\n");
				return -1;
			}

			/* we only need the second byte */
			cap_offset = (reg & 0xFF00) >> 8;

			continue;
		}
		/* else, read table offset */
		else {
			/* table offset resides in the next 4 bytes */
			ret = pread64(fd, &reg, sizeof(reg),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset + 4);
			if (ret != sizeof(reg)) {
				RTE_LOG(ERR, EAL, "Cannot read table offset from PCI config "
						"space!\n");
				return -1;
			}

			ret = pread64(fd, &flags, sizeof(flags),
					VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
					cap_offset + 2);
			if (ret != sizeof(flags)) {
				RTE_LOG(ERR, EAL, "Cannot read table flags from PCI config "
						"space!\n");
				return -1;
			}

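			/*
			 * Decode the MSI-X capability: the BIR field selects
			 * the BAR that holds the table, the remaining bits
			 * give the table's offset within that BAR, and the
			 * QSIZE field is N-1 encoded with 16 bytes per
			 * table entry.
			 */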
			*msix_bar = reg & RTE_PCI_MSIX_TABLE_BIR;
			*msix_table_offset = reg & RTE_PCI_MSIX_TABLE_OFFSET;
			*msix_table_size = 16 * (1 + (flags & RTE_PCI_MSIX_FLAGS_QSIZE));

			return 0;
		}
	}
	return 0;
}

/* enable or disable PCI bus mastering */
static int
pci_vfio_set_bus_master(int dev_fd, bool op)
{
	uint16_t reg;
	int ret;

	ret = pread64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);
	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot read command from PCI config space!\n");
		return -1;
	}

	if (op)
		/* set the master bit */
		reg |= PCI_COMMAND_MASTER;
	else
		reg &= ~(PCI_COMMAND_MASTER);

	ret = pwrite64(dev_fd, &reg, sizeof(reg),
			VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX) +
			PCI_COMMAND);

	if (ret != sizeof(reg)) {
		RTE_LOG(ERR, EAL, "Cannot write command to PCI config space!\n");
		return -1;
	}

	return 0;
}

/* set up interrupt support (without enabling interrupts) */
static int
pci_vfio_setup_interrupts(struct rte_pci_device *dev, int vfio_dev_fd)
{
	int i, ret, intr_idx;

	/* default to invalid index */
	intr_idx = VFIO_PCI_NUM_IRQS;

	/* get interrupt type from internal config (MSI-X by default, can be
	 * overridden from the command line)
	 */
	switch (internal_config.vfio_intr_mode) {
	case RTE_INTR_MODE_MSIX:
		intr_idx = VFIO_PCI_MSIX_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_MSI:
		intr_idx = VFIO_PCI_MSI_IRQ_INDEX;
		break;
	case RTE_INTR_MODE_LEGACY:
		intr_idx = VFIO_PCI_INTX_IRQ_INDEX;
		break;
	/* don't do anything if we want to automatically determine interrupt type */
	case RTE_INTR_MODE_NONE:
		break;
	default:
		RTE_LOG(ERR, EAL, " unknown default interrupt type!\n");
		return -1;
	}

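	/*
	 * Probe interrupt types from MSI-X down to legacy INTx; the first
	 * usable vector wins unless a specific mode was requested above.
	 */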
	for (i = VFIO_PCI_MSIX_IRQ_INDEX; i >= 0; i--) {
		struct vfio_irq_info irq = { .argsz = sizeof(irq) };
		int fd = -1;

		/* skip interrupt modes we don't want */
		if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE &&
				i != intr_idx)
			continue;

		irq.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq);
		if (ret < 0) {
			RTE_LOG(ERR, EAL, " cannot get IRQ info, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		/* if this vector cannot be used with eventfd, fail if we explicitly
		 * specified interrupt type, otherwise continue */
		if ((irq.flags & VFIO_IRQ_INFO_EVENTFD) == 0) {
			if (internal_config.vfio_intr_mode != RTE_INTR_MODE_NONE) {
				RTE_LOG(ERR, EAL,
						" interrupt vector does not support eventfd!\n");
				return -1;
			} else
				continue;
		}

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			RTE_LOG(ERR, EAL, " cannot set up eventfd, "
					"error %i (%s)\n", errno, strerror(errno));
			return -1;
		}

		dev->intr_handle.fd = fd;
		dev->intr_handle.vfio_dev_fd = vfio_dev_fd;

		switch (i) {
		case VFIO_PCI_MSIX_IRQ_INDEX:
			internal_config.vfio_intr_mode = RTE_INTR_MODE_MSIX;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSIX;
			break;
		case VFIO_PCI_MSI_IRQ_INDEX:
			internal_config.vfio_intr_mode = RTE_INTR_MODE_MSI;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_MSI;
			break;
		case VFIO_PCI_INTX_IRQ_INDEX:
			internal_config.vfio_intr_mode = RTE_INTR_MODE_LEGACY;
			dev->intr_handle.type = RTE_INTR_HANDLE_VFIO_LEGACY;
			break;
		default:
			RTE_LOG(ERR, EAL, " unknown interrupt type!\n");
			return -1;
		}

		return 0;
	}

	/* if we're here, we haven't found a suitable interrupt vector */
	return -1;
}

/*
 * Map the PCI resources of a PCI device in virtual memory (VFIO version).
 * Primary and secondary processes follow almost exactly the same path.
 */
int
pci_vfio_map_resource(struct rte_pci_device *dev)
{
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	char pci_addr[PATH_MAX] = {0};
	int vfio_dev_fd;
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret, msix_bar;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list =
		RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);

	struct pci_map *maps;
	uint32_t msix_table_offset = 0;
	uint32_t msix_table_size = 0;
	uint32_t ioport_bar;

	dev->intr_handle.fd = -1;
	dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	if ((ret = vfio_setup_device(pci_get_sysfs_path(), pci_addr,
					&vfio_dev_fd, &device_info)))
		return ret;

	/* get MSI-X BAR, if any (we have to know where it is because we can't
	 * easily mmap it when using VFIO) */
	msix_bar = -1;
	ret = pci_vfio_get_msix_bar(vfio_dev_fd, &msix_bar,
				    &msix_table_offset, &msix_table_size);
	if (ret < 0) {
		RTE_LOG(ERR, EAL, " %s cannot get MSI-X BAR number!\n", pci_addr);
		close(vfio_dev_fd);
		return -1;
	}

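	/*
	 * The resource list is an EAL tailq registered via
	 * EAL_REGISTER_TAILQ, so entries created by the primary process are
	 * visible to secondary processes as well.
	 */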
	/* if we're in a primary process, allocate vfio_res and get region info */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		vfio_res = rte_zmalloc("VFIO_RES", sizeof(*vfio_res), 0);
		if (vfio_res == NULL) {
			RTE_LOG(ERR, EAL,
				"%s(): cannot store vfio mmap details\n", __func__);
			close(vfio_dev_fd);
			return -1;
		}
		memcpy(&vfio_res->pci_addr, &dev->addr, sizeof(vfio_res->pci_addr));

		/* get number of registers (up to BAR5) */
		vfio_res->nb_maps = RTE_MIN((int) device_info.num_regions,
				VFIO_PCI_BAR5_REGION_INDEX + 1);
	} else {
		/* if we're in a secondary process, just find our tailq entry */
		TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
			if (rte_eal_compare_pci_addr(&vfio_res->pci_addr,
						     &dev->addr))
				continue;
			break;
		}
		/* if we haven't found our tailq entry, something's wrong */
		if (vfio_res == NULL) {
			RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
					pci_addr);
			close(vfio_dev_fd);
			return -1;
		}
	}

	/* map BARs */
	maps = vfio_res->maps;

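	/*
	 * Map each BAR. I/O port BARs and non-mmapable regions are skipped;
	 * a BAR that contains the MSI-X table is mapped around the table,
	 * since VFIO will not let us mmap the table itself.
	 */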
	for (i = 0; i < (int) vfio_res->nb_maps; i++) {
		struct vfio_region_info reg = { .argsz = sizeof(reg) };
		void *bar_addr;
		struct memreg {
			unsigned long offset, size;
		} memreg[2] = {};

		reg.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_REGION_INFO, &reg);

		if (ret) {
			RTE_LOG(ERR, EAL, " %s cannot get device region info "
					"error %i (%s)\n", pci_addr, errno, strerror(errno));
			close(vfio_dev_fd);
			if (internal_config.process_type == RTE_PROC_PRIMARY)
				rte_free(vfio_res);
			return -1;
		}

		/* check for an I/O port region */
		ret = pread64(vfio_dev_fd, &ioport_bar, sizeof(ioport_bar),
			      VFIO_GET_REGION_ADDR(VFIO_PCI_CONFIG_REGION_INDEX)
			      + PCI_BASE_ADDRESS_0 + i*4);

		if (ret != sizeof(ioport_bar)) {
			RTE_LOG(ERR, EAL,
				"Cannot read BAR address (%x) from config space!\n",
				PCI_BASE_ADDRESS_0 + i*4);
			close(vfio_dev_fd);
			if (internal_config.process_type == RTE_PROC_PRIMARY)
				rte_free(vfio_res);
			return -1;
		}

		if (ioport_bar & PCI_BASE_ADDRESS_SPACE_IO) {
			RTE_LOG(INFO, EAL,
				"Ignore mapping IO port BAR (%d), addr: %x\n",
				i, ioport_bar);
			continue;
		}

		/* skip non-mmapable BARs */
		if ((reg.flags & VFIO_REGION_INFO_FLAG_MMAP) == 0)
			continue;

		if (i == msix_bar) {
			/*
			 * VFIO will not let us map the MSI-X table,
			 * but we can map around it.
			 */
			uint32_t table_start = msix_table_offset;
			uint32_t table_end = table_start + msix_table_size;
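			/* align the hole to page boundaries: round the end
			 * up and the start down to a full page */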
			table_end = (table_end + ~PAGE_MASK) & PAGE_MASK;
			table_start &= PAGE_MASK;

			if (table_start == 0 && table_end >= reg.size) {
				/* Cannot map this BAR */
				RTE_LOG(DEBUG, EAL, "Skipping BAR %d\n", i);
				continue;
			} else {
				memreg[0].offset = reg.offset;
				memreg[0].size = table_start;
				memreg[1].offset = reg.offset + table_end;
				memreg[1].size = reg.size - table_end;

				RTE_LOG(DEBUG, EAL,
					"Trying to map BAR %d that contains the MSI-X "
					"table. Trying offsets: "
					"0x%04lx:0x%04lx, 0x%04lx:0x%04lx\n", i,
					memreg[0].offset, memreg[0].size,
					memreg[1].offset, memreg[1].size);
			}
		} else {
			memreg[0].offset = reg.offset;
			memreg[0].size = reg.size;
		}

		/* try to figure out an address */
		if (internal_config.process_type == RTE_PROC_PRIMARY) {
			/* try mapping somewhere close to the end of hugepages */
			if (pci_map_addr == NULL)
				pci_map_addr = pci_find_max_end_va();

			bar_addr = pci_map_addr;
			pci_map_addr = RTE_PTR_ADD(bar_addr, (size_t) reg.size);
		} else {
			bar_addr = maps[i].addr;
		}

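		/*
		 * Reserve the whole BAR-sized range with an anonymous,
		 * inaccessible mapping first; the real file mappings below
		 * then overlay it with MAP_FIXED, leaving the MSI-X hole
		 * (if any) unmapped but still reserved.
		 */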
		bar_addr = mmap(bar_addr, reg.size, 0, MAP_PRIVATE |
				MAP_ANONYMOUS, -1, 0);
		if (bar_addr != MAP_FAILED) {
			void *map_addr = NULL;
			if (memreg[0].size) {
				/* actual map of first part */
				map_addr = pci_map_resource(bar_addr, vfio_dev_fd,
							    memreg[0].offset,
							    memreg[0].size,
							    MAP_FIXED);
			}

			/* if there's a second part, try to map it */
			if (map_addr != MAP_FAILED
			    && memreg[1].offset && memreg[1].size) {
				void *second_addr = RTE_PTR_ADD(bar_addr,
								memreg[1].offset -
								(uintptr_t)reg.offset);
				map_addr = pci_map_resource(second_addr,
							    vfio_dev_fd, memreg[1].offset,
							    memreg[1].size,
							    MAP_FIXED);
			}

			if (map_addr == MAP_FAILED || !map_addr) {
				munmap(bar_addr, reg.size);
				bar_addr = MAP_FAILED;
			}
		}

		if (bar_addr == MAP_FAILED ||
				(internal_config.process_type == RTE_PROC_SECONDARY &&
						bar_addr != maps[i].addr)) {
			RTE_LOG(ERR, EAL, " %s mapping BAR%i failed: %s\n", pci_addr, i,
					strerror(errno));
			close(vfio_dev_fd);
			if (internal_config.process_type == RTE_PROC_PRIMARY)
				rte_free(vfio_res);
			return -1;
		}

		maps[i].addr = bar_addr;
		maps[i].offset = reg.offset;
		maps[i].size = reg.size;
		maps[i].path = NULL; /* vfio doesn't have per-resource paths */
		dev->mem_resource[i].addr = bar_addr;
	}

	/* if secondary process, do not set up interrupts */
	if (internal_config.process_type == RTE_PROC_PRIMARY) {
		if (pci_vfio_setup_interrupts(dev, vfio_dev_fd) != 0) {
			RTE_LOG(ERR, EAL, " %s error setting up interrupts!\n", pci_addr);
			close(vfio_dev_fd);
			rte_free(vfio_res);
			return -1;
		}

		/* set bus mastering for the device */
		if (pci_vfio_set_bus_master(vfio_dev_fd, true)) {
			RTE_LOG(ERR, EAL, " %s cannot set up bus mastering!\n", pci_addr);
			close(vfio_dev_fd);
			rte_free(vfio_res);
			return -1;
		}

		/* reset the device (best effort; the return value is ignored) */
		ioctl(vfio_dev_fd, VFIO_DEVICE_RESET);
	}

	if (internal_config.process_type == RTE_PROC_PRIMARY)
		TAILQ_INSERT_TAIL(vfio_res_list, vfio_res, next);

	return 0;
}

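/*
 * Undo pci_vfio_map_resource(): close the interrupt eventfd, disable bus
 * mastering, release the device back to VFIO, unmap all BARs, and remove
 * the resource from the tailq.
 */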
int
pci_vfio_unmap_resource(struct rte_pci_device *dev)
{
	char pci_addr[PATH_MAX] = {0};
	struct rte_pci_addr *loc = &dev->addr;
	int i, ret;
	struct mapped_pci_resource *vfio_res = NULL;
	struct mapped_pci_res_list *vfio_res_list;

	struct pci_map *maps;

	/* store PCI address string */
	snprintf(pci_addr, sizeof(pci_addr), PCI_PRI_FMT,
			loc->domain, loc->bus, loc->devid, loc->function);

	if (close(dev->intr_handle.fd) < 0) {
		RTE_LOG(INFO, EAL, "Error when closing eventfd file descriptor for %s\n",
			pci_addr);
		return -1;
	}

	if (pci_vfio_set_bus_master(dev->intr_handle.vfio_dev_fd, false)) {
		RTE_LOG(ERR, EAL, " %s cannot unset bus mastering for PCI device!\n",
				pci_addr);
		return -1;
	}

	ret = vfio_release_device(pci_get_sysfs_path(), pci_addr,
				  dev->intr_handle.vfio_dev_fd);
	if (ret < 0) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot release device\n", __func__);
		return ret;
	}

	vfio_res_list = RTE_TAILQ_CAST(rte_vfio_tailq.head, mapped_pci_res_list);
	/* Get vfio_res */
	TAILQ_FOREACH(vfio_res, vfio_res_list, next) {
		if (memcmp(&vfio_res->pci_addr, &dev->addr, sizeof(dev->addr)))
			continue;
		break;
	}
	/* if we haven't found our tailq entry, something's wrong */
	if (vfio_res == NULL) {
		RTE_LOG(ERR, EAL, " %s cannot find TAILQ entry for PCI device!\n",
				pci_addr);
		return -1;
	}

	/* unmap BARs */
	maps = vfio_res->maps;

	RTE_LOG(INFO, EAL, "Releasing pci mapped resource for %s\n",
		pci_addr);
	for (i = 0; i < (int) vfio_res->nb_maps; i++) {

		/*
		 * Unlike when mapping, we do not need to treat the MSI-X
		 * table BAR specially here; the current maps array is enough.
		 */
		if (maps[i].addr) {
			RTE_LOG(INFO, EAL, "Calling pci_unmap_resource for %s at %p\n",
				pci_addr, maps[i].addr);
			pci_unmap_resource(maps[i].addr, maps[i].size);
		}
	}

	TAILQ_REMOVE(vfio_res_list, vfio_res, next);

	return 0;
}

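/*
 * I/O port access: VFIO routes port reads and writes through pread64()
 * and pwrite64() on the device fd, so "mapping" an I/O port BAR only
 * records the region's base offset; no memory mapping is involved.
 */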
int
pci_vfio_ioport_map(struct rte_pci_device *dev, int bar,
		    struct rte_pci_ioport *p)
{
	if (bar < VFIO_PCI_BAR0_REGION_INDEX ||
	    bar > VFIO_PCI_BAR5_REGION_INDEX) {
		RTE_LOG(ERR, EAL, "invalid bar (%d)!\n", bar);
		return -1;
	}

	p->dev = dev;
	p->base = VFIO_GET_REGION_ADDR(bar);
	return 0;
}

void
pci_vfio_ioport_read(struct rte_pci_ioport *p,
		     void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pread64(intr_handle->vfio_dev_fd, data,
		    len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't read from PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}

void
pci_vfio_ioport_write(struct rte_pci_ioport *p,
		      const void *data, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &p->dev->intr_handle;

	if (pwrite64(intr_handle->vfio_dev_fd, data,
		     len, p->base + offset) <= 0)
		RTE_LOG(ERR, EAL,
			"Can't write to PCI bar (%" PRIu64 ") : offset (%x)\n",
			VFIO_GET_REGION_IDX(p->base), (int)offset);
}

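/* ioport unmapping is not supported by this VFIO implementation */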
int
pci_vfio_ioport_unmap(struct rte_pci_ioport *p)
{
	RTE_SET_USED(p);
	return -1;
}

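/* thin wrappers around the generic VFIO helpers for the "vfio_pci" driver */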
int
pci_vfio_enable(void)
{
	return vfio_enable("vfio_pci");
}

int
pci_vfio_is_enabled(void)
{
	return vfio_is_enabled("vfio_pci");
}
#endif /* VFIO_PRESENT */