/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <string.h>
#include <dirent.h>

#include <rte_log.h>
#include <rte_bus.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_eal_memconfig.h>
#include <rte_malloc.h>
#include <rte_devargs.h>
#include <rte_memcpy.h>
#include <rte_vfio.h>

#include "eal_filesystem.h"

#include "private.h"
#include "pci_init.h"

/**
 * @file
 * PCI probing under linux
 *
 * This code is used to simulate a PCI probe by parsing information in sysfs.
 * When a registered device matches a driver, it is then initialized with
 * the IGB_UIO driver (or is left uninitialized if the device was not bound
 * to it).
 */

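/* The rte_pci_bus instance below is defined in the common PCI bus code; the
 * scan helpers in this file populate its device list.
 */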
extern struct rte_pci_bus rte_pci_bus;

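/* Resolve the "driver" symlink under a device's sysfs directory and copy the
 * kernel driver name into dri_name. Returns 0 on success, 1 if no driver is
 * bound to the device, and -1 on error.
 */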
static int
pci_get_kernel_driver_by_path(const char *filename, char *dri_name,
		size_t len)
{
	int count;
	char path[PATH_MAX];
	char *name;

	if (!filename || !dri_name)
		return -1;

	count = readlink(filename, path, PATH_MAX);
	if (count >= PATH_MAX)
		return -1;

	/* the device has no driver bound */
	if (count < 0)
		return 1;

	path[count] = '\0';

	name = strrchr(path, '/');
	if (name) {
		strlcpy(dri_name, name + 1, len);
		return 0;
	}

	return -1;
}

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			ret = pci_vfio_map_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		if (rte_eal_using_phys_addrs()) {
			/* map resources for devices that use uio */
			ret = pci_uio_map_resource(dev);
		}
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources using VFIO if it exists */
	switch (dev->kdrv) {
	case RTE_KDRV_VFIO:
#ifdef VFIO_PRESENT
		if (pci_vfio_is_enabled())
			pci_vfio_unmap_resource(dev);
#endif
		break;
	case RTE_KDRV_IGB_UIO:
	case RTE_KDRV_UIO_GENERIC:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}

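/* rte_memseg_list_walk() callback: track the highest end VA seen across all
 * memseg lists.
 */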
static int
find_max_end_va(const struct rte_memseg_list *msl, void *arg)
{
	size_t sz = msl->memseg_arr.len * msl->page_sz;
	void *end_va = RTE_PTR_ADD(msl->base_va, sz);
	void **max_va = arg;

	if (*max_va < end_va)
		*max_va = end_va;
	return 0;
}

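/* Return the highest end virtual address across all memseg lists. */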
void *
pci_find_max_end_va(void)
{
	void *va = NULL;

	rte_memseg_list_walk(find_max_end_va, &va);
	return va;
}

/* parse one line of the "resource" sysfs file (note that the 'line'
 * string is modified)
 */
int
pci_parse_one_sysfs_resource(char *line, size_t len, uint64_t *phys_addr,
	uint64_t *end_addr, uint64_t *flags)
{
	union pci_resource_info {
		struct {
			char *phys_addr;
			char *end_addr;
			char *flags;
		};
		char *ptrs[PCI_RESOURCE_FMT_NVAL];
	} res_info;

	if (rte_strsplit(line, len, res_info.ptrs, 3, ' ') != 3) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}
	errno = 0;
	*phys_addr = strtoull(res_info.phys_addr, NULL, 16);
	*end_addr = strtoull(res_info.end_addr, NULL, 16);
	*flags = strtoull(res_info.flags, NULL, 16);
	if (errno != 0) {
		RTE_LOG(ERR, EAL,
			"%s(): bad resource format\n", __func__);
		return -1;
	}

	return 0;
}

/* parse the "resource" sysfs file */
static int
pci_parse_sysfs_resource(const char *filename, struct rte_pci_device *dev)
{
	FILE *f;
	char buf[BUFSIZ];
	int i;
	uint64_t phys_addr, end_addr, flags;

	f = fopen(filename, "r");
	if (f == NULL) {
		RTE_LOG(ERR, EAL, "Cannot open sysfs resource\n");
		return -1;
	}

	for (i = 0; i < PCI_MAX_RESOURCE; i++) {

		if (fgets(buf, sizeof(buf), f) == NULL) {
			RTE_LOG(ERR, EAL,
				"%s(): cannot read resource\n", __func__);
			goto error;
		}
		if (pci_parse_one_sysfs_resource(buf, sizeof(buf), &phys_addr,
				&end_addr, &flags) < 0)
			goto error;

		if (flags & IORESOURCE_MEM) {
			dev->mem_resource[i].phys_addr = phys_addr;
			dev->mem_resource[i].len = end_addr - phys_addr + 1;
			/* not mapped for now */
			dev->mem_resource[i].addr = NULL;
		}
	}
	fclose(f);
	return 0;

error:
	fclose(f);
	return -1;
}

/* Scan one pci sysfs entry, and fill the devices list from it. */
static int
pci_scan_one(const char *dirname, const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];
	unsigned long tmp;
	struct rte_pci_device *dev;
	char driver[PATH_MAX];
	int ret;

	dev = malloc(sizeof(*dev));
	if (dev == NULL)
		return -1;

	memset(dev, 0, sizeof(*dev));
	dev->addr = *addr;

	/* get vendor id */
	snprintf(filename, sizeof(filename), "%s/vendor", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.vendor_id = (uint16_t)tmp;

	/* get device id */
	snprintf(filename, sizeof(filename), "%s/device", dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.device_id = (uint16_t)tmp;

	/* get subsystem_vendor id */
	snprintf(filename, sizeof(filename), "%s/subsystem_vendor",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_vendor_id = (uint16_t)tmp;

	/* get subsystem_device id */
	snprintf(filename, sizeof(filename), "%s/subsystem_device",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	dev->id.subsystem_device_id = (uint16_t)tmp;

	/* get class_id */
	snprintf(filename, sizeof(filename), "%s/class",
		 dirname);
	if (eal_parse_sysfs_value(filename, &tmp) < 0) {
		free(dev);
		return -1;
	}
	/* the least significant 24 bits are valid: class, subclass, program interface */
	dev->id.class_id = (uint32_t)tmp & RTE_CLASS_ANY_ID;

	/* get max_vfs */
	dev->max_vfs = 0;
	snprintf(filename, sizeof(filename), "%s/max_vfs", dirname);
	if (!access(filename, F_OK) &&
	    eal_parse_sysfs_value(filename, &tmp) == 0)
		dev->max_vfs = (uint16_t)tmp;
	else {
		/* for non-igb_uio drivers, a kernel version >= 3.8 is needed */
		snprintf(filename, sizeof(filename),
			 "%s/sriov_numvfs", dirname);
		if (!access(filename, F_OK) &&
		    eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->max_vfs = (uint16_t)tmp;
	}

	/* get numa node, default to 0 if not present */
	snprintf(filename, sizeof(filename), "%s/numa_node",
		 dirname);

	if (access(filename, F_OK) != -1) {
		if (eal_parse_sysfs_value(filename, &tmp) == 0)
			dev->device.numa_node = tmp;
		else
			dev->device.numa_node = -1;
	} else {
		dev->device.numa_node = 0;
	}

	pci_name_set(dev);

	/* parse resources */
	snprintf(filename, sizeof(filename), "%s/resource", dirname);
	if (pci_parse_sysfs_resource(filename, dev) < 0) {
		RTE_LOG(ERR, EAL, "%s(): cannot parse resource\n", __func__);
		free(dev);
		return -1;
	}

	/* parse driver */
	snprintf(filename, sizeof(filename), "%s/driver", dirname);
	ret = pci_get_kernel_driver_by_path(filename, driver, sizeof(driver));
	if (ret < 0) {
		RTE_LOG(ERR, EAL, "Failed to get kernel driver\n");
		free(dev);
		return -1;
	}

	if (!ret) {
		if (!strcmp(driver, "vfio-pci"))
			dev->kdrv = RTE_KDRV_VFIO;
		else if (!strcmp(driver, "igb_uio"))
			dev->kdrv = RTE_KDRV_IGB_UIO;
		else if (!strcmp(driver, "uio_pci_generic"))
			dev->kdrv = RTE_KDRV_UIO_GENERIC;
		else
			dev->kdrv = RTE_KDRV_UNKNOWN;
	} else
		dev->kdrv = RTE_KDRV_NONE;

	/* device is valid, add it to the list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;

			if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				if (dev2->driver == NULL) {
					dev2->kdrv = dev->kdrv;
					dev2->max_vfs = dev->max_vfs;
					pci_name_set(dev2);
					memmove(dev2->mem_resource,
						dev->mem_resource,
						sizeof(dev->mem_resource));
				} else {
					/**
					 * If the device is plugged and its
					 * driver is already probed, there is
					 * nothing to do here. (This happens
					 * when we call rte_eal_hotplug_add.)
					 */
					if (dev2->kdrv != dev->kdrv ||
					    dev2->max_vfs != dev->max_vfs)
						/*
						 * This should not happen.
						 * It is still possible,
						 * though, if a device is
						 * unbound from vfio or uio
						 * before hotplug remove and
						 * rebound with a different
						 * configuration, so just
						 * print the error as an
						 * alarm.
						 */
						RTE_LOG(ERR, EAL, "Unexpected device scan at %s!\n",
							filename);
				}
				free(dev);
			}
			return 0;
		}

		rte_pci_add_device(dev);
	}

	return 0;
}

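/* Re-scan the sysfs entry of a single device, identified by its PCI address,
 * and refresh the corresponding entry in the device list.
 */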
int
pci_update_device(const struct rte_pci_addr *addr)
{
	char filename[PATH_MAX];

	snprintf(filename, sizeof(filename), "%s/" PCI_PRI_FMT,
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);

	return pci_scan_one(filename, addr);
}

/*
 * split up a pci address into its constituent parts.
 */
static int
parse_pci_addr_format(const char *buf, int bufsize, struct rte_pci_addr *addr)
{
	/* first split on ':' */
	union splitaddr {
		struct {
			char *domain;
			char *bus;
			char *devid;
			char *function;
		};
		char *str[PCI_FMT_NVAL]; /* last element-separator is "." not ":" */
	} splitaddr;

	char *buf_copy = strndup(buf, bufsize);
	if (buf_copy == NULL)
		return -1;

	if (rte_strsplit(buf_copy, bufsize, splitaddr.str, PCI_FMT_NVAL, ':')
			!= PCI_FMT_NVAL - 1)
		goto error;
	/* final split is on '.' between devid and function */
	splitaddr.function = strchr(splitaddr.devid, '.');
	if (splitaddr.function == NULL)
		goto error;
	*splitaddr.function++ = '\0';

	/* now convert to int values */
	errno = 0;
	addr->domain = strtoul(splitaddr.domain, NULL, 16);
	addr->bus = strtoul(splitaddr.bus, NULL, 16);
	addr->devid = strtoul(splitaddr.devid, NULL, 16);
	addr->function = strtoul(splitaddr.function, NULL, 10);
	if (errno != 0)
		goto error;

	free(buf_copy); /* free the copy made with strndup */
	return 0;
error:
	free(buf_copy);
	return -1;
}

/*
 * Scan the content of the PCI bus, and add the detected devices to the
 * devices list.
 */
int
rte_pci_scan(void)
{
	struct dirent *e;
	DIR *dir;
	char dirname[PATH_MAX];
	struct rte_pci_addr addr;

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

#ifdef VFIO_PRESENT
	if (!pci_vfio_is_enabled())
		RTE_LOG(DEBUG, EAL, "VFIO PCI modules not loaded\n");
#endif

	dir = opendir(rte_pci_get_sysfs_path());
	if (dir == NULL) {
		RTE_LOG(ERR, EAL, "%s(): opendir failed: %s\n",
			__func__, strerror(errno));
		return -1;
	}

	while ((e = readdir(dir)) != NULL) {
		if (e->d_name[0] == '.')
			continue;

		if (parse_pci_addr_format(e->d_name, sizeof(e->d_name), &addr) != 0)
			continue;

		snprintf(dirname, sizeof(dirname), "%s/%s",
			 rte_pci_get_sysfs_path(), e->d_name);

		if (pci_scan_one(dirname, &addr) < 0)
			goto error;
	}
	closedir(dir);
	return 0;

error:
	closedir(dir);
	return -1;
}

/*
 * Is any PCI device bound to a kernel driver?
 */
static inline int
pci_one_device_is_bound(void)
{
	struct rte_pci_device *dev = NULL;
	int ret = 0;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN ||
		    dev->kdrv == RTE_KDRV_NONE) {
			continue;
		} else {
			ret = 1;
			break;
		}
	}
	return ret;
}

/*
 * Is any of the devices to be probed bound to a UIO driver?
 */
static inline int
pci_one_device_bound_uio(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_devargs *devargs;
	int need_check;

	FOREACH_DEVICE_ON_PCIBUS(dev) {
		devargs = dev->device.devargs;

		need_check = 0;
		switch (rte_pci_bus.bus.conf.scan_mode) {
		case RTE_BUS_SCAN_WHITELIST:
			if (devargs && devargs->policy == RTE_DEV_WHITELISTED)
				need_check = 1;
			break;
		case RTE_BUS_SCAN_UNDEFINED:
		case RTE_BUS_SCAN_BLACKLIST:
			if (devargs == NULL ||
			    devargs->policy != RTE_DEV_BLACKLISTED)
				need_check = 1;
			break;
		}

		if (!need_check)
			continue;

		if (dev->kdrv == RTE_KDRV_IGB_UIO ||
		    dev->kdrv == RTE_KDRV_UIO_GENERIC) {
			return 1;
		}
	}
	return 0;
}

/*
 * Does any VFIO-bound device match a driver requesting IOVA as VA?
 */
static inline int
pci_one_device_has_iova_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		if (drv && drv->drv_flags & RTE_PCI_DRV_IOVA_AS_VA) {
			FOREACH_DEVICE_ON_PCIBUS(dev) {
				if (dev->kdrv == RTE_KDRV_VFIO &&
				    rte_pci_match(drv, dev))
					return 1;
			}
		}
	}
	return 0;
}

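/* Architecture-specific check: can the IOMMU serving this device address the
 * full process virtual address space? On x86 this reads the VT-d capability
 * register exposed in sysfs and compares its MGAW field with the VA width;
 * ppc64 always reports false, other architectures assume true.
 */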
#if defined(RTE_ARCH_X86)
static bool
pci_one_device_iommu_support_va(struct rte_pci_device *dev)
{
#define VTD_CAP_MGAW_SHIFT 16
#define VTD_CAP_MGAW_MASK (0x3fULL << VTD_CAP_MGAW_SHIFT)
#define X86_VA_WIDTH 47 /* From Documentation/x86/x86_64/mm.txt */
	struct rte_pci_addr *addr = &dev->addr;
	char filename[PATH_MAX];
	FILE *fp;
	uint64_t mgaw, vtd_cap_reg = 0;

	snprintf(filename, sizeof(filename),
		 "%s/" PCI_PRI_FMT "/iommu/intel-iommu/cap",
		 rte_pci_get_sysfs_path(), addr->domain, addr->bus, addr->devid,
		 addr->function);
	if (access(filename, F_OK) == -1) {
		/* We don't have an Intel IOMMU, assume VA supported */
		return true;
	}

	/* We have an Intel IOMMU */
	fp = fopen(filename, "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open %s\n", __func__, filename);
		return false;
	}

	if (fscanf(fp, "%" PRIx64, &vtd_cap_reg) != 1) {
		RTE_LOG(ERR, EAL, "%s(): can't read %s\n", __func__, filename);
		fclose(fp);
		return false;
	}

	fclose(fp);

	mgaw = ((vtd_cap_reg & VTD_CAP_MGAW_MASK) >> VTD_CAP_MGAW_SHIFT) + 1;
	if (mgaw < X86_VA_WIDTH)
		return false;

	return true;
}
#elif defined(RTE_ARCH_PPC_64)
static bool
pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
{
	return false;
}
#else
static bool
pci_one_device_iommu_support_va(__rte_unused struct rte_pci_device *dev)
{
	return true;
}
#endif

/*
 * Do the IOMMUs of all devices support using VA as IOVA?
 */
static bool
pci_devices_iommu_support_va(void)
{
	struct rte_pci_device *dev = NULL;
	struct rte_pci_driver *drv = NULL;

	FOREACH_DRIVER_ON_PCIBUS(drv) {
		FOREACH_DEVICE_ON_PCIBUS(dev) {
			if (!rte_pci_match(drv, dev))
				continue;
			if (!pci_one_device_iommu_support_va(dev))
				return false;
		}
	}
	return true;
}

/*
 * Get the IOMMU class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	bool is_bound;
	bool is_vfio_noiommu_enabled = true;
	bool has_iova_va;
	bool is_bound_uio;
	bool iommu_no_va;

	is_bound = pci_one_device_is_bound();
	if (!is_bound)
		return RTE_IOVA_DC;

	has_iova_va = pci_one_device_has_iova_va();
	is_bound_uio = pci_one_device_bound_uio();
	iommu_no_va = !pci_devices_iommu_support_va();
#ifdef VFIO_PRESENT
	is_vfio_noiommu_enabled = rte_vfio_noiommu_is_enabled() == true ?
			true : false;
#endif

	if (has_iova_va && !is_bound_uio && !is_vfio_noiommu_enabled &&
			!iommu_no_va)
		return RTE_IOVA_VA;

	if (has_iova_va) {
		RTE_LOG(WARNING, EAL, "Some devices want IOVA as VA but PA will be used because: ");
		if (is_vfio_noiommu_enabled)
			RTE_LOG(WARNING, EAL, "vfio-noiommu mode configured\n");
		if (is_bound_uio)
			RTE_LOG(WARNING, EAL, "some devices are bound to UIO\n");
		if (iommu_no_va)
			RTE_LOG(WARNING, EAL, "IOMMU does not support IOVA as VA\n");
	}

	return RTE_IOVA_PA;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *device,
		void *buf, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		return pci_uio_read_config(intr_handle, buf, len, offset);

#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		return pci_vfio_read_config(intr_handle, buf, len, offset);
#endif
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}
}

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *device,
		const void *buf, size_t len, off_t offset)
{
	const struct rte_intr_handle *intr_handle = &device->intr_handle;

	switch (intr_handle->type) {
	case RTE_INTR_HANDLE_UIO:
	case RTE_INTR_HANDLE_UIO_INTX:
		return pci_uio_write_config(intr_handle, buf, len, offset);

#ifdef VFIO_PRESENT
	case RTE_INTR_HANDLE_VFIO_MSIX:
	case RTE_INTR_HANDLE_VFIO_MSI:
	case RTE_INTR_HANDLE_VFIO_LEGACY:
		return pci_vfio_write_config(intr_handle, buf, len, offset);
#endif
	default:
		RTE_LOG(ERR, EAL,
			"Unknown handle type of fd %d\n",
			intr_handle->fd);
		return -1;
	}
}

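/* x86 only: look up the I/O port range assigned to a device by scanning
 * /proc/ioports for an entry matching its PCI address, and use the start of
 * that range as the ioport base.
 */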
#if defined(RTE_ARCH_X86)
static int
pci_ioport_map(struct rte_pci_device *dev, int bar __rte_unused,
	       struct rte_pci_ioport *p)
{
	uint16_t start, end;
	FILE *fp;
	char *line = NULL;
	char pci_id[16];
	int found = 0;
	size_t linesz;

	snprintf(pci_id, sizeof(pci_id), PCI_PRI_FMT,
		 dev->addr.domain, dev->addr.bus,
		 dev->addr.devid, dev->addr.function);

	fp = fopen("/proc/ioports", "r");
	if (fp == NULL) {
		RTE_LOG(ERR, EAL, "%s(): can't open ioports\n", __func__);
		return -1;
	}

	while (getdelim(&line, &linesz, '\n', fp) > 0) {
		char *ptr = line;
		char *left;
		int n;

		n = strcspn(ptr, ":");
		ptr[n] = 0;
		left = &ptr[n + 1];

		while (*left && isspace(*left))
			left++;

		if (!strncmp(left, pci_id, strlen(pci_id))) {
			found = 1;

			while (*ptr && isspace(*ptr))
				ptr++;

			sscanf(ptr, "%04hx-%04hx", &start, &end);

			break;
		}
	}

	free(line);
	fclose(fp);

	if (!found)
		return -1;

	p->base = start;
	RTE_LOG(DEBUG, EAL, "PCI Port IO found start=0x%x\n", start);

	return 0;
}
#endif

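/* Map an I/O port BAR using the method appropriate for the kernel driver the
 * device is bound to (VFIO, igb_uio, uio_pci_generic, or /proc/ioports on x86
 * when no driver is bound).
 */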
int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		   struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_map(dev, bar, p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_map(dev, bar, p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#else
		ret = pci_uio_ioport_map(dev, bar, p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = pci_ioport_map(dev, bar, p);
#endif
		break;
	default:
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

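/* Read from a previously mapped I/O port region, dispatching on the kernel
 * driver recorded in the mapping.
 */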
void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		    void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_read(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_read(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

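/* Write to a previously mapped I/O port region, dispatching on the kernel
 * driver recorded in the mapping.
 */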
void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		     const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		pci_vfio_ioport_write(p, data, len, offset);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_UIO_GENERIC:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		pci_uio_ioport_write(p, data, len, offset);
#endif
		break;
	default:
		break;
	}
}

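/* Release an I/O port mapping created by rte_pci_ioport_map(). */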
int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret = -1;

	switch (p->dev->kdrv) {
#ifdef VFIO_PRESENT
	case RTE_KDRV_VFIO:
		if (pci_vfio_is_enabled())
			ret = pci_vfio_ioport_unmap(p);
		break;
#endif
	case RTE_KDRV_IGB_UIO:
		ret = pci_uio_ioport_unmap(p);
		break;
	case RTE_KDRV_UIO_GENERIC:
#if defined(RTE_ARCH_X86)
		ret = 0;
#else
		ret = pci_uio_ioport_unmap(p);
#endif
		break;
	case RTE_KDRV_NONE:
#if defined(RTE_ARCH_X86)
		ret = 0;
#endif
		break;
	default:
		break;
	}

	return ret;
}