]>
git.proxmox.com Git - ceph.git/blob - ceph/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c
1 /* SPDX-License-Identifier: BSD-3-Clause
2 * Copyright(c) 2010-2014 Intel Corporation
12 #include <sys/types.h>
18 #include <sys/queue.h>
20 #include <sys/ioctl.h>
21 #include <sys/pciio.h>
22 #include <dev/pci/pcireg.h>
24 #if defined(RTE_ARCH_X86)
25 #include <machine/cpufunc.h>
28 #include <rte_interrupts.h>
31 #include <rte_bus_pci.h>
32 #include <rte_common.h>
33 #include <rte_launch.h>
34 #include <rte_memory.h>
36 #include <rte_eal_memconfig.h>
37 #include <rte_per_lcore.h>
38 #include <rte_lcore.h>
39 #include <rte_malloc.h>
40 #include <rte_string_fns.h>
41 #include <rte_debug.h>
42 #include <rte_devargs.h>
44 #include "eal_filesystem.h"
/*
 * PCI probing under FreeBSD.
 *
 * This code enumerates PCI devices by querying the kernel through
 * ioctls on /dev/pci. Devices are expected to be bound to the
 * pass-through kernel driver (nic_uio), a minimal module that exposes
 * PCI BARs to userland applications and enables bus mastering.
 */
59 extern struct rte_pci_bus rte_pci_bus
;
63 rte_pci_map_device(struct rte_pci_device
*dev
)
67 /* try mapping the NIC resources */
69 case RTE_KDRV_NIC_UIO
:
70 /* map resources for devices that use uio */
71 ret
= pci_uio_map_resource(dev
);
75 " Not managed by a supported kernel driver, skipped\n");
83 /* Unmap pci device */
85 rte_pci_unmap_device(struct rte_pci_device
*dev
)
87 /* try unmapping the NIC resources */
89 case RTE_KDRV_NIC_UIO
:
90 /* unmap resources for devices that use uio */
91 pci_uio_unmap_resource(dev
);
95 " Not managed by a supported kernel driver, skipped\n");
101 pci_uio_free_resource(struct rte_pci_device
*dev
,
102 struct mapped_pci_resource
*uio_res
)
106 if (dev
->intr_handle
.fd
) {
107 close(dev
->intr_handle
.fd
);
108 dev
->intr_handle
.fd
= -1;
109 dev
->intr_handle
.type
= RTE_INTR_HANDLE_UNKNOWN
;
114 pci_uio_alloc_resource(struct rte_pci_device
*dev
,
115 struct mapped_pci_resource
**uio_res
)
117 char devname
[PATH_MAX
]; /* contains the /dev/uioX */
118 struct rte_pci_addr
*loc
;
122 snprintf(devname
, sizeof(devname
), "/dev/uio@pci:%u:%u:%u",
123 dev
->addr
.bus
, dev
->addr
.devid
, dev
->addr
.function
);
125 if (access(devname
, O_RDWR
) < 0) {
126 RTE_LOG(WARNING
, EAL
, " "PCI_PRI_FMT
" not managed by UIO driver, "
127 "skipping\n", loc
->domain
, loc
->bus
, loc
->devid
, loc
->function
);
131 /* save fd if in primary process */
132 dev
->intr_handle
.fd
= open(devname
, O_RDWR
);
133 if (dev
->intr_handle
.fd
< 0) {
134 RTE_LOG(ERR
, EAL
, "Cannot open %s: %s\n",
135 devname
, strerror(errno
));
138 dev
->intr_handle
.type
= RTE_INTR_HANDLE_UIO
;
140 /* allocate the mapping details for secondary processes*/
141 *uio_res
= rte_zmalloc("UIO_RES", sizeof(**uio_res
), 0);
142 if (*uio_res
== NULL
) {
144 "%s(): cannot store uio mmap details\n", __func__
);
148 strlcpy((*uio_res
)->path
, devname
, sizeof((*uio_res
)->path
));
149 memcpy(&(*uio_res
)->pci_addr
, &dev
->addr
, sizeof((*uio_res
)->pci_addr
));
154 pci_uio_free_resource(dev
, *uio_res
);
159 pci_uio_map_resource_by_index(struct rte_pci_device
*dev
, int res_idx
,
160 struct mapped_pci_resource
*uio_res
, int map_idx
)
167 struct pci_map
*maps
;
169 maps
= uio_res
->maps
;
170 devname
= uio_res
->path
;
171 pagesz
= sysconf(_SC_PAGESIZE
);
173 /* allocate memory to keep path */
174 maps
[map_idx
].path
= rte_malloc(NULL
, strlen(devname
) + 1, 0);
175 if (maps
[map_idx
].path
== NULL
) {
176 RTE_LOG(ERR
, EAL
, "Cannot allocate memory for path: %s\n",
182 * open resource file, to mmap it
184 fd
= open(devname
, O_RDWR
);
186 RTE_LOG(ERR
, EAL
, "Cannot open %s: %s\n",
187 devname
, strerror(errno
));
191 /* if matching map is found, then use it */
192 offset
= res_idx
* pagesz
;
193 mapaddr
= pci_map_resource(NULL
, fd
, (off_t
)offset
,
194 (size_t)dev
->mem_resource
[res_idx
].len
, 0);
196 if (mapaddr
== MAP_FAILED
)
199 maps
[map_idx
].phaddr
= dev
->mem_resource
[res_idx
].phys_addr
;
200 maps
[map_idx
].size
= dev
->mem_resource
[res_idx
].len
;
201 maps
[map_idx
].addr
= mapaddr
;
202 maps
[map_idx
].offset
= offset
;
203 strcpy(maps
[map_idx
].path
, devname
);
204 dev
->mem_resource
[res_idx
].addr
= mapaddr
;
209 rte_free(maps
[map_idx
].path
);
214 pci_scan_one(int dev_pci_fd
, struct pci_conf
*conf
)
216 struct rte_pci_device
*dev
;
217 struct pci_bar_io bar
;
220 dev
= malloc(sizeof(*dev
));
225 memset(dev
, 0, sizeof(*dev
));
226 dev
->device
.bus
= &rte_pci_bus
.bus
;
228 dev
->addr
.domain
= conf
->pc_sel
.pc_domain
;
229 dev
->addr
.bus
= conf
->pc_sel
.pc_bus
;
230 dev
->addr
.devid
= conf
->pc_sel
.pc_dev
;
231 dev
->addr
.function
= conf
->pc_sel
.pc_func
;
234 dev
->id
.vendor_id
= conf
->pc_vendor
;
237 dev
->id
.device_id
= conf
->pc_device
;
239 /* get subsystem_vendor id */
240 dev
->id
.subsystem_vendor_id
= conf
->pc_subvendor
;
242 /* get subsystem_device id */
243 dev
->id
.subsystem_device_id
= conf
->pc_subdevice
;
246 dev
->id
.class_id
= (conf
->pc_class
<< 16) |
247 (conf
->pc_subclass
<< 8) |
250 /* TODO: get max_vfs */
253 /* FreeBSD has no NUMA support (yet) */
254 dev
->device
.numa_node
= 0;
258 /* FreeBSD has only one pass through driver */
259 dev
->kdrv
= RTE_KDRV_NIC_UIO
;
261 /* parse resources */
262 switch (conf
->pc_hdr
& PCIM_HDRTYPE
) {
263 case PCIM_HDRTYPE_NORMAL
:
264 max
= PCIR_MAX_BAR_0
;
266 case PCIM_HDRTYPE_BRIDGE
:
267 max
= PCIR_MAX_BAR_1
;
269 case PCIM_HDRTYPE_CARDBUS
:
270 max
= PCIR_MAX_BAR_2
;
276 for (i
= 0; i
<= max
; i
++) {
277 bar
.pbi_sel
= conf
->pc_sel
;
278 bar
.pbi_reg
= PCIR_BAR(i
);
279 if (ioctl(dev_pci_fd
, PCIOCGETBAR
, &bar
) < 0)
282 dev
->mem_resource
[i
].len
= bar
.pbi_length
;
283 if (PCI_BAR_IO(bar
.pbi_base
)) {
284 dev
->mem_resource
[i
].addr
= (void *)(bar
.pbi_base
& ~((uint64_t)0xf));
287 dev
->mem_resource
[i
].phys_addr
= bar
.pbi_base
& ~((uint64_t)0xf);
290 /* device is valid, add in list (sorted) */
291 if (TAILQ_EMPTY(&rte_pci_bus
.device_list
)) {
292 rte_pci_add_device(dev
);
295 struct rte_pci_device
*dev2
= NULL
;
298 TAILQ_FOREACH(dev2
, &rte_pci_bus
.device_list
, next
) {
299 ret
= rte_pci_addr_cmp(&dev
->addr
, &dev2
->addr
);
303 rte_pci_insert_device(dev2
, dev
);
304 } else { /* already registered */
305 dev2
->kdrv
= dev
->kdrv
;
306 dev2
->max_vfs
= dev
->max_vfs
;
308 memmove(dev2
->mem_resource
,
310 sizeof(dev
->mem_resource
));
315 rte_pci_add_device(dev
);
326 * Scan the content of the PCI bus, and add the devices in the devices
327 * list. Call pci_scan_one() for each pci entry found.
333 unsigned dev_count
= 0;
334 struct pci_conf matches
[16];
335 struct pci_conf_io conf_io
= {
339 .match_buf_len
= sizeof(matches
),
340 .matches
= &matches
[0],
343 /* for debug purposes, PCI can be disabled */
344 if (!rte_eal_has_pci())
347 fd
= open("/dev/pci", O_RDONLY
);
349 RTE_LOG(ERR
, EAL
, "%s(): error opening /dev/pci\n", __func__
);
355 if (ioctl(fd
, PCIOCGETCONF
, &conf_io
) < 0) {
356 RTE_LOG(ERR
, EAL
, "%s(): error with ioctl on /dev/pci: %s\n",
357 __func__
, strerror(errno
));
361 for (i
= 0; i
< conf_io
.num_matches
; i
++)
362 if (pci_scan_one(fd
, &matches
[i
]) < 0)
365 dev_count
+= conf_io
.num_matches
;
366 } while(conf_io
.status
== PCI_GETCONF_MORE_DEVS
);
370 RTE_LOG(DEBUG
, EAL
, "PCI scan found %u devices\n", dev_count
);
380 * Get iommu class of PCI devices on the bus.
383 rte_pci_get_iommu_class(void)
385 /* Supports only RTE_KDRV_NIC_UIO */
390 pci_update_device(const struct rte_pci_addr
*addr
)
393 struct pci_conf matches
[2];
394 struct pci_match_conf match
= {
396 .pc_domain
= addr
->domain
,
398 .pc_dev
= addr
->devid
,
399 .pc_func
= addr
->function
,
402 struct pci_conf_io conf_io
= {
406 .match_buf_len
= sizeof(matches
),
407 .matches
= &matches
[0],
410 fd
= open("/dev/pci", O_RDONLY
);
412 RTE_LOG(ERR
, EAL
, "%s(): error opening /dev/pci\n", __func__
);
416 if (ioctl(fd
, PCIOCGETCONF
, &conf_io
) < 0) {
417 RTE_LOG(ERR
, EAL
, "%s(): error with ioctl on /dev/pci: %s\n",
418 __func__
, strerror(errno
));
422 if (conf_io
.num_matches
!= 1)
425 if (pci_scan_one(fd
, &matches
[0]) < 0)
438 /* Read PCI config space. */
439 int rte_pci_read_config(const struct rte_pci_device
*dev
,
440 void *buf
, size_t len
, off_t offset
)
444 /* Copy Linux implementation's behaviour */
445 const int return_len
= len
;
448 .pc_domain
= dev
->addr
.domain
,
449 .pc_bus
= dev
->addr
.bus
,
450 .pc_dev
= dev
->addr
.devid
,
451 .pc_func
= dev
->addr
.function
,
456 fd
= open("/dev/pci", O_RDWR
);
458 RTE_LOG(ERR
, EAL
, "%s(): error opening /dev/pci\n", __func__
);
463 size
= (len
>= 4) ? 4 : ((len
>= 2) ? 2 : 1);
466 if (ioctl(fd
, PCIOCREAD
, &pi
) < 0)
468 memcpy(buf
, &pi
.pi_data
, size
);
470 buf
= (char *)buf
+ size
;
484 /* Write PCI config space. */
485 int rte_pci_write_config(const struct rte_pci_device
*dev
,
486 const void *buf
, size_t len
, off_t offset
)
492 .pc_domain
= dev
->addr
.domain
,
493 .pc_bus
= dev
->addr
.bus
,
494 .pc_dev
= dev
->addr
.devid
,
495 .pc_func
= dev
->addr
.function
,
498 .pi_data
= *(const uint32_t *)buf
,
502 if (len
== 3 || len
> sizeof(pi
.pi_data
)) {
503 RTE_LOG(ERR
, EAL
, "%s(): invalid pci read length\n", __func__
);
507 memcpy(&pi
.pi_data
, buf
, len
);
509 fd
= open("/dev/pci", O_RDWR
);
511 RTE_LOG(ERR
, EAL
, "%s(): error opening /dev/pci\n", __func__
);
515 if (ioctl(fd
, PCIOCWRITE
, &pi
) < 0)
528 rte_pci_ioport_map(struct rte_pci_device
*dev
, int bar
,
529 struct rte_pci_ioport
*p
)
534 #if defined(RTE_ARCH_X86)
535 case RTE_KDRV_NIC_UIO
:
536 if ((uintptr_t) dev
->mem_resource
[bar
].addr
<= UINT16_MAX
) {
537 p
->base
= (uintptr_t)dev
->mem_resource
[bar
].addr
;
/*
 * Read `len` bytes from the I/O port range described by `p`, starting
 * at `offset`, into `data`. Uses the widest in*() instruction that
 * fits the remaining length. No-op on non-x86 architectures.
 */
static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}
584 rte_pci_ioport_read(struct rte_pci_ioport
*p
,
585 void *data
, size_t len
, off_t offset
)
587 switch (p
->dev
->kdrv
) {
588 case RTE_KDRV_NIC_UIO
:
589 pci_uio_ioport_read(p
, data
, len
, offset
);
/*
 * Write `len` bytes from `data` to the I/O port range described by
 * `p`, starting at `offset`. Uses the widest out*() instruction that
 * fits the remaining length. No-op on non-x86 architectures.
 */
static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}
626 rte_pci_ioport_write(struct rte_pci_ioport
*p
,
627 const void *data
, size_t len
, off_t offset
)
629 switch (p
->dev
->kdrv
) {
630 case RTE_KDRV_NIC_UIO
:
631 pci_uio_ioport_write(p
, data
, len
, offset
);
639 rte_pci_ioport_unmap(struct rte_pci_ioport
*p
)
643 switch (p
->dev
->kdrv
) {
644 #if defined(RTE_ARCH_X86)
645 case RTE_KDRV_NIC_UIO
: