ceph/src/spdk/dpdk/drivers/bus/pci/bsd/pci.c
/* SPDX-License-Identifier: BSD-3-Clause
 * Copyright(c) 2010-2014 Intel Corporation
 */

#include <ctype.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include <dirent.h>
#include <limits.h>
#include <sys/queue.h>
#include <sys/mman.h>
#include <sys/ioctl.h>
#include <sys/pciio.h>
#include <dev/pci/pcireg.h>

#if defined(RTE_ARCH_X86)
#include <machine/cpufunc.h>
#endif

#include <rte_interrupts.h>
#include <rte_log.h>
#include <rte_pci.h>
#include <rte_bus_pci.h>
#include <rte_common.h>
#include <rte_launch.h>
#include <rte_memory.h>
#include <rte_eal.h>
#include <rte_eal_memconfig.h>
#include <rte_per_lcore.h>
#include <rte_lcore.h>
#include <rte_malloc.h>
#include <rte_string_fns.h>
#include <rte_debug.h>
#include <rte_devargs.h>

#include "eal_filesystem.h"
#include "private.h"

/**
 * @file
 * PCI probing under BSD
 *
 * This code enumerates PCI devices through the /dev/pci ioctl interface
 * (PCIOCGETCONF) rather than by parsing sysfs as the Linux implementation
 * does. Devices that DPDK is to use are expected to be bound to the
 * nic_uio kernel module, a very minimal driver that exposes the PCI BARs
 * of a device to applications through a /dev/uio@pci:bus:device:function
 * node.
 */
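
/*
 * Usage sketch (illustrative only, not part of this file): applications do
 * not call the functions below directly. rte_eal_init() triggers the PCI
 * bus scan (rte_pci_scan()) and probe, and the probe path maps matched
 * devices with rte_pci_map_device(). Assuming the NIC is already bound to
 * nic_uio, initialization boils down to:
 *
 *	int
 *	main(int argc, char **argv)
 *	{
 *		if (rte_eal_init(argc, argv) < 0)
 *			rte_panic("Cannot init EAL\n");
 *		... use the devices now listed on rte_pci_bus.device_list ...
 *		return 0;
 *	}
 */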

extern struct rte_pci_bus rte_pci_bus;

/* Map pci device */
int
rte_pci_map_device(struct rte_pci_device *dev)
{
	int ret = -1;

	/* try mapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* map resources for devices that use uio */
		ret = pci_uio_map_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		ret = 1;
		break;
	}

	return ret;
}

/* Unmap pci device */
void
rte_pci_unmap_device(struct rte_pci_device *dev)
{
	/* try unmapping the NIC resources */
	switch (dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		/* unmap resources for devices that use uio */
		pci_uio_unmap_resource(dev);
		break;
	default:
		RTE_LOG(DEBUG, EAL,
			" Not managed by a supported kernel driver, skipped\n");
		break;
	}
}
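
/*
 * Sketch of how the common PCI bus code drives the two helpers above during
 * probe (simplified and illustrative; the real logic lives outside this
 * file, in the shared PCI bus layer):
 *
 *	if ((drv->drv_flags & RTE_PCI_DRV_NEED_MAPPING) &&
 *			rte_pci_map_device(dev) != 0)
 *		return -1;
 *	ret = drv->probe(drv, dev);
 *	if (ret != 0 && (drv->drv_flags & RTE_PCI_DRV_NEED_MAPPING))
 *		rte_pci_unmap_device(dev);
 */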

void
pci_uio_free_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource *uio_res)
{
	rte_free(uio_res);

	if (dev->intr_handle.fd) {
		close(dev->intr_handle.fd);
		dev->intr_handle.fd = -1;
		dev->intr_handle.type = RTE_INTR_HANDLE_UNKNOWN;
	}
}

int
pci_uio_alloc_resource(struct rte_pci_device *dev,
		struct mapped_pci_resource **uio_res)
{
	char devname[PATH_MAX]; /* contains the /dev/uioX */
	struct rte_pci_addr *loc;

	loc = &dev->addr;

	snprintf(devname, sizeof(devname), "/dev/uio@pci:%u:%u:%u",
			dev->addr.bus, dev->addr.devid, dev->addr.function);

	if (access(devname, O_RDWR) < 0) {
		RTE_LOG(WARNING, EAL, " "PCI_PRI_FMT" not managed by UIO driver, "
				"skipping\n", loc->domain, loc->bus, loc->devid, loc->function);
		return 1;
	}

	/* save fd if in primary process */
	dev->intr_handle.fd = open(devname, O_RDWR);
	if (dev->intr_handle.fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}
	dev->intr_handle.type = RTE_INTR_HANDLE_UIO;

	/* allocate the mapping details for secondary processes */
	*uio_res = rte_zmalloc("UIO_RES", sizeof(**uio_res), 0);
	if (*uio_res == NULL) {
		RTE_LOG(ERR, EAL,
			"%s(): cannot store uio mmap details\n", __func__);
		goto error;
	}

	strlcpy((*uio_res)->path, devname, sizeof((*uio_res)->path));
	memcpy(&(*uio_res)->pci_addr, &dev->addr, sizeof((*uio_res)->pci_addr));

	return 0;

error:
	pci_uio_free_resource(dev, *uio_res);
	return -1;
}
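
/*
 * Example of the device node name built above: a device at PCI address
 * 0000:02:00.0 (domain 0, bus 2, device 0, function 0) is expected to be
 * exposed by nic_uio as
 *
 *	/dev/uio@pci:2:0:0
 *
 * Note that the domain is not encoded in the node name; only bus, device
 * (devid) and function are.
 */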

int
pci_uio_map_resource_by_index(struct rte_pci_device *dev, int res_idx,
		struct mapped_pci_resource *uio_res, int map_idx)
{
	int fd;
	char *devname;
	void *mapaddr;
	uint64_t offset;
	uint64_t pagesz;
	struct pci_map *maps;

	maps = uio_res->maps;
	devname = uio_res->path;
	pagesz = sysconf(_SC_PAGESIZE);

	/* allocate memory to keep path */
	maps[map_idx].path = rte_malloc(NULL, strlen(devname) + 1, 0);
	if (maps[map_idx].path == NULL) {
		RTE_LOG(ERR, EAL, "Cannot allocate memory for path: %s\n",
			strerror(errno));
		return -1;
	}

	/*
	 * open resource file, to mmap it
	 */
	fd = open(devname, O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "Cannot open %s: %s\n",
			devname, strerror(errno));
		goto error;
	}

	/* if matching map is found, then use it */
	offset = res_idx * pagesz;
	mapaddr = pci_map_resource(NULL, fd, (off_t)offset,
			(size_t)dev->mem_resource[res_idx].len, 0);
	close(fd);
	if (mapaddr == MAP_FAILED)
		goto error;

	maps[map_idx].phaddr = dev->mem_resource[res_idx].phys_addr;
	maps[map_idx].size = dev->mem_resource[res_idx].len;
	maps[map_idx].addr = mapaddr;
	maps[map_idx].offset = offset;
	strcpy(maps[map_idx].path, devname);
	dev->mem_resource[res_idx].addr = mapaddr;

	return 0;

error:
	rte_free(maps[map_idx].path);
	return -1;
}

static int
pci_scan_one(int dev_pci_fd, struct pci_conf *conf)
{
	struct rte_pci_device *dev;
	struct pci_bar_io bar;
	unsigned i, max;

	dev = malloc(sizeof(*dev));
	if (dev == NULL) {
		return -1;
	}

	memset(dev, 0, sizeof(*dev));
	dev->device.bus = &rte_pci_bus.bus;

	dev->addr.domain = conf->pc_sel.pc_domain;
	dev->addr.bus = conf->pc_sel.pc_bus;
	dev->addr.devid = conf->pc_sel.pc_dev;
	dev->addr.function = conf->pc_sel.pc_func;

	/* get vendor id */
	dev->id.vendor_id = conf->pc_vendor;

	/* get device id */
	dev->id.device_id = conf->pc_device;

	/* get subsystem_vendor id */
	dev->id.subsystem_vendor_id = conf->pc_subvendor;

	/* get subsystem_device id */
	dev->id.subsystem_device_id = conf->pc_subdevice;

	/* get class id */
	dev->id.class_id = (conf->pc_class << 16) |
			   (conf->pc_subclass << 8) |
			   (conf->pc_progif);

	/* TODO: get max_vfs */
	dev->max_vfs = 0;

	/* FreeBSD has no NUMA support (yet) */
	dev->device.numa_node = 0;

	pci_name_set(dev);

	/* FreeBSD has only one pass through driver */
	dev->kdrv = RTE_KDRV_NIC_UIO;

	/* parse resources */
	switch (conf->pc_hdr & PCIM_HDRTYPE) {
	case PCIM_HDRTYPE_NORMAL:
		max = PCIR_MAX_BAR_0;
		break;
	case PCIM_HDRTYPE_BRIDGE:
		max = PCIR_MAX_BAR_1;
		break;
	case PCIM_HDRTYPE_CARDBUS:
		max = PCIR_MAX_BAR_2;
		break;
	default:
		goto skipdev;
	}

	for (i = 0; i <= max; i++) {
		bar.pbi_sel = conf->pc_sel;
		bar.pbi_reg = PCIR_BAR(i);
		if (ioctl(dev_pci_fd, PCIOCGETBAR, &bar) < 0)
			continue;

		dev->mem_resource[i].len = bar.pbi_length;
		if (PCI_BAR_IO(bar.pbi_base)) {
			dev->mem_resource[i].addr = (void *)(bar.pbi_base & ~((uint64_t)0xf));
			continue;
		}
		dev->mem_resource[i].phys_addr = bar.pbi_base & ~((uint64_t)0xf);
	}

	/* device is valid, add in list (sorted) */
	if (TAILQ_EMPTY(&rte_pci_bus.device_list)) {
		rte_pci_add_device(dev);
	} else {
		struct rte_pci_device *dev2 = NULL;
		int ret;

		TAILQ_FOREACH(dev2, &rte_pci_bus.device_list, next) {
			ret = rte_pci_addr_cmp(&dev->addr, &dev2->addr);
			if (ret > 0)
				continue;
			else if (ret < 0) {
				rte_pci_insert_device(dev2, dev);
			} else { /* already registered */
				dev2->kdrv = dev->kdrv;
				dev2->max_vfs = dev->max_vfs;
				pci_name_set(dev2);
				memmove(dev2->mem_resource,
					dev->mem_resource,
					sizeof(dev->mem_resource));
				free(dev);
			}
			return 0;
		}
		rte_pci_add_device(dev);
	}

	return 0;

skipdev:
	free(dev);
	return 0;
}

/*
 * Scan the content of the PCI bus, and add the devices in the devices
 * list. Call pci_scan_one() for each pci entry found.
 */
int
rte_pci_scan(void)
{
	int fd;
	unsigned dev_count = 0;
	struct pci_conf matches[16];
	struct pci_conf_io conf_io = {
		.pat_buf_len = 0,
		.num_patterns = 0,
		.patterns = NULL,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};

	/* for debug purposes, PCI can be disabled */
	if (!rte_eal_has_pci())
		return 0;

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	do {
		unsigned i;
		if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
			RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
				__func__, strerror(errno));
			goto error;
		}

		for (i = 0; i < conf_io.num_matches; i++)
			if (pci_scan_one(fd, &matches[i]) < 0)
				goto error;

		dev_count += conf_io.num_matches;
	} while (conf_io.status == PCI_GETCONF_MORE_DEVS);

	close(fd);

	RTE_LOG(DEBUG, EAL, "PCI scan found %u devices\n", dev_count);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
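
/*
 * Sketch (illustrative only): after rte_pci_scan() the discovered devices
 * sit on rte_pci_bus.device_list and can be walked with the same TAILQ
 * pattern used in pci_scan_one(), e.g. to log address and IDs:
 *
 *	struct rte_pci_device *d;
 *
 *	TAILQ_FOREACH(d, &rte_pci_bus.device_list, next)
 *		RTE_LOG(DEBUG, EAL, PCI_PRI_FMT " %04x:%04x\n",
 *			d->addr.domain, d->addr.bus, d->addr.devid,
 *			d->addr.function, d->id.vendor_id, d->id.device_id);
 */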

/*
 * Get iommu class of PCI devices on the bus.
 */
enum rte_iova_mode
rte_pci_get_iommu_class(void)
{
	/* Supports only RTE_KDRV_NIC_UIO */
	return RTE_IOVA_PA;
}

int
pci_update_device(const struct rte_pci_addr *addr)
{
	int fd;
	struct pci_conf matches[2];
	struct pci_match_conf match = {
		.pc_sel = {
			.pc_domain = addr->domain,
			.pc_bus = addr->bus,
			.pc_dev = addr->devid,
			.pc_func = addr->function,
		},
	};
	struct pci_conf_io conf_io = {
		.pat_buf_len = 0,
		.num_patterns = 1,
		.patterns = &match,
		.match_buf_len = sizeof(matches),
		.matches = &matches[0],
	};

	fd = open("/dev/pci", O_RDONLY);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCGETCONF, &conf_io) < 0) {
		RTE_LOG(ERR, EAL, "%s(): error with ioctl on /dev/pci: %s\n",
			__func__, strerror(errno));
		goto error;
	}

	if (conf_io.num_matches != 1)
		goto error;

	if (pci_scan_one(fd, &matches[0]) < 0)
		goto error;

	close(fd);

	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}

/* Read PCI config space. */
int rte_pci_read_config(const struct rte_pci_device *dev,
		void *buf, size_t len, off_t offset)
{
	int fd = -1;
	int size;
	/* Copy Linux implementation's behaviour */
	const int return_len = len;
	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
	};

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	while (len > 0) {
		size = (len >= 4) ? 4 : ((len >= 2) ? 2 : 1);
		pi.pi_width = size;

		if (ioctl(fd, PCIOCREAD, &pi) < 0)
			goto error;
		memcpy(buf, &pi.pi_data, size);

		buf = (char *)buf + size;
		pi.pi_reg += size;
		len -= size;
	}
	close(fd);

	return return_len;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
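
/*
 * Usage sketch (illustrative only; "dev" is assumed to be a scanned
 * rte_pci_device): reading the 16-bit vendor ID and command register using
 * the standard offsets from <dev/pci/pcireg.h>:
 *
 *	uint16_t vendor, cmd;
 *
 *	if (rte_pci_read_config(dev, &vendor, sizeof(vendor), PCIR_VENDOR) < 0)
 *		return -1;
 *	if (rte_pci_read_config(dev, &cmd, sizeof(cmd), PCIR_COMMAND) < 0)
 *		return -1;
 */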

/* Write PCI config space. */
int rte_pci_write_config(const struct rte_pci_device *dev,
		const void *buf, size_t len, off_t offset)
{
	int fd = -1;

	struct pci_io pi = {
		.pi_sel = {
			.pc_domain = dev->addr.domain,
			.pc_bus = dev->addr.bus,
			.pc_dev = dev->addr.devid,
			.pc_func = dev->addr.function,
		},
		.pi_reg = offset,
		.pi_width = len,
	};

	/* pi_data can hold at most 4 bytes; a 3 byte access is not valid */
	if (len == 3 || len > sizeof(pi.pi_data)) {
		RTE_LOG(ERR, EAL, "%s(): invalid pci write length\n", __func__);
		goto error;
	}

	/* copy the payload only once the length has been validated */
	memcpy(&pi.pi_data, buf, len);

	fd = open("/dev/pci", O_RDWR);
	if (fd < 0) {
		RTE_LOG(ERR, EAL, "%s(): error opening /dev/pci\n", __func__);
		goto error;
	}

	if (ioctl(fd, PCIOCWRITE, &pi) < 0)
		goto error;

	close(fd);
	return 0;

error:
	if (fd >= 0)
		close(fd);
	return -1;
}
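
/*
 * Usage sketch (illustrative only): a read-modify-write of the command
 * register, e.g. to set the bus master enable bit, combines the two config
 * space helpers above:
 *
 *	uint16_t cmd;
 *
 *	if (rte_pci_read_config(dev, &cmd, sizeof(cmd), PCIR_COMMAND) < 0)
 *		return -1;
 *	cmd |= PCIM_CMD_BUSMASTEREN;
 *	if (rte_pci_write_config(dev, &cmd, sizeof(cmd), PCIR_COMMAND) < 0)
 *		return -1;
 */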

int
rte_pci_ioport_map(struct rte_pci_device *dev, int bar,
		struct rte_pci_ioport *p)
{
	int ret;

	switch (dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		if ((uintptr_t) dev->mem_resource[bar].addr <= UINT16_MAX) {
			p->base = (uintptr_t)dev->mem_resource[bar].addr;
			ret = 0;
		} else
			ret = -1;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	if (!ret)
		p->dev = dev;

	return ret;
}

static void
pci_uio_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	uint8_t *d;
	int size;
	unsigned short reg = p->base + offset;

	for (d = data; len > 0; d += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			*(uint32_t *)d = inl(reg);
		} else if (len >= 2) {
			size = 2;
			*(uint16_t *)d = inw(reg);
		} else {
			size = 1;
			*d = inb(reg);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_read(struct rte_pci_ioport *p,
		void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_read(p, data, len, offset);
		break;
	default:
		break;
	}
}

static void
pci_uio_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
#if defined(RTE_ARCH_X86)
	const uint8_t *s;
	int size;
	unsigned short reg = p->base + offset;

	for (s = data; len > 0; s += size, reg += size, len -= size) {
		if (len >= 4) {
			size = 4;
			outl(reg, *(const uint32_t *)s);
		} else if (len >= 2) {
			size = 2;
			outw(reg, *(const uint16_t *)s);
		} else {
			size = 1;
			outb(reg, *s);
		}
	}
#else
	RTE_SET_USED(p);
	RTE_SET_USED(data);
	RTE_SET_USED(len);
	RTE_SET_USED(offset);
#endif
}

void
rte_pci_ioport_write(struct rte_pci_ioport *p,
		const void *data, size_t len, off_t offset)
{
	switch (p->dev->kdrv) {
	case RTE_KDRV_NIC_UIO:
		pci_uio_ioport_write(p, data, len, offset);
		break;
	default:
		break;
	}
}

int
rte_pci_ioport_unmap(struct rte_pci_ioport *p)
{
	int ret;

	switch (p->dev->kdrv) {
#if defined(RTE_ARCH_X86)
	case RTE_KDRV_NIC_UIO:
		ret = 0;
		break;
#endif
	default:
		ret = -1;
		break;
	}

	return ret;
}
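
/*
 * Usage sketch (illustrative only): the ioport helpers are meant to be used
 * as a map/read-write/unmap sequence on an I/O BAR (BAR 0 is just an
 * example here):
 *
 *	struct rte_pci_ioport io;
 *	uint16_t val;
 *
 *	if (rte_pci_ioport_map(dev, 0, &io) == 0) {
 *		rte_pci_ioport_read(&io, &val, sizeof(val), 0);
 *		rte_pci_ioport_unmap(&io);
 *	}
 */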