/* SPDX-License-Identifier: BSD-3-Clause
 *
 * Copyright (c) 2015-2016 Freescale Semiconductor, Inc. All rights reserved.
 * Copyright 2016-2018 NXP
 *
 */

#include <unistd.h>
#include <stdio.h>
#include <sys/types.h>
#include <string.h>
#include <stdlib.h>
#include <fcntl.h>
#include <errno.h>
#include <sys/ioctl.h>
#include <sys/stat.h>
#include <sys/mman.h>
#include <sys/vfs.h>
#include <libgen.h>
#include <dirent.h>
#include <sys/eventfd.h>

#include <eal_filesystem.h>
#include <rte_mbuf.h>
#include <rte_ethdev_driver.h>
#include <rte_malloc.h>
#include <rte_memcpy.h>
#include <rte_string_fns.h>
#include <rte_cycles.h>
#include <rte_kvargs.h>
#include <rte_dev.h>
#include <rte_bus.h>
#include <rte_eal_memconfig.h>

#include "rte_fslmc.h"
#include "fslmc_vfio.h"
#include "fslmc_logs.h"
#include <mc/fsl_dpmng.h>

#include "portal/dpaa2_hw_pvt.h"
#include "portal/dpaa2_hw_dpio.h"

/** Pathname of FSL-MC devices directory. */
#define SYSFS_FSL_MC_DEVICES "/sys/bus/fsl-mc/devices"

#define FSLMC_CONTAINER_MAX_LEN 8 /**< Of the format dprc.XX */

/* VFIO container and group state for the (single) bus container */
static struct fslmc_vfio_group vfio_group;
static struct fslmc_vfio_container vfio_container;
static int container_device_fd;
static char *g_container;
static uint32_t *msi_intr_vaddr;
void *(*rte_mcp_ptr_list);

static struct rte_dpaa2_object_list dpaa2_obj_list =
	TAILQ_HEAD_INITIALIZER(dpaa2_obj_list);

/* Register a FSLMC bus based DPAA2 object */
void
rte_fslmc_object_register(struct rte_dpaa2_object *object)
{
	RTE_VERIFY(object);

	TAILQ_INSERT_TAIL(&dpaa2_obj_list, object, next);
}
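
/*
 * Object drivers do not usually call rte_fslmc_object_register() directly;
 * a constructor macro from rte_fslmc.h does it at load time. A minimal
 * sketch, modeled on the dpbp object driver (names are illustrative):
 *
 *	static struct rte_dpaa2_object rte_dpaa2_dpbp_obj = {
 *		.dev_type = DPAA2_BPOOL,
 *		.create = dpaa2_create_dpbp_device,
 *	};
 *	RTE_PMD_REGISTER_DPAA2_OBJECT(dpbp, rte_dpaa2_dpbp_obj);
 */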

int
fslmc_get_container_group(int *groupid)
{
	int ret;
	char *container;

	if (!g_container) {
		container = getenv("DPRC");
		if (container == NULL) {
			DPAA2_BUS_DEBUG("DPAA2: DPRC not available");
			return -EINVAL;
		}

		if (strlen(container) >= FSLMC_CONTAINER_MAX_LEN) {
			DPAA2_BUS_ERR("Invalid container name: %s", container);
			return -1;
		}

		g_container = strdup(container);
		if (!g_container) {
			DPAA2_BUS_ERR("Memory alloc failure for container name");
			return -ENOMEM;
		}
	}

	/* get group number */
	ret = rte_vfio_get_group_num(SYSFS_FSL_MC_DEVICES,
				     g_container, groupid);
	if (ret <= 0) {
		DPAA2_BUS_ERR("Unable to find %s IOMMU group", g_container);
		return -1;
	}

	DPAA2_BUS_DEBUG("Container: %s has VFIO iommu group id = %d",
			g_container, *groupid);

	return 0;
}
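
/*
 * For reference, the container name is taken from the DPRC environment
 * variable, e.g. (a hypothetical container created via restool):
 *
 *	export DPRC=dprc.2
 *
 * rte_vfio_get_group_num() then resolves the group by reading the
 * /sys/bus/fsl-mc/devices/dprc.2/iommu_group symlink.
 */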

static int
vfio_connect_container(void)
{
	int fd, ret;

	if (vfio_container.used) {
		DPAA2_BUS_DEBUG("Container already in use");
		return -1;
	}

	/* Try connecting to vfio container if already created */
	if (!ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER,
		&vfio_container.fd)) {
		DPAA2_BUS_DEBUG(
		    "Container pre-exists with FD[0x%x] for this group",
		    vfio_container.fd);
		vfio_group.container = &vfio_container;
		return 0;
	}

	/* Open the main VFIO file descriptor, which represents a container */
	fd = rte_vfio_get_container_fd();
	if (fd < 0) {
		DPAA2_BUS_ERR("Failed to open VFIO container");
		return -errno;
	}

	/* Check whether SMMU (VFIO type1 IOMMU) support is present */
	if (ioctl(fd, VFIO_CHECK_EXTENSION, VFIO_TYPE1_IOMMU)) {
		/* Connect group to container */
		ret = ioctl(vfio_group.fd, VFIO_GROUP_SET_CONTAINER, &fd);
		if (ret) {
			DPAA2_BUS_ERR("Failed to setup group container");
			close(fd);
			return -errno;
		}

		ret = ioctl(fd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
		if (ret) {
			DPAA2_BUS_ERR("Failed to setup VFIO iommu");
			close(fd);
			return -errno;
		}
	} else {
		DPAA2_BUS_ERR("No supported IOMMU available");
		close(fd);
		return -EINVAL;
	}

	vfio_container.used = 1;
	vfio_container.fd = fd;
	vfio_container.group = &vfio_group;
	vfio_group.container = &vfio_container;

	return 0;
}
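
/*
 * Stripped of the bookkeeping above, the standard VFIO type1 handshake is
 * roughly the following (a sketch; error handling omitted):
 *
 *	int cfd = open("/dev/vfio/vfio", O_RDWR);
 *	ioctl(group_fd, VFIO_GROUP_SET_CONTAINER, &cfd);
 *	ioctl(cfd, VFIO_SET_IOMMU, VFIO_TYPE1_IOMMU);
 *
 * rte_vfio_get_container_fd() wraps the open() call (and, in secondary
 * processes, obtains the fd from the primary over the EAL IPC channel).
 */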

static int vfio_map_irq_region(struct fslmc_vfio_group *group)
{
	int ret;
	unsigned long *vaddr = NULL;
	struct vfio_iommu_type1_dma_map map = {
		.argsz = sizeof(map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
		.vaddr = 0x6030000,
		.iova = 0x6030000,
		.size = 0x1000,
	};
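
	/*
	 * Note: 0x6030000 is a fixed physical address on the supported
	 * SoCs; given that msi_intr_vaddr is taken at offset 0x40 below,
	 * this appears to be the GICv3 ITS translation frame, whose
	 * GITS_TRANSLATER register sits at that offset.
	 */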

	vaddr = (unsigned long *)mmap(NULL, 0x1000, PROT_WRITE |
		PROT_READ, MAP_SHARED, container_device_fd, 0x6030000);
	if (vaddr == MAP_FAILED) {
		DPAA2_BUS_INFO("Unable to map region (errno = %d)", errno);
		return -errno;
	}

	msi_intr_vaddr = (uint32_t *)((char *)(vaddr) + 64);
	map.vaddr = (unsigned long)vaddr;
	ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &map);
	if (ret == 0)
		return 0;

	DPAA2_BUS_ERR("Unable to map DMA address (errno = %d)", errno);
	return -errno;
}

static int fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);
static int fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr, size_t len);

static void
fslmc_memevent_cb(enum rte_mem_event type, const void *addr, size_t len,
		  void *arg __rte_unused)
{
	struct rte_memseg_list *msl;
	struct rte_memseg *ms;
	size_t cur_len = 0, map_len = 0;
	uint64_t virt_addr;
	rte_iova_t iova_addr;
	int ret;

	msl = rte_mem_virt2memseg_list(addr);

	while (cur_len < len) {
		const void *va = RTE_PTR_ADD(addr, cur_len);

		ms = rte_mem_virt2memseg(va, msl);
		iova_addr = ms->iova;
		virt_addr = ms->addr_64;
		map_len = ms->len;

		DPAA2_BUS_DEBUG("Request for %s, va=%p, "
				"virt_addr=0x%" PRIx64 ", "
				"iova=0x%" PRIx64 ", map_len=%zu",
				type == RTE_MEM_EVENT_ALLOC ?
					"alloc" : "dealloc",
				va, virt_addr, iova_addr, map_len);

		/* iova_addr may be set to RTE_BAD_IOVA */
		if (iova_addr == RTE_BAD_IOVA) {
			DPAA2_BUS_DEBUG("Segment has invalid iova, skipping");
			cur_len += map_len;
			continue;
		}

		if (type == RTE_MEM_EVENT_ALLOC)
			ret = fslmc_map_dma(virt_addr, iova_addr, map_len);
		else
			ret = fslmc_unmap_dma(virt_addr, iova_addr, map_len);

		if (ret != 0) {
			DPAA2_BUS_ERR("DMA Mapping/Unmapping failed. "
				      "Map=%d, addr=%p, len=%zu, err:(%d)",
				      type, va, map_len, ret);
			return;
		}

		cur_len += map_len;
	}

	if (type == RTE_MEM_EVENT_ALLOC)
		DPAA2_BUS_DEBUG("Total Mapped: addr=%p, len=%zu",
				addr, len);
	else
		DPAA2_BUS_DEBUG("Total Unmapped: addr=%p, len=%zu",
				addr, len);
}

static int
fslmc_map_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
{
	struct fslmc_vfio_group *group;
	struct vfio_iommu_type1_dma_map dma_map = {
		.argsz = sizeof(struct vfio_iommu_type1_dma_map),
		.flags = VFIO_DMA_MAP_FLAG_READ | VFIO_DMA_MAP_FLAG_WRITE,
	};
	int ret;

	dma_map.size = len;
	dma_map.vaddr = vaddr;

#ifdef RTE_LIBRTE_DPAA2_USE_PHYS_IOVA
	dma_map.iova = iovaddr;
#else
	dma_map.iova = dma_map.vaddr;
#endif

	/* Set the DMA mapping through the container's IOMMU */
	group = &vfio_group;

	if (!group->container) {
		DPAA2_BUS_ERR("Container is not connected");
		return -1;
	}

	DPAA2_BUS_DEBUG("--> Map address: 0x%"PRIx64", size: %"PRIu64"",
			(uint64_t)dma_map.vaddr, (uint64_t)dma_map.size);
	ret = ioctl(group->container->fd, VFIO_IOMMU_MAP_DMA, &dma_map);
	if (ret) {
		DPAA2_BUS_ERR("VFIO_IOMMU_MAP_DMA failed (errno = %d)",
			      errno);
		return -1;
	}

	return 0;
}
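
/*
 * When RTE_LIBRTE_DPAA2_USE_PHYS_IOVA is not defined the mapping above is an
 * identity mapping (iova == vaddr), which is what lets fslmc_unmap_dma()
 * below address the window by virtual address.
 */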

static int
fslmc_unmap_dma(uint64_t vaddr, rte_iova_t iovaddr __rte_unused, size_t len)
{
	struct fslmc_vfio_group *group;
	struct vfio_iommu_type1_dma_unmap dma_unmap = {
		.argsz = sizeof(struct vfio_iommu_type1_dma_unmap),
		.flags = 0,
	};
	int ret;

	dma_unmap.size = len;
	dma_unmap.iova = vaddr;

	/* Remove the DMA mapping through the container's IOMMU */
	group = &vfio_group;

	if (!group->container) {
		DPAA2_BUS_ERR("Container is not connected");
		return -1;
	}

	DPAA2_BUS_DEBUG("--> Unmap address: 0x%"PRIx64", size: %"PRIu64"",
			(uint64_t)dma_unmap.iova, (uint64_t)dma_unmap.size);
	ret = ioctl(group->container->fd, VFIO_IOMMU_UNMAP_DMA, &dma_unmap);
	if (ret) {
		DPAA2_BUS_ERR("VFIO_IOMMU_UNMAP_DMA failed (errno = %d)",
			      errno);
		return -1;
	}

	return 0;
}
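
/*
 * A map/unmap round trip, as the memory-event callback above performs it
 * for one segment (a sketch; "ms" is a populated struct rte_memseg *):
 *
 *	if (fslmc_map_dma(ms->addr_64, ms->iova, ms->len) == 0) {
 *		... segment is now visible to the devices for DMA ...
 *		fslmc_unmap_dma(ms->addr_64, ms->iova, ms->len);
 *	}
 */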

static int
fslmc_dmamap_seg(const struct rte_memseg_list *msl __rte_unused,
		 const struct rte_memseg *ms, void *arg)
{
	int *n_segs = arg;
	int ret;

	/* if IOVA address is invalid, skip */
	if (ms->iova == RTE_BAD_IOVA)
		return 0;

	ret = fslmc_map_dma(ms->addr_64, ms->iova, ms->len);
	if (ret)
		DPAA2_BUS_ERR("Unable to VFIO map (addr=%p, len=%zu)",
			      ms->addr, ms->len);
	else
		(*n_segs)++;

	return ret;
}

int rte_fslmc_vfio_dmamap(void)
{
	int i = 0, ret;
	struct rte_mem_config *mcfg = rte_eal_get_configuration()->mem_config;
	rte_rwlock_t *mem_lock = &mcfg->memory_hotplug_lock;

	/* Lock before parsing and registering callback to memory subsystem */
	rte_rwlock_read_lock(mem_lock);

	if (rte_memseg_walk(fslmc_dmamap_seg, &i) < 0) {
		rte_rwlock_read_unlock(mem_lock);
		return -1;
	}

	ret = rte_mem_event_callback_register("fslmc_memevent_clb",
					      fslmc_memevent_cb, NULL);
	if (ret && rte_errno == ENOTSUP)
		DPAA2_BUS_DEBUG("Memory event callbacks not supported");
	else if (ret)
		DPAA2_BUS_DEBUG("Unable to install memory handler");
	else
		DPAA2_BUS_DEBUG("Installed memory callback handler");

	DPAA2_BUS_DEBUG("Total %d segments found.", i);

	/* TODO - This is a workaround as VFIO currently does not add the
	 * mapping of the interrupt region to SMMU. This should be removed
	 * once the support is added in the kernel.
	 */
	vfio_map_irq_region(&vfio_group);

	/* Existing segments have been mapped and memory callback for hotplug
	 * has been installed.
	 */
	rte_rwlock_read_unlock(mem_lock);

	return 0;
}
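
/*
 * Note on the walk above: rte_memseg_walk() stops as soon as the callback
 * returns non-zero, so a single failing fslmc_dmamap_seg() aborts the walk
 * and rte_fslmc_vfio_dmamap() reports the failure.
 */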

static int64_t vfio_map_mcp_obj(struct fslmc_vfio_group *group, char *mcp_obj)
{
	intptr_t v_addr = (intptr_t)MAP_FAILED;
	int32_t ret, mc_fd;

	struct vfio_device_info d_info = { .argsz = sizeof(d_info) };
	struct vfio_region_info reg_info = { .argsz = sizeof(reg_info) };

	/* Get the MCP object's device fd */
	mc_fd = ioctl(group->fd, VFIO_GROUP_GET_DEVICE_FD, mcp_obj);
	if (mc_fd < 0) {
		DPAA2_BUS_ERR("Error in VFIO get dev %s fd from group %d",
			      mcp_obj, group->fd);
		return v_addr;
	}

	/* Get the device info */
	ret = ioctl(mc_fd, VFIO_DEVICE_GET_INFO, &d_info);
	if (ret < 0) {
		DPAA2_BUS_ERR("Error in VFIO getting DEVICE_INFO");
		goto MC_FAILURE;
	}

	/* Get the device region info (region 0, the MC portal) */
	ret = ioctl(mc_fd, VFIO_DEVICE_GET_REGION_INFO, &reg_info);
	if (ret < 0) {
		DPAA2_BUS_ERR("Error in VFIO getting REGION_INFO");
		goto MC_FAILURE;
	}

	v_addr = (size_t)mmap(NULL, reg_info.size,
			      PROT_WRITE | PROT_READ, MAP_SHARED,
			      mc_fd, reg_info.offset);

MC_FAILURE:
	close(mc_fd);

	return v_addr;
}

#define IRQ_SET_BUF_LEN (sizeof(struct vfio_irq_set) + sizeof(int))
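
/*
 * struct vfio_irq_set ends in a flexible array member (__u8 data[]); the
 * buffer length above reserves room for exactly one eventfd descriptor,
 * which rte_dpaa2_intr_enable() writes through fd_ptr below.
 */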

int rte_dpaa2_intr_enable(struct rte_intr_handle *intr_handle, int index)
{
	int len, ret;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	struct vfio_irq_set *irq_set;
	int *fd_ptr;

	len = sizeof(irq_set_buf);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = len;
	irq_set->count = 1;
	irq_set->flags =
		VFIO_IRQ_SET_DATA_EVENTFD | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = index;
	irq_set->start = 0;
	fd_ptr = (int *)&irq_set->data;
	*fd_ptr = intr_handle->fd;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret) {
		DPAA2_BUS_ERR("Error: dpaa2 SET IRQs fd=%d, err = %d(%s)",
			      intr_handle->fd, errno, strerror(errno));
		return ret;
	}

	return ret;
}
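
/*
 * Once the trigger is wired up, any poller can consume interrupt events
 * from the handle's eventfd; a sketch:
 *
 *	uint64_t cnt;
 *
 *	if (read(intr_handle->fd, &cnt, sizeof(cnt)) == sizeof(cnt))
 *		... cnt interrupts fired since the last read ...
 */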

int rte_dpaa2_intr_disable(struct rte_intr_handle *intr_handle, int index)
{
	struct vfio_irq_set *irq_set;
	char irq_set_buf[IRQ_SET_BUF_LEN];
	int len, ret;

	len = sizeof(struct vfio_irq_set);

	irq_set = (struct vfio_irq_set *)irq_set_buf;
	irq_set->argsz = len;
	irq_set->flags = VFIO_IRQ_SET_DATA_NONE | VFIO_IRQ_SET_ACTION_TRIGGER;
	irq_set->index = index;
	irq_set->start = 0;
	irq_set->count = 0;

	ret = ioctl(intr_handle->vfio_dev_fd, VFIO_DEVICE_SET_IRQS, irq_set);
	if (ret)
		DPAA2_BUS_ERR(
			"Error disabling dpaa2 interrupts for fd %d",
			intr_handle->fd);

	return ret;
}

/* set up interrupt support (but do not enable interrupts) */
int
rte_dpaa2_vfio_setup_intr(struct rte_intr_handle *intr_handle,
			  int vfio_dev_fd,
			  int num_irqs)
{
	int i, ret;

	/* start from MSI-X interrupt type */
	for (i = 0; i < num_irqs; i++) {
		struct vfio_irq_info irq_info = { .argsz = sizeof(irq_info) };
		int fd = -1;

		irq_info.index = i;

		ret = ioctl(vfio_dev_fd, VFIO_DEVICE_GET_IRQ_INFO, &irq_info);
		if (ret < 0) {
			DPAA2_BUS_ERR("Cannot get IRQ(%d) info, error %i (%s)",
				      i, errno, strerror(errno));
			return -1;
		}

		/* if this vector cannot be used with eventfd,
		 * skip it and try the next one
		 */
		if ((irq_info.flags & VFIO_IRQ_INFO_EVENTFD) == 0)
			continue;

		/* set up an eventfd for interrupts */
		fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
		if (fd < 0) {
			DPAA2_BUS_ERR("Cannot set up eventfd, error %i (%s)",
				      errno, strerror(errno));
			return -1;
		}

		intr_handle->fd = fd;
		intr_handle->type = RTE_INTR_HANDLE_VFIO_MSI;
		intr_handle->vfio_dev_fd = vfio_dev_fd;

		return 0;
	}

	/* if we're here, we haven't found a suitable interrupt vector */
	return -1;
}
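
/*
 * Note that the loop above returns on the first eventfd-capable IRQ index,
 * so the handle is bound to the lowest usable vector; DPAA2 objects
 * typically expose a single IRQ.
 */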

/*
 * fslmc_process_iodevices for processing only IO (ETH, CRYPTO, and possibly
 * EVENT) devices.
 */
static int
fslmc_process_iodevices(struct rte_dpaa2_device *dev)
{
	int dev_fd;
	struct vfio_device_info device_info = { .argsz = sizeof(device_info) };
	struct rte_dpaa2_object *object = NULL;

	dev_fd = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD,
		       dev->device.name);
	if (dev_fd <= 0) {
		DPAA2_BUS_ERR("Unable to obtain device FD for device:%s",
			      dev->device.name);
		return -1;
	}

	if (ioctl(dev_fd, VFIO_DEVICE_GET_INFO, &device_info)) {
		DPAA2_BUS_ERR("Unable to obtain information for device:%s",
			      dev->device.name);
		return -1;
	}

	switch (dev->dev_type) {
	case DPAA2_ETH:
		rte_dpaa2_vfio_setup_intr(&dev->intr_handle, dev_fd,
					  device_info.num_irqs);
		break;
	case DPAA2_CON:
	case DPAA2_IO:
	case DPAA2_CI:
	case DPAA2_BPOOL:
	case DPAA2_MUX:
		TAILQ_FOREACH(object, &dpaa2_obj_list, next) {
			if (dev->dev_type == object->dev_type)
				object->create(dev_fd, &device_info,
					       dev->object_id);
			else
				continue;
		}
		break;
	default:
		break;
	}

	DPAA2_BUS_LOG(DEBUG, "Device (%s) abstracted from VFIO",
		      dev->device.name);
	return 0;
}

static int
fslmc_process_mcp(struct rte_dpaa2_device *dev)
{
	int ret;
	intptr_t v_addr;
	char *dev_name = NULL;
	struct fsl_mc_io dpmng = {0};
	struct mc_version mc_ver_info = {0};

	rte_mcp_ptr_list = malloc(sizeof(void *) * 1);
	if (!rte_mcp_ptr_list) {
		DPAA2_BUS_ERR("Unable to allocate MC portal memory");
		ret = -ENOMEM;
		goto cleanup;
	}

	dev_name = strdup(dev->device.name);
	if (!dev_name) {
		DPAA2_BUS_ERR("Unable to allocate MC device name memory");
		ret = -ENOMEM;
		goto cleanup;
	}

	v_addr = vfio_map_mcp_obj(&vfio_group, dev_name);
	if (v_addr == (intptr_t)MAP_FAILED) {
		DPAA2_BUS_ERR("Error mapping region (errno = %d)", errno);
		ret = -1;
		goto cleanup;
	}

	/* check the MC version compatibility */
	dpmng.regs = (void *)v_addr;

	/* In case of secondary processes, MC version check is no longer
	 * required.
	 */
	if (rte_eal_process_type() == RTE_PROC_SECONDARY) {
		rte_mcp_ptr_list[0] = (void *)v_addr;
		free(dev_name);
		return 0;
	}

	if (mc_get_version(&dpmng, CMD_PRI_LOW, &mc_ver_info)) {
		DPAA2_BUS_ERR("Unable to obtain MC version");
		ret = -1;
		goto cleanup;
	}

	if ((mc_ver_info.major != MC_VER_MAJOR) ||
	    (mc_ver_info.minor < MC_VER_MINOR)) {
		DPAA2_BUS_ERR("DPAA2 MC version not compatible!"
			      " Expected %d.%d.x, Detected %d.%d.%d",
			      MC_VER_MAJOR, MC_VER_MINOR,
			      mc_ver_info.major, mc_ver_info.minor,
			      mc_ver_info.revision);
		ret = -1;
		goto cleanup;
	}
	rte_mcp_ptr_list[0] = (void *)v_addr;

	free(dev_name);
	return 0;

cleanup:
	if (dev_name)
		free(dev_name);

	if (rte_mcp_ptr_list) {
		free(rte_mcp_ptr_list);
		rte_mcp_ptr_list = NULL;
	}

	return ret;
}

int
fslmc_vfio_process_group(void)
{
	int ret;
	int found_mportal = 0;
	struct rte_dpaa2_device *dev, *dev_temp;

	/* Search the MCP as that should be initialized first. */
	TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
		if (dev->dev_type == DPAA2_MPORTAL) {
			if (dev->device.devargs &&
			    dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
				DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
					      dev->device.name);
				TAILQ_REMOVE(&rte_fslmc_bus.device_list,
					     dev, next);
				continue;
			}

			ret = fslmc_process_mcp(dev);
			if (ret) {
				DPAA2_BUS_ERR("Unable to map MC Portal");
				return -1;
			}
			if (!found_mportal)
				found_mportal = 1;

			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			free(dev);
			dev = NULL;
			/* Ideally there is only a single dpmcp, but in case
			 * multiple exist, loop over the remaining devices.
			 */
		}
	}

	/* Cannot continue if there is not even a single mportal */
	if (!found_mportal) {
		DPAA2_BUS_ERR("No MC Portal device found. Not continuing");
		return -1;
	}

	TAILQ_FOREACH_SAFE(dev, &rte_fslmc_bus.device_list, next, dev_temp) {
		if (dev->device.devargs &&
		    dev->device.devargs->policy == RTE_DEV_BLACKLISTED) {
			DPAA2_BUS_LOG(DEBUG, "%s Blacklisted, skipping",
				      dev->device.name);
			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			continue;
		}
		switch (dev->dev_type) {
		case DPAA2_ETH:
		case DPAA2_CRYPTO:
		case DPAA2_QDMA:
			ret = fslmc_process_iodevices(dev);
			if (ret) {
				DPAA2_BUS_DEBUG("Dev (%s) init failed",
						dev->device.name);
				return ret;
			}
			break;
		case DPAA2_CON:
		case DPAA2_CI:
		case DPAA2_BPOOL:
		case DPAA2_MUX:
			/* In case of secondary processes, all control objects
			 * like dpbp, dpcon, dpci are not initialized/required
			 * - all of these are assumed to be initialized and
			 * made available by the primary process.
			 */
			if (rte_eal_process_type() == RTE_PROC_SECONDARY)
				continue;

			/* Call the object creation routine and remove the
			 * device entry from device list
			 */
			ret = fslmc_process_iodevices(dev);
			if (ret) {
				DPAA2_BUS_DEBUG("Dev (%s) init failed",
						dev->device.name);
				return -1;
			}

			break;
		case DPAA2_IO:
			ret = fslmc_process_iodevices(dev);
			if (ret) {
				DPAA2_BUS_DEBUG("Dev (%s) init failed",
						dev->device.name);
				return -1;
			}

			break;
		case DPAA2_UNKNOWN:
		default:
			/* Unknown - ignore */
			DPAA2_BUS_DEBUG("Found unknown device (%s)",
					dev->device.name);
			TAILQ_REMOVE(&rte_fslmc_bus.device_list, dev, next);
			free(dev);
			dev = NULL;
		}
	}

	return 0;
}

int
fslmc_vfio_setup_group(void)
{
	int groupid;
	int ret;
	struct vfio_group_status status = { .argsz = sizeof(status) };

	/* if already done once */
	if (container_device_fd)
		return 0;

	ret = fslmc_get_container_group(&groupid);
	if (ret)
		return ret;

	/* In case this group was already opened, continue without any
	 * processing.
	 */
	if (vfio_group.groupid == groupid) {
		DPAA2_BUS_ERR("groupid already exists %d", groupid);
		return 0;
	}

	/* Get the actual group fd */
	ret = rte_vfio_get_group_fd(groupid);
	if (ret < 0)
		return ret;
	vfio_group.fd = ret;

	/* Check group viability */
	ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_STATUS, &status);
	if (ret) {
		DPAA2_BUS_ERR("VFIO error getting group status");
		close(vfio_group.fd);
		rte_vfio_clear_group(vfio_group.fd);
		return ret;
	}

	if (!(status.flags & VFIO_GROUP_FLAGS_VIABLE)) {
		DPAA2_BUS_ERR("VFIO group not viable");
		close(vfio_group.fd);
		rte_vfio_clear_group(vfio_group.fd);
		return -EPERM;
	}
	/* Since the group is viable, store the groupid */
	vfio_group.groupid = groupid;

	/* check if group does not have a container yet */
	if (!(status.flags & VFIO_GROUP_FLAGS_CONTAINER_SET)) {
		/* Now connect this IOMMU group to given container */
		ret = vfio_connect_container();
		if (ret) {
			DPAA2_BUS_ERR(
				"Error connecting container with groupid %d",
				groupid);
			close(vfio_group.fd);
			rte_vfio_clear_group(vfio_group.fd);
			return ret;
		}
	}

	/* Get the fd of the container device itself (the dprc object) */
	ret = ioctl(vfio_group.fd, VFIO_GROUP_GET_DEVICE_FD, g_container);
	if (ret < 0) {
		DPAA2_BUS_ERR("Error getting device %s fd from group %d",
			      g_container, vfio_group.groupid);
		close(vfio_group.fd);
		rte_vfio_clear_group(vfio_group.fd);
		return ret;
	}
	container_device_fd = ret;
	DPAA2_BUS_DEBUG("VFIO Container device FD is [0x%X]",
			container_device_fd);

	return 0;
}
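
/*
 * How these entry points fit together (summary): fslmc_vfio_setup_group()
 * opens the VFIO group and connects the container; fslmc_vfio_process_group()
 * maps the MC portal and instantiates the scanned objects;
 * rte_fslmc_vfio_dmamap() populates the IOMMU with the current memsegs and
 * installs the hotplug callback. All three are invoked by the fslmc bus
 * driver during probe.
 */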