4 * Copyright (c) Intel Corporation.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
11 * * Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * * Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
17 * * Neither the name of Intel Corporation nor the names of its
18 * contributors may be used to endorse or promote products derived
19 * from this software without specific prior written permission.
21 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
22 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
23 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
24 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
25 * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
26 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
27 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
28 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
29 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
30 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
31 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
36 #include "spdk/stdinc.h"
/*
 * Textual names for the PCIe capability device/port type field
 * (express_cap_register.bit_field.device_type), indexed directly by that
 * field's value.  Encodings 2 and 3 are reserved by the PCIe specification
 * and must keep placeholder slots so the later entries stay aligned with
 * their encodings (Root Port is encoding 4).
 *
 * Declared const char * (not unsigned char *): string literals are
 * read-only and only ever printed via %s.
 */
static const char *const device_type[] = {
	"PCI Express Endpoint",
	"Legacy PCI Express Endpoint",
	"Reserved 1",
	"Reserved 2",
	"Root Port of PCI Express Root Complex",
	"Upstream Port of PCI Express Switch",
	"Downstream Port of PCI Express Switch",
	"PCI Express to PCI/PCI-X Bridge",
	"PCI/PCI-X to PCI Express Bridge",
	"Root Complex Integrated Endpoint",
	"Root Complex Event Collector",
};
54 * Container for all VMD adapter probed in the system.
56 struct vmd_container
{
58 struct vmd_adapter vmd
[MAX_VMD_SUPPORTED
];
61 static struct vmd_container g_vmd_container
;
62 static uint8_t g_end_device_count
;
65 vmd_is_valid_cfg_addr(struct vmd_pci_bus
*bus
, uint64_t addr
)
67 return addr
>= (uint64_t)bus
->vmd
->cfg_vaddr
&&
68 addr
< bus
->vmd
->cfgbar_size
+ (uint64_t)bus
->vmd
->cfg_vaddr
;
72 vmd_align_base_addrs(struct vmd_adapter
*vmd
, uint32_t alignment
)
76 * Device is not in hot plug path, align the base address remaining from membar 1.
78 if (vmd
->physical_addr
& (alignment
- 1)) {
79 pad
= alignment
- (vmd
->physical_addr
& (alignment
- 1));
80 vmd
->physical_addr
+= pad
;
81 vmd
->current_addr_size
-= pad
;
86 * Allocates an address from vmd membar for the input memory size
87 * vmdAdapter - vmd adapter object
88 * dev - vmd_pci_device to allocate a base address for.
89 * size - size of the memory window requested.
90 * Size must be an integral multiple of 2. Addresses are returned on the size boundary.
91 * Returns physical address within the VMD membar window, or 0x0 if cannot allocate window.
92 * Consider increasing the size of vmd membar if 0x0 is returned.
95 vmd_allocate_base_addr(struct vmd_adapter
*vmd
, struct vmd_pci_device
*dev
, uint32_t size
)
97 uint64_t base_address
= 0;
98 struct vmd_pci_bus
*hp_bus
;
100 if (size
&& ((size
& (~size
+ 1)) != size
)) {
105 * If device is downstream of a hot plug port, allocate address from the
106 * range dedicated for the hot plug slot. Search the list of addresses allocated to determine
107 * if a free range exists that satisfy the input request. If a free range cannot be found,
108 * get a buffer from the unused chunk. First fit algorithm, is used.
111 hp_bus
= vmd_is_dev_in_hotplug_path(dev
);
112 if (hp_bus
&& hp_bus
->self
) {
113 return vmd_hp_allocate_base_addr(hp_bus
->self
->hp
, size
);
117 /* Ensure physical membar allocated is size aligned */
118 if (vmd
->physical_addr
& (size
- 1)) {
119 uint32_t pad
= size
- (vmd
->physical_addr
& (size
- 1));
120 vmd
->physical_addr
+= pad
;
121 vmd
->current_addr_size
-= pad
;
124 /* Allocate from membar if enough memory is left */
125 if (vmd
->current_addr_size
>= size
) {
126 base_address
= vmd
->physical_addr
;
127 vmd
->physical_addr
+= size
;
128 vmd
->current_addr_size
-= size
;
131 SPDK_DEBUGLOG(SPDK_LOG_VMD
, "allocated(size) %lx (%x)\n", base_address
, size
);
137 vmd_is_end_device(struct vmd_pci_device
*dev
)
139 return (dev
&& dev
->header
) &&
140 ((dev
->header
->common
.header_type
& ~PCI_MULTI_FUNCTION
) == PCI_HEADER_TYPE_NORMAL
);
144 vmd_update_base_limit_register(struct vmd_pci_device
*dev
, uint16_t base
, uint16_t limit
)
146 struct vmd_pci_bus
*bus
= dev
->parent
;
147 struct vmd_pci_device
*bridge
;
149 if (base
== 0 || limit
== 0) {
153 while (bus
&& bus
->self
!= NULL
) {
156 /* This is only for 32-bit memory space, need to revisit to support 64-bit */
157 if (bridge
->header
->one
.mem_base
> base
) {
158 bridge
->header
->one
.mem_base
= base
;
159 base
= bridge
->header
->one
.mem_base
;
162 if (bridge
->header
->one
.mem_limit
< limit
) {
163 bridge
->header
->one
.mem_limit
= limit
;
164 limit
= bridge
->header
->one
.mem_limit
;
/*
 * Probe each BAR of @dev, allocate backing ranges out of the VMD membar,
 * program the BARs, and track the aggregate base/limit so parent bridges
 * can be updated.  ret_val becomes true once at least one BAR is assigned.
 *
 * NOTE(review): several lines of this function (scope braces, the skip
 * paths after failed BARs, the `vmd = dev->bus->vmd` assignment and the
 * final return) are not visible in this chunk; the comments below describe
 * only the statements that are.
 */
vmd_assign_base_addrs(struct vmd_pci_device *dev)
	uint16_t mem_base = 0, mem_limit = 0;
	unsigned char mem_attr = 0;
	/* Type-1 (bridge) headers expose 2 BARs; type-0 endpoints expose 6. */
	int last = dev->header_type ? 2 : 6;
	struct vmd_adapter *vmd = NULL;
	bool ret_val = false;
	uint32_t table_offset;

	if (dev && dev->bus) {
	/* Align the membar allocation cursor before carving out BARs. */
	vmd_align_base_addrs(vmd, ONE_MB);

	for (int i = 0; i < last; i++) {
		/* Classic BAR sizing: save, write all-ones, read size mask, restore. */
		bar_value = dev->header->zero.BAR[i];
		dev->header->zero.BAR[i] = ~(0U);
		dev->bar[i].size = dev->header->zero.BAR[i];
		dev->header->zero.BAR[i] = bar_value;

		/* Skip unimplemented BARs and I/O-space BARs (bit 0 set). */
		if (dev->bar[i].size == ~(0U) || dev->bar[i].size == 0 ||
		    dev->header->zero.BAR[i] & 1) {
			dev->bar[i].size = 0;
		/* Low attribute bits record prefetchable/64-bit before masking. */
		mem_attr = dev->bar[i].size & PCI_BASE_ADDR_MASK;
		/* Two's complement of the size mask yields the BAR's byte size. */
		dev->bar[i].size = TWOS_COMPLEMENT(dev->bar[i].size & PCI_BASE_ADDR_MASK);
		dev->bar[i].start = vmd_allocate_base_addr(vmd, dev, dev->bar[i].size);
		dev->header->zero.BAR[i] = (uint32_t)dev->bar[i].start;

		if (!dev->bar[i].start) {
			/* A 64-bit BAR occupies two slots; skip its upper half too. */
			if (mem_attr == (PCI_BAR_MEMORY_PREFETCH | PCI_BAR_MEMORY_TYPE_64)) {

		/* CPU-visible address = membar vaddr + BAR's offset inside the membar. */
		dev->bar[i].vaddr = ((uint64_t)vmd->mem_vaddr + (dev->bar[i].start - vmd->membar));
		mem_limit = BRIDGE_BASEREG(dev->header->zero.BAR[i]) +
			    BRIDGE_BASEREG(dev->bar[i].size - 1);
			mem_base = BRIDGE_BASEREG(dev->header->zero.BAR[i]);

		if (mem_attr == (PCI_BAR_MEMORY_PREFETCH | PCI_BAR_MEMORY_TYPE_64)) {
			/* Program the upper 32 bits of the 64-bit BAR pair. */
			dev->header->zero.BAR[i] = (uint32_t)(dev->bar[i].start >> PCI_DWORD_SHIFT);

	/* Enable device MEM and bus mastering */
	dev->header->zero.command |= (PCI_COMMAND_MEMORY | PCI_COMMAND_MASTER);
	/* Read back so the posted config-space write is flushed. */
	uint16_t cmd = dev->header->zero.command;

	/* Locate the MSI-X table: the code masks the offset register with 0x3
	 * to pick the BAR index and with ~0x7 for the table offset. */
	if (dev->msix_cap && ret_val) {
		table_offset = ((volatile struct pci_msix_cap *)dev->msix_cap)->msix_table_offset;
		if (dev->bar[table_offset & 0x3].vaddr) {
			dev->msix_table = (volatile struct pci_msix_table_entry *)
					  (dev->bar[table_offset & 0x3].vaddr + (table_offset & 0xfff8));

	/* Propagate the assigned window to all upstream bridges. */
	if (ret_val && vmd_is_end_device(dev)) {
		vmd_update_base_limit_register(dev, mem_base, mem_limit);
253 vmd_get_device_capabilities(struct vmd_pci_device
*dev
)
256 volatile uint8_t *config_space
;
257 uint8_t capabilities_offset
;
258 struct pci_capabilities_header
*capabilities_hdr
;
260 config_space
= (volatile uint8_t *)dev
->header
;
261 if ((dev
->header
->common
.status
& PCI_CAPABILITIES_LIST
) == 0) {
265 capabilities_offset
= dev
->header
->zero
.cap_pointer
;
266 if (dev
->header
->common
.header_type
& PCI_HEADER_TYPE_BRIDGE
) {
267 capabilities_offset
= dev
->header
->one
.cap_pointer
;
270 while (capabilities_offset
> 0) {
271 capabilities_hdr
= (struct pci_capabilities_header
*)
272 &config_space
[capabilities_offset
];
273 switch (capabilities_hdr
->capability_id
) {
274 case CAPABILITY_ID_PCI_EXPRESS
:
275 dev
->pcie_cap
= (volatile struct pci_express_cap
*)(capabilities_hdr
);
278 case CAPABILITY_ID_MSI
:
279 dev
->msi_cap
= (volatile struct pci_msi_cap
*)capabilities_hdr
;
282 case CAPABILITY_ID_MSIX
:
283 dev
->msix_cap
= (volatile struct pci_msix_capability
*)capabilities_hdr
;
284 dev
->msix_table_size
= dev
->msix_cap
->message_control
.bit
.table_size
+ 1;
290 capabilities_offset
= capabilities_hdr
->next
;
294 static volatile struct pci_enhanced_capability_header
*
295 vmd_get_enhanced_capabilities(struct vmd_pci_device
*dev
, uint16_t capability_id
)
298 uint16_t cap_offset
= EXTENDED_CAPABILITY_OFFSET
;
299 volatile struct pci_enhanced_capability_header
*cap_hdr
= NULL
;
301 data
= (uint8_t *)dev
->header
;
302 while (cap_offset
>= EXTENDED_CAPABILITY_OFFSET
) {
303 cap_hdr
= (volatile struct pci_enhanced_capability_header
*) &data
[cap_offset
];
304 if (cap_hdr
->capability_id
== capability_id
) {
307 cap_offset
= cap_hdr
->next
;
308 if (cap_offset
== 0 || cap_offset
< EXTENDED_CAPABILITY_OFFSET
) {
317 vmd_read_config_space(struct vmd_pci_device
*dev
)
320 * Writes to the pci config space is posted weite. To ensure transaction reaches its destination
321 * before another write is posed, an immediate read of the written value should be performed.
323 dev
->header
->common
.command
|= (BUS_MASTER_ENABLE
| MEMORY_SPACE_ENABLE
);
324 { uint16_t cmd
= dev
->header
->common
.command
; (void)cmd
; }
326 vmd_get_device_capabilities(dev
);
327 dev
->sn_cap
= (struct serial_number_capability
*)vmd_get_enhanced_capabilities(dev
,
328 DEVICE_SERIAL_NUMBER_CAP_ID
);
331 static struct vmd_pci_device
*
332 vmd_alloc_dev(struct vmd_pci_bus
*bus
, uint32_t devfn
)
334 struct vmd_pci_device
*dev
= NULL
;
335 struct pci_header
volatile *header
;
339 header
= (struct pci_header
* volatile)(bus
->vmd
->cfg_vaddr
+
340 CONFIG_OFFSET_ADDR(bus
->bus_number
, devfn
, 0, 0));
341 if (!vmd_is_valid_cfg_addr(bus
, (uint64_t)header
)) {
345 if (header
->common
.vendor_id
== PCI_INVALID_VENDORID
|| header
->common
.vendor_id
== 0x0) {
349 SPDK_DEBUGLOG(SPDK_LOG_VMD
, "PCI device found: %04x:%04x ***\n",
350 header
->common
.vendor_id
, header
->common
.device_id
);
352 dev
= calloc(1, sizeof(*dev
));
357 dev
->header
= header
;
358 dev
->vid
= dev
->header
->common
.vendor_id
;
359 dev
->did
= dev
->header
->common
.device_id
;
363 header_type
= dev
->header
->common
.header_type
;
364 rev_class
= dev
->header
->common
.rev_class
;
365 dev
->class = rev_class
>> 8;
366 dev
->header_type
= header_type
& 0x7;
368 if (header_type
== PCI_HEADER_TYPE_BRIDGE
) {
369 dev
->header
->one
.mem_base
= 0xfff0;
370 dev
->header
->one
.mem_limit
= 0x0;
371 dev
->header
->one
.prefetch_base_upper
= 0x0;
372 dev
->header
->one
.prefetch_limit_upper
= 0x0;
373 dev
->header
->one
.io_base_upper
= 0x0;
374 dev
->header
->one
.io_limit_upper
= 0x0;
375 dev
->header
->one
.primary
= 0;
376 dev
->header
->one
.secondary
= 0;
377 dev
->header
->one
.subordinate
= 0;
380 vmd_read_config_space(dev
);
386 vmd_add_bus_to_list(struct vmd_adapter
*vmd
, struct vmd_pci_bus
*bus
)
388 struct vmd_pci_bus
*blist
;
390 blist
= vmd
->bus_list
;
397 while (blist
->next
!= NULL
) {
405 vmd_pcibus_remove_device(struct vmd_pci_bus
*bus
, struct vmd_pci_device
*device
)
407 struct vmd_pci_device
*list
= bus
->dev_list
;
409 if (list
== device
) {
410 bus
->dev_list
= NULL
;
413 while (list
->next
!= NULL
) {
414 if (list
->next
== device
) {
415 assert(list
->next
->next
);
416 list
->next
= list
->next
->next
;
424 vmd_bus_add_device(struct vmd_pci_bus
*bus
, struct vmd_pci_device
*device
)
426 struct vmd_pci_device
*next_dev
= bus
->dev_list
;
429 if (next_dev
== NULL
) {
430 bus
->dev_list
= device
;
434 while (next_dev
->next
!= NULL
) {
435 next_dev
= next_dev
->next
;
438 next_dev
->next
= device
;
443 static struct vmd_pci_bus
*
444 vmd_create_new_bus(struct vmd_pci_bus
*parent
, struct vmd_pci_device
*bridge
, uint8_t bus_number
)
446 struct vmd_pci_bus
*new_bus
;
448 new_bus
= calloc(1, sizeof(*new_bus
));
453 new_bus
->parent
= parent
;
454 new_bus
->domain
= parent
->domain
;
455 new_bus
->bus_number
= bus_number
;
456 new_bus
->secondary_bus
= new_bus
->subordinate_bus
= bus_number
;
457 new_bus
->self
= bridge
;
458 new_bus
->vmd
= parent
->vmd
;
459 bridge
->subordinate
= new_bus
;
461 bridge
->pci
.addr
.bus
= new_bus
->bus_number
;
462 bridge
->pci
.addr
.dev
= bridge
->devfn
;
463 bridge
->pci
.addr
.func
= 0;
464 bridge
->pci
.addr
.domain
= parent
->vmd
->pci
.addr
.domain
;
470 * Assigns a bus number from the list of available
471 * bus numbers. If the device is downstream of a hot plug port,
472 * assign the bus number from thiose assigned to the HP port. Otherwise,
473 * assign the next bus number from the vmd bus number list.
476 vmd_get_next_bus_number(struct vmd_pci_device
*dev
, struct vmd_adapter
*vmd
)
479 struct vmd_pci_bus
*hp_bus
;
482 hp_bus
= vmd_is_dev_in_hotplug_path(dev
);
483 if (hp_bus
&& hp_bus
->self
&& hp_bus
->self
->hp
) {
484 return vmd_hp_get_next_bus_number(hp_bus
->self
->hp
);
488 /* Device is not under a hot plug path. Return next global bus number */
489 if ((vmd
->next_bus_number
+ 1) < vmd
->max_pci_bus
) {
490 bus
= vmd
->next_bus_number
;
491 vmd
->next_bus_number
++;
497 vmd_get_hotplug_bus_numbers(struct vmd_pci_device
*dev
)
499 uint8_t bus_number
= 0xff;
501 if (dev
&& dev
->bus
&& dev
->bus
->vmd
&&
502 ((dev
->bus
->vmd
->next_bus_number
+ RESERVED_HOTPLUG_BUSES
) < dev
->bus
->vmd
->max_pci_bus
)) {
503 bus_number
= RESERVED_HOTPLUG_BUSES
;
504 dev
->bus
->vmd
->next_bus_number
+= RESERVED_HOTPLUG_BUSES
;
511 vmd_enable_msix(struct vmd_pci_device
*dev
)
513 volatile uint16_t control
;
515 control
= dev
->msix_cap
->message_control
.as_uint16_t
| (1 << 14);
516 dev
->msix_cap
->message_control
.as_uint16_t
= control
;
517 control
= dev
->msix_cap
->message_control
.as_uint16_t
;
518 dev
->msix_cap
->message_control
.as_uint16_t
= (control
| (1 << 15));
519 control
= dev
->msix_cap
->message_control
.as_uint16_t
;
520 control
= control
& ~(1 << 14);
521 dev
->msix_cap
->message_control
.as_uint16_t
= control
;
522 control
= dev
->msix_cap
->message_control
.as_uint16_t
;
526 vmd_disable_msix(struct vmd_pci_device
*dev
)
528 volatile uint16_t control
;
530 control
= dev
->msix_cap
->message_control
.as_uint16_t
| (1 << 14);
531 dev
->msix_cap
->message_control
.as_uint16_t
= control
;
532 control
= dev
->msix_cap
->message_control
.as_uint16_t
& ~(1 << 15);
533 dev
->msix_cap
->message_control
.as_uint16_t
= control
;
534 control
= dev
->msix_cap
->message_control
.as_uint16_t
;
538 * Set up MSI-X table entries for the port. Vmd MSIX vector 0 is used for
539 * port interrupt, so vector 0 is mapped to all MSIX entries for the port.
542 vmd_setup_msix(struct vmd_pci_device
*dev
, volatile struct pci_msix_table_entry
*vmdEntry
)
546 if (!dev
|| !vmdEntry
|| !dev
->msix_cap
) {
550 vmd_disable_msix(dev
);
551 if (dev
->msix_table
== NULL
|| dev
->msix_table_size
> MAX_MSIX_TABLE_SIZE
) {
555 for (entry
= 0; entry
< dev
->msix_table_size
; ++entry
) {
556 dev
->msix_table
[entry
].vector_control
= 1;
558 vmd_enable_msix(dev
);
562 vmd_bus_update_bridge_info(struct vmd_pci_device
*bridge
)
564 /* Update the subordinate bus of all bridges above this bridge */
565 volatile struct vmd_pci_device
*dev
= bridge
;
566 uint8_t subordinate_bus
;
571 subordinate_bus
= bridge
->header
->one
.subordinate
;
572 while (dev
->parent_bridge
!= NULL
) {
573 dev
= dev
->parent_bridge
;
574 if (dev
->header
->one
.subordinate
< subordinate_bus
) {
575 dev
->header
->one
.subordinate
= subordinate_bus
;
576 subordinate_bus
= dev
->header
->one
.subordinate
;
582 vmd_is_supported_device(struct vmd_pci_device
*dev
)
584 return dev
->class == PCI_CLASS_STORAGE_EXPRESS
;
588 vmd_dev_map_bar(struct spdk_pci_device
*pci_dev
, uint32_t bar
,
589 void **mapped_addr
, uint64_t *phys_addr
, uint64_t *size
)
591 struct vmd_pci_device
*dev
= SPDK_CONTAINEROF(pci_dev
, struct vmd_pci_device
, pci
);
593 *size
= dev
->bar
[bar
].size
;
594 *phys_addr
= dev
->bar
[bar
].start
;
595 *mapped_addr
= (void *)dev
->bar
[bar
].vaddr
;
/*
 * spdk_pci_device unmap_bar callback: mappings are owned by the VMD
 * adapter for its whole lifetime, so there is nothing to undo here.
 */
static int
vmd_dev_unmap_bar(struct spdk_pci_device *_dev, uint32_t bar, void *addr)
{
	return 0;
}
607 vmd_dev_cfg_read(struct spdk_pci_device
*_dev
, void *value
, uint32_t len
,
610 struct vmd_pci_device
*dev
= SPDK_CONTAINEROF(_dev
, struct vmd_pci_device
, pci
);
611 volatile uint8_t *src
= (volatile uint8_t *)dev
->header
;
612 uint8_t *dst
= value
;
615 if (len
+ offset
> PCI_MAX_CFG_SIZE
) {
619 for (i
= 0; i
< len
; ++i
) {
620 dst
[i
] = src
[offset
+ i
];
627 vmd_dev_cfg_write(struct spdk_pci_device
*_dev
, void *value
,
628 uint32_t len
, uint32_t offset
)
630 struct vmd_pci_device
*dev
= SPDK_CONTAINEROF(_dev
, struct vmd_pci_device
, pci
);
631 volatile uint8_t *dst
= (volatile uint8_t *)dev
->header
;
632 uint8_t *src
= value
;
635 if ((len
+ offset
) > PCI_MAX_CFG_SIZE
) {
639 for (i
= 0; i
< len
; ++i
) {
640 dst
[offset
+ i
] = src
[i
];
/*
 * spdk_pci_device detach callback.
 * NOTE(review): the body is not visible in this chunk; it appears to be a
 * no-op placeholder — confirm against the full file before relying on it.
 */
static void
vmd_dev_detach(struct spdk_pci_device *dev)
{
}
653 vmd_dev_init(struct vmd_pci_device
*dev
)
657 dev
->pci
.addr
.domain
= dev
->bus
->vmd
->domain
;
658 dev
->pci
.addr
.bus
= dev
->bus
->bus_number
;
659 dev
->pci
.addr
.dev
= dev
->devfn
;
660 dev
->pci
.addr
.func
= 0;
661 dev
->pci
.id
.vendor_id
= dev
->header
->common
.vendor_id
;
662 dev
->pci
.id
.device_id
= dev
->header
->common
.device_id
;
663 dev
->pci
.map_bar
= vmd_dev_map_bar
;
664 dev
->pci
.unmap_bar
= vmd_dev_unmap_bar
;
665 dev
->pci
.cfg_read
= vmd_dev_cfg_read
;
666 dev
->pci
.cfg_write
= vmd_dev_cfg_write
;
667 dev
->pci
.detach
= vmd_dev_detach
;
669 if (vmd_is_supported_device(dev
)) {
670 spdk_pci_addr_fmt(bdf
, sizeof(bdf
), &dev
->pci
.addr
);
671 SPDK_DEBUGLOG(SPDK_LOG_VMD
, "Initalizing NVMe device at %s\n", bdf
);
672 spdk_pci_hook_device(spdk_pci_nvme_get_driver(), &dev
->pci
);
/*
 * Scans a single bus for all devices attached and returns a count of how
 * many devices were found.  In the VMD topology it is assumed there are no
 * multi-function devices; hence a bus (bridge) will not have multi-function
 * devices of both header types.
 *
 * The other option for implementing this function is the bus is an int and
 * create a new device PciBridge.  PciBridge would inherit from PciDevice
 * with extra fields, sub/pri/sec bus.  The input becomes PciPort, bus number
 * and parent_bridge.
 *
 * The bus number is scanned and if a device is found, based on the
 * header_type, create either PciBridge (type 1) or PciDevice (type 0).
 *
 * If a PciBridge, assign bus numbers and rescan the new bus recursively;
 * the PciBridge currently being scanned becomes the passed-in parent_bridge
 * with the new bus number.  The linked list becomes a list of PciBridges
 * with PciDevices attached.
 *
 * Returns count of how many devices found (type 1 + type 0 header devices).
 *
 * NOTE(review): some lines of this function (brace closures, the
 * `continue`/`break` paths, the declaration of new_bus_num, and the
 * assignment of `vmd` used for the nvme target bookkeeping) are not
 * visible in this chunk; comments below describe only visible statements.
 */
vmd_scan_single_bus(struct vmd_pci_bus *bus, struct vmd_pci_device *parent_bridge)
	/* assuming only single function devices are on the bus */
	struct vmd_pci_device *new_dev;
	struct vmd_adapter *vmd;
	union express_slot_capabilities_register slot_cap;
	struct vmd_pci_bus *new_bus;
	uint8_t device_number, dev_cnt = 0;

	/* Probe every device number (0..31) on this bus, function 0 only. */
	for (device_number = 0; device_number < 32; device_number++) {
		new_dev = vmd_alloc_dev(bus, device_number);
		if (new_dev == NULL) {

		/* Bridge (type-1 header): allocate a child bus and recurse. */
		if (new_dev->header->common.header_type & PCI_HEADER_TYPE_BRIDGE) {
			slot_cap.as_uint32_t = 0;
			if (new_dev->pcie_cap != NULL) {
				slot_cap.as_uint32_t = new_dev->pcie_cap->slot_cap.as_uint32_t;

			/* During a hotplug rescan, bus numbers come from the HP port pool. */
			new_bus_num = vmd_get_next_bus_number(bus->vmd->is_hotplug_scan ? new_dev : NULL, bus->vmd);
			if (new_bus_num == 0xff) {

			new_bus = vmd_create_new_bus(bus, new_dev, new_bus_num);

			new_bus->primary_bus = bus->secondary_bus;
			new_bus->self = new_dev;
			new_dev->bus_object = new_bus;

			/* Reserve extra downstream bus numbers for hot-plug capable ports. */
			if (slot_cap.bit_field.hotplug_capable) {
				new_bus->hotplug_buses = vmd_get_hotplug_bus_numbers(new_dev);
				new_bus->subordinate_bus += new_bus->hotplug_buses;

			new_dev->parent_bridge = parent_bridge;
			/* Program the bridge's bus routing registers. */
			new_dev->header->one.primary = new_bus->primary_bus;
			new_dev->header->one.secondary = new_bus->secondary_bus;
			new_dev->header->one.subordinate = new_bus->subordinate_bus;

			vmd_bus_update_bridge_info(new_dev);
			vmd_add_bus_to_list(bus->vmd, new_bus);

			/* Attach hot plug instance if HP is supported */
			if (slot_cap.bit_field.hotplug_capable) {
				new_dev->hp = vmd_new_hotplug(new_bus, new_bus->hotplug_buses);

			vmd_dev_init(new_dev);

			/* Depth-first: scan the new secondary bus before the next devfn. */
			dev_cnt += vmd_scan_single_bus(new_bus, new_dev);
			if (new_dev->pcie_cap != NULL) {
				if (new_dev->pcie_cap->express_cap_register.bit_field.device_type == SwitchUpstreamPort) {

			/* Attach the device to the current bus and assign base addresses */
			vmd_bus_add_device(bus, new_dev);
			g_end_device_count++;
			if (vmd_assign_base_addrs(new_dev)) {
				/* All port MSI-X entries are mapped to VMD vector 0. */
				vmd_setup_msix(new_dev, &bus->vmd->msix_table[0]);
				vmd_dev_init(new_dev);
				if (vmd_is_supported_device(new_dev)) {
					vmd->target[vmd->nvme_count] = new_dev;

				/* BAR assignment failed: drop the device from the bus. */
				SPDK_DEBUGLOG(SPDK_LOG_VMD, "Removing failed device:%p\n", new_dev);
				vmd_pcibus_remove_device(bus, new_dev);
785 vmd_print_pci_info(struct vmd_pci_device
*dev
)
791 if (dev
->pcie_cap
!= NULL
) {
792 SPDK_INFOLOG(SPDK_LOG_VMD
, "PCI DEVICE: [%04X:%04X] type(%x) : %s\n",
793 dev
->header
->common
.vendor_id
, dev
->header
->common
.device_id
,
794 dev
->pcie_cap
->express_cap_register
.bit_field
.device_type
,
795 device_type
[dev
->pcie_cap
->express_cap_register
.bit_field
.device_type
]);
797 SPDK_INFOLOG(SPDK_LOG_VMD
, "PCI DEVICE: [%04X:%04X]\n",
798 dev
->header
->common
.vendor_id
, dev
->header
->common
.device_id
);
801 SPDK_INFOLOG(SPDK_LOG_VMD
, "\tDOMAIN:BDF: %04x:%02x:%02x:%x\n", dev
->pci
.addr
.domain
,
802 dev
->pci
.addr
.bus
, dev
->pci
.addr
.dev
, dev
->pci
.addr
.func
);
804 if (!(dev
->header_type
& PCI_HEADER_TYPE_BRIDGE
) && dev
->bus
) {
805 SPDK_INFOLOG(SPDK_LOG_VMD
, "\tbase addr: %x : %p\n",
806 dev
->header
->zero
.BAR
[0], (void *)dev
->bar
[0].vaddr
);
809 if ((dev
->header_type
& PCI_HEADER_TYPE_BRIDGE
)) {
810 SPDK_INFOLOG(SPDK_LOG_VMD
, "\tPrimary = %d, Secondary = %d, Subordinate = %d\n",
811 dev
->header
->one
.primary
, dev
->header
->one
.secondary
, dev
->header
->one
.subordinate
);
812 if (dev
->pcie_cap
&& dev
->pcie_cap
->express_cap_register
.bit_field
.slot_implemented
) {
813 SPDK_INFOLOG(SPDK_LOG_VMD
, "\tSlot implemented on this device.\n");
814 if (dev
->pcie_cap
->slot_cap
.bit_field
.hotplug_capable
) {
815 SPDK_INFOLOG(SPDK_LOG_VMD
, "Device has HOT-PLUG capable slot.\n");
820 if (dev
->sn_cap
!= NULL
) {
821 uint8_t *snLow
= (uint8_t *)&dev
->sn_cap
->sn_low
;
822 uint8_t *snHi
= (uint8_t *)&dev
->sn_cap
->sn_hi
;
824 SPDK_INFOLOG(SPDK_LOG_VMD
, "\tSN: %02x-%02x-%02x-%02x-%02x-%02x-%02x-%02x\n",
825 snHi
[3], snHi
[2], snHi
[1], snHi
[0], snLow
[3], snLow
[2], snLow
[1], snLow
[0]);
830 vmd_pci_print(struct vmd_pci_bus
*bus_list
)
832 struct vmd_pci_bus
*bus
= bus_list
;
833 struct vmd_pci_device
*dev
;
835 SPDK_INFOLOG(SPDK_LOG_VMD
, "\n ...PCIE devices attached to VMD %04x:%02x:%02x:%x...\n",
836 bus_list
->vmd
->pci
.addr
.domain
, bus_list
->vmd
->pci
.addr
.bus
,
837 bus_list
->vmd
->pci
.addr
.dev
, bus_list
->vmd
->pci
.addr
.func
);
838 SPDK_INFOLOG(SPDK_LOG_VMD
, "----------------------------------------------\n");
840 while (bus
!= NULL
) {
841 vmd_print_pci_info(bus
->self
);
843 while (dev
!= NULL
) {
844 vmd_print_pci_info(dev
);
852 vmd_scan_pcibus(struct vmd_pci_bus
*bus
)
856 g_end_device_count
= 0;
857 vmd_add_bus_to_list(bus
->vmd
, bus
);
858 bus
->vmd
->next_bus_number
= bus
->bus_number
+ 1;
859 dev_cnt
= vmd_scan_single_bus(bus
, NULL
);
861 SPDK_DEBUGLOG(SPDK_LOG_VMD
, "\tVMD scan found %u devices\n", dev_cnt
);
862 SPDK_DEBUGLOG(SPDK_LOG_VMD
, "\tVMD scan found %u END DEVICES\n", g_end_device_count
);
864 vmd_pci_print(bus
->vmd
->bus_list
);
871 vmd_map_bars(struct vmd_adapter
*vmd
, struct spdk_pci_device
*dev
)
873 int rc
= spdk_pci_device_map_bar(dev
, 0, (void **)&vmd
->cfg_vaddr
,
874 &vmd
->cfgbar
, &vmd
->cfgbar_size
);
876 rc
= spdk_pci_device_map_bar(dev
, 2, (void **)&vmd
->mem_vaddr
,
877 &vmd
->membar
, &vmd
->membar_size
);
881 rc
= spdk_pci_device_map_bar(dev
, 4, (void **)&vmd
->msix_vaddr
,
882 &vmd
->msixbar
, &vmd
->msixbar_size
);
886 vmd
->physical_addr
= vmd
->membar
;
887 vmd
->current_addr_size
= vmd
->membar_size
;
893 vmd_enumerate_devices(struct vmd_adapter
*vmd
)
895 vmd
->vmd_bus
.vmd
= vmd
;
896 vmd
->vmd_bus
.secondary_bus
= vmd
->vmd_bus
.subordinate_bus
= 0;
897 vmd
->vmd_bus
.primary_bus
= vmd
->vmd_bus
.bus_number
= 0;
898 vmd
->vmd_bus
.domain
= vmd
->pci
.addr
.domain
;
900 return vmd_scan_pcibus(&vmd
->vmd_bus
);
/*
 * spdk_pci_enumerate() callback invoked once per VMD endpoint on the host:
 * enables the VMD, maps its BARs, records it in the vmd_container passed
 * via @ctx, and enumerates the PCIe hierarchy behind it.
 *
 * NOTE(review): the declarations of `bdf` and `i`, the capacity check
 * against the container size, and the return statements are not visible in
 * this chunk; comments describe only visible statements.
 */
vmd_enum_cb(void *ctx, struct spdk_pci_device *pci_dev)
	uint32_t cmd_reg = 0;
	struct vmd_container *vmd_c = ctx;

	/* Set Memory Space Enable + Bus Master Enable in the PCI command register. */
	spdk_pci_device_cfg_read32(pci_dev, &cmd_reg, 4);
	cmd_reg |= 0x6; /* PCI bus master/memory enable. */
	spdk_pci_device_cfg_write32(pci_dev, cmd_reg, 4);

	spdk_pci_addr_fmt(bdf, sizeof(bdf), &pci_dev->addr);
	SPDK_DEBUGLOG(SPDK_LOG_VMD, "Found a VMD[ %d ] at %s\n", vmd_c->count, bdf);

	/* Record this adapter in container slot `i`. */
	vmd_c->vmd[i].pci = *pci_dev;
	vmd_c->vmd[i].vmd_index = i;
	/* Synthesize a unique PCI domain for downstream devices from the VMD's BDF. */
	vmd_c->vmd[i].domain =
		(pci_dev->addr.bus << 16) | (pci_dev->addr.dev << 8) | pci_dev->addr.func;
	vmd_c->vmd[i].max_pci_bus = PCI_MAX_BUS_NUMBER;
	if (vmd_map_bars(&vmd_c->vmd[i], pci_dev) == -1) {

	SPDK_DEBUGLOG(SPDK_LOG_VMD, "vmd config bar(%p) vaddr(%p) size(%x)\n",
		      (void *)vmd_c->vmd[i].cfgbar, (void *)vmd_c->vmd[i].cfg_vaddr,
		      (uint32_t)vmd_c->vmd[i].cfgbar_size);
	SPDK_DEBUGLOG(SPDK_LOG_VMD, "vmd mem bar(%p) vaddr(%p) size(%x)\n",
		      (void *)vmd_c->vmd[i].membar, (void *)vmd_c->vmd[i].mem_vaddr,
		      (uint32_t)vmd_c->vmd[i].membar_size);
	SPDK_DEBUGLOG(SPDK_LOG_VMD, "vmd msix bar(%p) vaddr(%p) size(%x)\n\n",
		      (void *)vmd_c->vmd[i].msixbar, (void *)vmd_c->vmd[i].msix_vaddr,
		      (uint32_t)vmd_c->vmd[i].msixbar_size);

	vmd_c->count = i + 1;

	/* Scan the hierarchy behind the freshly mapped adapter. */
	vmd_enumerate_devices(&vmd_c->vmd[i]);
/*
 * Public API: copy the spdk_pci_device handles of all devices behind the
 * VMD whose PCI address matches @vmd_addr into @nvme_list.
 *
 * NOTE(review): the `cnt` declaration, the initialization `dev =
 * bus->dev_list;`, the body of the is_hooked branch, the list-advance
 * statements and the return are not visible in this chunk.
 */
spdk_vmd_pci_device_list(struct spdk_pci_addr vmd_addr, struct spdk_pci_device *nvme_list)
	struct vmd_pci_bus *bus;
	struct vmd_pci_device *dev;

	/* Find the probed adapter whose PCI address matches the request. */
	for (int i = 0; i < MAX_VMD_TARGET; ++i) {
		if (spdk_pci_addr_compare(&vmd_addr, &g_vmd_container.vmd[i].pci.addr) == 0) {
			bus = g_vmd_container.vmd[i].bus_list;
			/* Walk every bus behind this VMD and every device on each bus. */
			while (bus != NULL) {
				while (dev != NULL) {
					nvme_list[cnt++] = dev->pci;
					/* Devices not yet presented to SPDK get hooked here. */
					if (!dev->is_hooked) {
	/* NOTE(review): this return belongs to an enclosing function (likely
	 * spdk_vmd_init) whose signature is not visible in this chunk: it
	 * enumerates all VMD controllers on the host, invoking vmd_enum_cb
	 * for each with the global container as context. */
	return spdk_pci_enumerate(spdk_pci_vmd_get_driver(), vmd_enum_cb, &g_vmd_container);

/* Register the "vmd" component with the SPDK logging framework. */
SPDK_LOG_REGISTER_COMPONENT("vmd", SPDK_LOG_VMD)