/*-
 *   BSD LICENSE
 *
 *   Copyright(c) 2010-2014 Intel Corporation. All rights reserved.
 *
 *   Redistribution and use in source and binary forms, with or without
 *   modification, are permitted provided that the following conditions
 *   are met:
 *
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in
 *       the documentation and/or other materials provided with the
 *       distribution.
 *     * Neither the name of Intel Corporation nor the names of its
 *       contributors may be used to endorse or promote products derived
 *       from this software without specific prior written permission.
 *
 *   THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 *   "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 *   LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 *   A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 *   OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 *   SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 *   LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 *   DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 *   THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 *   (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 *   OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include <stdint.h>

#ifdef RTE_EXEC_ENV_LINUXAPP
 #include <dirent.h>
 #include <fcntl.h>
#endif

#include <rte_io.h>

#include "virtio_pci.h"
#include "virtio_logs.h"
#include "virtqueue.h"

/*
 * The following macros are derived from linux/pci_regs.h, however,
 * we can't simply include that header here, as there is no such
 * file for non-Linux platforms.
 */
#define PCI_CAPABILITY_LIST	0x34
#define PCI_CAP_ID_VNDR		0x09
#define PCI_CAP_ID_MSIX		0x11

/*
 * The remaining space is defined by each driver as the per-driver
 * configuration space.
 */
#define VIRTIO_PCI_CONFIG(hw) (((hw)->use_msix) ? 24 : 20)

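/*
 * The 20/24 split mirrors the legacy virtio PCI header layout: the fixed
 * registers (host/guest features, queue PFN, size, select, notify, device
 * status, ISR) occupy the first 20 bytes, and enabling MSI-X inserts two
 * extra 16-bit vector registers, pushing the device-specific config out
 * to byte 24.
 */
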
static inline int
check_vq_phys_addr_ok(struct virtqueue *vq)
{
	/* Virtio PCI device VIRTIO_PCI_QUEUE_PFN register is 32bit,
	 * and only accepts 32 bit page frame number.
	 * Check if the allocated physical memory exceeds 16TB.
	 */
	if ((vq->vq_ring_mem + vq->vq_ring_size - 1) >>
			(VIRTIO_PCI_QUEUE_ADDR_SHIFT + 32)) {
		PMD_INIT_LOG(ERR, "vring address shouldn't be above 16TB!");
		return 0;
	}

	return 1;
}

/*
 * Since we are in legacy mode:
 * http://ozlabs.org/~rusty/virtio-spec/virtio-0.9.5.pdf
 *
 * "Note that this is possible because while the virtio header is PCI (i.e.
 * little) endian, the device-specific region is encoded in the native endian of
 * the guest (where such distinction is applicable)."
 *
 * For powerpc, which supports both endiannesses, qemu assumes the cpu is
 * big endian and enforces this for the virtio-net stuff.
 */

static void
legacy_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
#ifdef RTE_ARCH_PPC_64
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint32_t *)dst = rte_be_to_cpu_32(*(uint32_t *)dst);
		} else if (length >= 2) {
			size = 2;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
			*(uint16_t *)dst = rte_be_to_cpu_16(*(uint16_t *)dst);
		} else {
			size = 1;
			rte_pci_ioport_read(VTPCI_IO(hw), dst, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		dst = (char *)dst + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_read(VTPCI_IO(hw), dst, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static void
legacy_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
#ifdef RTE_ARCH_PPC_64
	union {
		uint32_t u32;
		uint16_t u16;
	} tmp;
	int size;

	while (length > 0) {
		if (length >= 4) {
			size = 4;
			tmp.u32 = rte_cpu_to_be_32(*(const uint32_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u32, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else if (length >= 2) {
			size = 2;
			tmp.u16 = rte_cpu_to_be_16(*(const uint16_t *)src);
			rte_pci_ioport_write(VTPCI_IO(hw), &tmp.u16, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		} else {
			size = 1;
			rte_pci_ioport_write(VTPCI_IO(hw), src, size,
				VIRTIO_PCI_CONFIG(hw) + offset);
		}

		src = (const char *)src + size;
		offset += size;
		length -= size;
	}
#else
	rte_pci_ioport_write(VTPCI_IO(hw), src, length,
		VIRTIO_PCI_CONFIG(hw) + offset);
#endif
}

static uint64_t
legacy_get_features(struct virtio_hw *hw)
{
	uint32_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 4, VIRTIO_PCI_HOST_FEATURES);
	return dst;
}

static void
legacy_set_features(struct virtio_hw *hw, uint64_t features)
{
	if ((features >> 32) != 0) {
		PMD_INIT_LOG(ERR,
			"only 32 bit features are allowed for legacy virtio!");
		return;
	}
	rte_pci_ioport_write(VTPCI_IO(hw), &features, 4,
		VIRTIO_PCI_GUEST_FEATURES);
}

static uint8_t
legacy_get_status(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_STATUS);
	return dst;
}

static void
legacy_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &status, 1, VIRTIO_PCI_STATUS);
}

static void
legacy_reset(struct virtio_hw *hw)
{
	legacy_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
}

static uint8_t
legacy_get_isr(struct virtio_hw *hw)
{
	uint8_t dst;

	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 1, VIRTIO_PCI_ISR);
	return dst;
}

/* Enable one vector (0) for Link State Interrupt */
static uint16_t
legacy_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_CONFIG_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_CONFIG_VECTOR);
	return dst;
}

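/*
 * Note on the read-back in the IRQ setters above and below: per the virtio
 * spec a device answers a vector write with VIRTIO_MSI_NO_VECTOR when it
 * cannot allocate that vector, so callers are expected to compare the
 * returned value against the vector they requested.
 */
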
static uint16_t
legacy_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &vec, 2, VIRTIO_MSI_QUEUE_VECTOR);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_MSI_QUEUE_VECTOR);
	return dst;
}

static uint16_t
legacy_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	uint16_t dst;

	rte_pci_ioport_write(VTPCI_IO(hw), &queue_id, 2, VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_read(VTPCI_IO(hw), &dst, 2, VIRTIO_PCI_QUEUE_NUM);
	return dst;
}

static int
legacy_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	src = vq->vq_ring_mem >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);

	return 0;
}

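/*
 * In legacy virtio the PFN register doubles as the enable switch: writing
 * a non-zero page frame number (as above) hands the ring to the device,
 * while writing zero (as below) releases the queue again.
 */
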
static void
legacy_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint32_t src = 0;

	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_SEL);
	rte_pci_ioport_write(VTPCI_IO(hw), &src, 4, VIRTIO_PCI_QUEUE_PFN);
}

static void
legacy_notify_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_pci_ioport_write(VTPCI_IO(hw), &vq->vq_queue_index, 2,
		VIRTIO_PCI_QUEUE_NOTIFY);
}

const struct virtio_pci_ops legacy_ops = {
	.read_dev_cfg	= legacy_read_dev_config,
	.write_dev_cfg	= legacy_write_dev_config,
	.reset		= legacy_reset,
	.get_status	= legacy_get_status,
	.set_status	= legacy_set_status,
	.get_features	= legacy_get_features,
	.set_features	= legacy_set_features,
	.get_isr	= legacy_get_isr,
	.set_config_irq	= legacy_set_config_irq,
	.set_queue_irq	= legacy_set_queue_irq,
	.get_queue_num	= legacy_get_queue_num,
	.setup_queue	= legacy_setup_queue,
	.del_queue	= legacy_del_queue,
	.notify_queue	= legacy_notify_queue,
};

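/*
 * The modern common config space exposes 64-bit queue addresses as pairs
 * of 32-bit lo/hi registers, so a 64-bit value has to be written in two
 * 32-bit MMIO accesses.
 */
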
static inline void
io_write64_twopart(uint64_t val, uint32_t *lo, uint32_t *hi)
{
	rte_write32(val & ((1ULL << 32) - 1), lo);
	rte_write32(val >> 32, hi);
}

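/*
 * Device config reads can race with the device updating its own config
 * space. The config_generation counter changes whenever the device
 * modifies the config, so reading it before and after the copy and
 * retrying on mismatch guarantees a consistent, torn-free snapshot.
 */
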
static void
modern_read_dev_config(struct virtio_hw *hw, size_t offset,
		       void *dst, int length)
{
	int i;
	uint8_t *p;
	uint8_t old_gen, new_gen;

	do {
		old_gen = rte_read8(&hw->common_cfg->config_generation);

		p = dst;
		for (i = 0; i < length; i++)
			*p++ = rte_read8((uint8_t *)hw->dev_cfg + offset + i);

		new_gen = rte_read8(&hw->common_cfg->config_generation);
	} while (old_gen != new_gen);
}

static void
modern_write_dev_config(struct virtio_hw *hw, size_t offset,
			const void *src, int length)
{
	int i;
	const uint8_t *p = src;

	for (i = 0; i < length; i++)
		rte_write8((*p++), (((uint8_t *)hw->dev_cfg) + offset + i));
}

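/*
 * The 64-bit feature set is banked behind a 32-bit window: writing 0 or 1
 * to the feature-select register chooses which half the adjacent feature
 * register exposes, as the two functions below demonstrate.
 */
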
static uint64_t
modern_get_features(struct virtio_hw *hw)
{
	uint32_t features_lo, features_hi;

	rte_write32(0, &hw->common_cfg->device_feature_select);
	features_lo = rte_read32(&hw->common_cfg->device_feature);

	rte_write32(1, &hw->common_cfg->device_feature_select);
	features_hi = rte_read32(&hw->common_cfg->device_feature);

	return ((uint64_t)features_hi << 32) | features_lo;
}

static void
modern_set_features(struct virtio_hw *hw, uint64_t features)
{
	rte_write32(0, &hw->common_cfg->guest_feature_select);
	rte_write32(features & ((1ULL << 32) - 1),
		    &hw->common_cfg->guest_feature);

	rte_write32(1, &hw->common_cfg->guest_feature_select);
	rte_write32(features >> 32,
		    &hw->common_cfg->guest_feature);
}

static uint8_t
modern_get_status(struct virtio_hw *hw)
{
	return rte_read8(&hw->common_cfg->device_status);
}

static void
modern_set_status(struct virtio_hw *hw, uint8_t status)
{
	rte_write8(status, &hw->common_cfg->device_status);
}

static void
modern_reset(struct virtio_hw *hw)
{
	modern_set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	modern_get_status(hw);
}

static uint8_t
modern_get_isr(struct virtio_hw *hw)
{
	return rte_read8(hw->isr);
}

static uint16_t
modern_set_config_irq(struct virtio_hw *hw, uint16_t vec)
{
	rte_write16(vec, &hw->common_cfg->msix_config);
	return rte_read16(&hw->common_cfg->msix_config);
}

static uint16_t
modern_set_queue_irq(struct virtio_hw *hw, struct virtqueue *vq, uint16_t vec)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);
	rte_write16(vec, &hw->common_cfg->queue_msix_vector);
	return rte_read16(&hw->common_cfg->queue_msix_vector);
}

static uint16_t
modern_get_queue_num(struct virtio_hw *hw, uint16_t queue_id)
{
	rte_write16(queue_id, &hw->common_cfg->queue_select);
	return rte_read16(&hw->common_cfg->queue_size);
}

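/*
 * A split virtqueue lives in one physically-contiguous allocation: the
 * descriptor table first, the avail ring directly after it, and the used
 * ring aligned up to VIRTIO_PCI_VRING_ALIGN. The address arithmetic below
 * recovers those three sub-addresses from the ring base.
 */
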
static int
modern_setup_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	uint64_t desc_addr, avail_addr, used_addr;
	uint16_t notify_off;

	if (!check_vq_phys_addr_ok(vq))
		return -1;

	desc_addr = vq->vq_ring_mem;
	avail_addr = desc_addr + vq->vq_nentries * sizeof(struct vring_desc);
	used_addr = RTE_ALIGN_CEIL(avail_addr + offsetof(struct vring_avail,
							 ring[vq->vq_nentries]),
				   VIRTIO_PCI_VRING_ALIGN);

	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(desc_addr, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(avail_addr, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(used_addr, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	notify_off = rte_read16(&hw->common_cfg->queue_notify_off);
	vq->notify_addr = (void *)((uint8_t *)hw->notify_base +
				   notify_off * hw->notify_off_multiplier);

	rte_write16(1, &hw->common_cfg->queue_enable);

	PMD_INIT_LOG(DEBUG, "queue %u addresses:", vq->vq_queue_index);
	PMD_INIT_LOG(DEBUG, "\t desc_addr: %" PRIx64, desc_addr);
	PMD_INIT_LOG(DEBUG, "\t avail_addr: %" PRIx64, avail_addr);
	PMD_INIT_LOG(DEBUG, "\t used_addr: %" PRIx64, used_addr);
	PMD_INIT_LOG(DEBUG, "\t notify addr: %p (notify offset: %u)",
		vq->notify_addr, notify_off);

	return 0;
}

static void
modern_del_queue(struct virtio_hw *hw, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, &hw->common_cfg->queue_select);

	io_write64_twopart(0, &hw->common_cfg->queue_desc_lo,
			   &hw->common_cfg->queue_desc_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_avail_lo,
			   &hw->common_cfg->queue_avail_hi);
	io_write64_twopart(0, &hw->common_cfg->queue_used_lo,
			   &hw->common_cfg->queue_used_hi);

	rte_write16(0, &hw->common_cfg->queue_enable);
}

static void
modern_notify_queue(struct virtio_hw *hw __rte_unused, struct virtqueue *vq)
{
	rte_write16(vq->vq_queue_index, vq->notify_addr);
}

const struct virtio_pci_ops modern_ops = {
	.read_dev_cfg	= modern_read_dev_config,
	.write_dev_cfg	= modern_write_dev_config,
	.reset		= modern_reset,
	.get_status	= modern_get_status,
	.set_status	= modern_set_status,
	.get_features	= modern_get_features,
	.set_features	= modern_set_features,
	.get_isr	= modern_get_isr,
	.set_config_irq	= modern_set_config_irq,
	.set_queue_irq	= modern_set_queue_irq,
	.get_queue_num	= modern_get_queue_num,
	.setup_queue	= modern_setup_queue,
	.del_queue	= modern_del_queue,
	.notify_queue	= modern_notify_queue,
};

void
vtpci_read_dev_config(struct virtio_hw *hw, size_t offset,
		      void *dst, int length)
{
	VTPCI_OPS(hw)->read_dev_cfg(hw, offset, dst, length);
}

void
vtpci_write_dev_config(struct virtio_hw *hw, size_t offset,
		       const void *src, int length)
{
	VTPCI_OPS(hw)->write_dev_cfg(hw, offset, src, length);
}

uint64_t
vtpci_negotiate_features(struct virtio_hw *hw, uint64_t host_features)
{
	uint64_t features;

	/*
	 * Limit negotiated features to what the driver, virtqueue, and
	 * host all support.
	 */
	features = host_features & hw->guest_features;
	VTPCI_OPS(hw)->set_features(hw, features);

	return features;
}

void
vtpci_reset(struct virtio_hw *hw)
{
	VTPCI_OPS(hw)->set_status(hw, VIRTIO_CONFIG_STATUS_RESET);
	/* flush status write */
	VTPCI_OPS(hw)->get_status(hw);
}

void
vtpci_reinit_complete(struct virtio_hw *hw)
{
	vtpci_set_status(hw, VIRTIO_CONFIG_STATUS_DRIVER_OK);
}

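/*
 * Device status bits accumulate as initialization progresses, so new bits
 * are OR-ed into the current status below; the one exception is RESET,
 * which must be written as a bare zero to reset the device.
 */
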
void
vtpci_set_status(struct virtio_hw *hw, uint8_t status)
{
	if (status != VIRTIO_CONFIG_STATUS_RESET)
		status |= VTPCI_OPS(hw)->get_status(hw);

	VTPCI_OPS(hw)->set_status(hw, status);
}

uint8_t
vtpci_get_status(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_status(hw);
}

uint8_t
vtpci_isr(struct virtio_hw *hw)
{
	return VTPCI_OPS(hw)->get_isr(hw);
}

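/*
 * Modern virtio devices describe their register blocks through PCI
 * vendor-specific capabilities, each naming a BAR plus an offset and
 * length within it. get_cfg_addr() validates such a capability and
 * translates it into a pointer inside the mapped BAR.
 */
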
static void *
get_cfg_addr(struct rte_pci_device *dev, struct virtio_pci_cap *cap)
{
	uint8_t  bar    = cap->bar;
	uint32_t length = cap->length;
	uint32_t offset = cap->offset;
	uint8_t *base;

	if (bar > 5) {
		PMD_INIT_LOG(ERR, "invalid bar: %u", bar);
		return NULL;
	}

	if (offset + length < offset) {
		PMD_INIT_LOG(ERR, "offset(%u) + length(%u) overflows",
			offset, length);
		return NULL;
	}

	if (offset + length > dev->mem_resource[bar].len) {
		PMD_INIT_LOG(ERR,
			"invalid cap: overflows bar space: %u > %" PRIu64,
			offset + length, dev->mem_resource[bar].len);
		return NULL;
	}

	base = dev->mem_resource[bar].addr;
	if (base == NULL) {
		PMD_INIT_LOG(ERR, "bar %u base addr is NULL", bar);
		return NULL;
	}

	return base + offset;
}

static int
virtio_read_caps(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	uint8_t pos;
	struct virtio_pci_cap cap;
	int ret;

	if (rte_pci_map_device(dev)) {
		PMD_INIT_LOG(DEBUG, "failed to map pci device!");
		return -1;
	}

	ret = rte_pci_read_config(dev, &pos, 1, PCI_CAPABILITY_LIST);
	if (ret < 0) {
		PMD_INIT_LOG(DEBUG, "failed to read pci capability list");
		return -1;
	}

	while (pos) {
		ret = rte_pci_read_config(dev, &cap, sizeof(cap), pos);
		if (ret < 0) {
			PMD_INIT_LOG(ERR,
				"failed to read pci cap at pos: %x", pos);
			break;
		}

		if (cap.cap_vndr == PCI_CAP_ID_MSIX)
			hw->use_msix = 1;

		if (cap.cap_vndr != PCI_CAP_ID_VNDR) {
			PMD_INIT_LOG(DEBUG,
				"[%2x] skipping non VNDR cap id: %02x",
				pos, cap.cap_vndr);
			goto next;
		}

		PMD_INIT_LOG(DEBUG,
			"[%2x] cfg type: %u, bar: %u, offset: %04x, len: %u",
			pos, cap.cfg_type, cap.bar, cap.offset, cap.length);

		switch (cap.cfg_type) {
		case VIRTIO_PCI_CAP_COMMON_CFG:
			hw->common_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_NOTIFY_CFG:
			rte_pci_read_config(dev, &hw->notify_off_multiplier,
					4, pos + sizeof(cap));
			hw->notify_base = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_DEVICE_CFG:
			hw->dev_cfg = get_cfg_addr(dev, &cap);
			break;
		case VIRTIO_PCI_CAP_ISR_CFG:
			hw->isr = get_cfg_addr(dev, &cap);
			break;
		}

next:
		pos = cap.cap_next;
	}

	if (hw->common_cfg == NULL || hw->notify_base == NULL ||
	    hw->dev_cfg == NULL || hw->isr == NULL) {
		PMD_INIT_LOG(INFO, "no modern virtio pci device found.");
		return -1;
	}

	PMD_INIT_LOG(INFO, "found modern virtio pci device.");

	PMD_INIT_LOG(DEBUG, "common cfg mapped at: %p", hw->common_cfg);
	PMD_INIT_LOG(DEBUG, "device cfg mapped at: %p", hw->dev_cfg);
	PMD_INIT_LOG(DEBUG, "isr cfg mapped at: %p", hw->isr);
	PMD_INIT_LOG(DEBUG, "notify base: %p, notify off multiplier: %u",
		hw->notify_base, hw->notify_off_multiplier);

	return 0;
}

/*
 * Return -1:
 *   if there is an error mapping with VFIO/UIO.
 *   if there is a port map error when the driver type is KDRV_NONE.
 *   if the device is whitelisted but the driver type is KDRV_UNKNOWN.
 * Return 1 if the kernel driver is managing the device.
 * Return 0 on success.
 */
int
vtpci_init(struct rte_pci_device *dev, struct virtio_hw *hw)
{
	/*
	 * Try to read the virtio pci caps, which exist only on modern
	 * pci devices. If that fails, we fall back to legacy virtio
	 * handling.
	 */
	if (virtio_read_caps(dev, hw) == 0) {
		PMD_INIT_LOG(INFO, "modern virtio pci detected.");
		virtio_hw_internal[hw->port_id].vtpci_ops = &modern_ops;
		hw->modern = 1;
		return 0;
	}

	PMD_INIT_LOG(INFO, "trying with legacy virtio pci.");
	if (rte_pci_ioport_map(dev, 0, VTPCI_IO(hw)) < 0) {
		if (dev->kdrv == RTE_KDRV_UNKNOWN &&
		    (!dev->device.devargs ||
		     dev->device.devargs->type !=
			RTE_DEVTYPE_WHITELISTED_PCI)) {
			PMD_INIT_LOG(INFO,
				"skip kernel managed virtio device.");
			return 1;
		}
		return -1;
	}

	virtio_hw_internal[hw->port_id].vtpci_ops = &legacy_ops;
	hw->modern = 0;

	return 0;
}