F: docs/system/s390x/vfio-ap.rst
L: qemu-s390x@nongnu.org
+iommufd
+M: Yi Liu <yi.l.liu@intel.com>
+M: Eric Auger <eric.auger@redhat.com>
+M: Zhenzhong Duan <zhenzhong.duan@intel.com>
+S: Supported
+F: backends/iommufd.c
+F: include/sysemu/iommufd.h
+F: include/qemu/chardev_open.h
+F: util/chardev_open.c
+F: docs/devel/vfio-iommufd.rst
+
vhost
M: Michael S. Tsirkin <mst@redhat.com>
S: Supported
bool kvm_readonly_mem_allowed;
bool kvm_vm_attributes_allowed;
bool kvm_msi_use_devid;
-bool kvm_has_guest_debug;
+static bool kvm_has_guest_debug;
static int kvm_sstep_flags;
static bool kvm_immediate_exit;
static hwaddr kvm_max_slot_size = ~0;
source tpm/Kconfig
+
+config IOMMUFD
+ bool
+ depends on VFIO
--- /dev/null
+/*
+ * iommufd container backend
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "sysemu/iommufd.h"
+#include "qapi/error.h"
+#include "qapi/qmp/qerror.h"
+#include "qemu/module.h"
+#include "qom/object_interfaces.h"
+#include "qemu/error-report.h"
+#include "monitor/monitor.h"
+#include "trace.h"
+#include <sys/ioctl.h>
+#include <linux/iommufd.h>
+
+static void iommufd_backend_init(Object *obj)
+{
+ IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
+
+ be->fd = -1;
+ be->users = 0;
+ be->owned = true;
+ qemu_mutex_init(&be->lock);
+}
+
+static void iommufd_backend_finalize(Object *obj)
+{
+ IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
+
+ if (be->owned) {
+ close(be->fd);
+ be->fd = -1;
+ }
+}
+
+static void iommufd_backend_set_fd(Object *obj, const char *str, Error **errp)
+{
+ IOMMUFDBackend *be = IOMMUFD_BACKEND(obj);
+ int fd = -1;
+
+ fd = monitor_fd_param(monitor_cur(), str, errp);
+ if (fd == -1) {
+ error_prepend(errp, "Could not parse remote object fd %s:", str);
+ return;
+ }
+ qemu_mutex_lock(&be->lock);
+ be->fd = fd;
+ be->owned = false;
+ qemu_mutex_unlock(&be->lock);
+ trace_iommu_backend_set_fd(be->fd);
+}
+
+static bool iommufd_backend_can_be_deleted(UserCreatable *uc)
+{
+ IOMMUFDBackend *be = IOMMUFD_BACKEND(uc);
+
+ return !be->users;
+}
+
+static void iommufd_backend_class_init(ObjectClass *oc, void *data)
+{
+ UserCreatableClass *ucc = USER_CREATABLE_CLASS(oc);
+
+ ucc->can_be_deleted = iommufd_backend_can_be_deleted;
+
+ object_class_property_add_str(oc, "fd", NULL, iommufd_backend_set_fd);
+}
+
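+/*
+ * Open /dev/iommu on first use and reference-count subsequent
+ * connections. The fd is only closed again once the last user has
+ * disconnected, and only if QEMU owns the fd (i.e. it was not passed
+ * in externally through the "fd" property).
+ */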
+int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp)
+{
+ int fd, ret = 0;
+
+ qemu_mutex_lock(&be->lock);
+ if (be->users == UINT32_MAX) {
+ error_setg(errp, "too many connections");
+ ret = -E2BIG;
+ goto out;
+ }
+ if (be->owned && !be->users) {
+ fd = qemu_open_old("/dev/iommu", O_RDWR);
+ if (fd < 0) {
+ error_setg_errno(errp, errno, "/dev/iommu opening failed");
+ ret = fd;
+ goto out;
+ }
+ be->fd = fd;
+ }
+ be->users++;
+out:
+ trace_iommufd_backend_connect(be->fd, be->owned,
+ be->users, ret);
+ qemu_mutex_unlock(&be->lock);
+ return ret;
+}
+
+void iommufd_backend_disconnect(IOMMUFDBackend *be)
+{
+ qemu_mutex_lock(&be->lock);
+ if (!be->users) {
+ goto out;
+ }
+ be->users--;
+ if (!be->users && be->owned) {
+ close(be->fd);
+ be->fd = -1;
+ }
+out:
+ trace_iommufd_backend_disconnect(be->fd, be->users);
+ qemu_mutex_unlock(&be->lock);
+}
+
+int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+ Error **errp)
+{
+ int ret, fd = be->fd;
+ struct iommu_ioas_alloc alloc_data = {
+ .size = sizeof(alloc_data),
+ .flags = 0,
+ };
+
+ ret = ioctl(fd, IOMMU_IOAS_ALLOC, &alloc_data);
+ if (ret) {
+ error_setg_errno(errp, errno, "Failed to allocate ioas");
+ return ret;
+ }
+
+ *ioas_id = alloc_data.out_ioas_id;
+ trace_iommufd_backend_alloc_ioas(fd, *ioas_id, ret);
+
+ return ret;
+}
+
+void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id)
+{
+ int ret, fd = be->fd;
+ struct iommu_destroy des = {
+ .size = sizeof(des),
+ .id = id,
+ };
+
+ ret = ioctl(fd, IOMMU_DESTROY, &des);
+ trace_iommufd_backend_free_id(fd, id, ret);
+ if (ret) {
+ error_report("Failed to free id: %u %m", id);
+ }
+}
+
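+/*
+ * Map [iova, iova + size) in the IOAS to the host virtual address
+ * vaddr. IOMMU_IOAS_MAP_FIXED_IOVA requests the exact IOVA rather
+ * than letting the kernel choose one; write permission is withheld
+ * for read-only regions.
+ */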
+int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly)
+{
+ int ret, fd = be->fd;
+ struct iommu_ioas_map map = {
+ .size = sizeof(map),
+ .flags = IOMMU_IOAS_MAP_READABLE |
+ IOMMU_IOAS_MAP_FIXED_IOVA,
+ .ioas_id = ioas_id,
+ .__reserved = 0,
+ .user_va = (uintptr_t)vaddr,
+ .iova = iova,
+ .length = size,
+ };
+
+ if (!readonly) {
+ map.flags |= IOMMU_IOAS_MAP_WRITEABLE;
+ }
+
+ ret = ioctl(fd, IOMMU_IOAS_MAP, &map);
+ trace_iommufd_backend_map_dma(fd, ioas_id, iova, size,
+ vaddr, readonly, ret);
+ if (ret) {
+ ret = -errno;
+
+        /* TODO: Mapping hardware PCI BAR regions is not supported for now. */
+ if (errno == EFAULT) {
+ warn_report("IOMMU_IOAS_MAP failed: %m, PCI BAR?");
+ } else {
+ error_report("IOMMU_IOAS_MAP failed: %m");
+ }
+ }
+ return ret;
+}
+
+int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
+ hwaddr iova, ram_addr_t size)
+{
+ int ret, fd = be->fd;
+ struct iommu_ioas_unmap unmap = {
+ .size = sizeof(unmap),
+ .ioas_id = ioas_id,
+ .iova = iova,
+ .length = size,
+ };
+
+ ret = ioctl(fd, IOMMU_IOAS_UNMAP, &unmap);
+    /*
+     * IOMMUFD takes mappings as objects, so unmapping a nonexistent
+     * mapping is treated as deleting a nonexistent object and returns
+     * ENOENT. This differs from the legacy backend, which allows it.
+     * A vIOMMU may trigger many redundant unmaps; to avoid flooding
+     * the log, treat them as success for IOMMUFD just like the legacy
+     * backend does.
+     */
+ if (ret && errno == ENOENT) {
+ trace_iommufd_backend_unmap_dma_non_exist(fd, ioas_id, iova, size, ret);
+ ret = 0;
+ } else {
+ trace_iommufd_backend_unmap_dma(fd, ioas_id, iova, size, ret);
+ }
+
+ if (ret) {
+ ret = -errno;
+ error_report("IOMMU_IOAS_UNMAP failed: %m");
+ }
+ return ret;
+}
+
+static const TypeInfo iommufd_backend_info = {
+ .name = TYPE_IOMMUFD_BACKEND,
+ .parent = TYPE_OBJECT,
+ .instance_size = sizeof(IOMMUFDBackend),
+ .instance_init = iommufd_backend_init,
+ .instance_finalize = iommufd_backend_finalize,
+ .class_size = sizeof(IOMMUFDBackendClass),
+ .class_init = iommufd_backend_class_init,
+ .interfaces = (InterfaceInfo[]) {
+ { TYPE_USER_CREATABLE },
+ { }
+ }
+};
+
+static void register_types(void)
+{
+ type_register_static(&iommufd_backend_info);
+}
+
+type_init(register_types);
system_ss.add(when: 'CONFIG_VIRTIO', if_true: files('vhost-user.c'))
endif
system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost.c'))
+system_ss.add(when: 'CONFIG_IOMMUFD', if_true: files('iommufd.c'))
if have_vhost_user_crypto
system_ss.add(when: 'CONFIG_VIRTIO_CRYPTO', if_true: files('cryptodev-vhost-user.c'))
endif
dbus_vmstate_post_load(int version_id) "version_id: %d"
dbus_vmstate_loading(const char *id) "id: %s"
dbus_vmstate_saving(const char *id) "id: %s"
+
+# iommufd.c
+iommufd_backend_connect(int fd, bool owned, uint32_t users, int ret) "fd=%d owned=%d users=%d (%d)"
+iommufd_backend_disconnect(int fd, uint32_t users) "fd=%d users=%d"
+iommu_backend_set_fd(int fd) "pre-opened /dev/iommu fd=%d"
+iommufd_backend_map_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, void *vaddr, bool readonly, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" addr=%p readonly=%d (%d)"
+iommufd_backend_unmap_dma_non_exist(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " Unmap nonexistent mapping: iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
+iommufd_backend_unmap_dma(int iommufd, uint32_t ioas, uint64_t iova, uint64_t size, int ret) " iommufd=%d ioas=%d iova=0x%"PRIx64" size=0x%"PRIx64" (%d)"
+iommufd_backend_alloc_ioas(int iommufd, uint32_t ioas, int ret) " iommufd=%d ioas=%d (%d)"
+iommufd_backend_free_id(int iommufd, uint32_t id, int ret) " iommufd=%d id=%d (%d)"
s390-dasd-ipl
tracing
vfio-migration
+ vfio-iommufd
writing-monitor-commands
virtio-backends
--- /dev/null
+===============================
+IOMMUFD BACKEND usage with VFIO
+===============================
+
+(The terms backend, container and BE are used interchangeably below.)
+
+With the introduction of iommufd, the Linux kernel provides a generic
+interface for user space drivers to propagate their DMA mappings to the
+kernel for assigned devices. While the legacy kernel interface is
+group-centric, the new iommufd interface is device-centric, relying on
+the device fd and the iommufd.
+
+To support both interfaces in the QEMU VFIO device, a base container is
+introduced to abstract the parts common to the VFIO legacy and iommufd
+containers, so that the generic VFIO code can use either container.
+
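+The derived containers embed the base container, so generic code holding
+a ``VFIOContainerBase *`` can recover the specialised type with
+``container_of()``. A minimal sketch of the layout (only the
+``bcontainer`` and ``fd`` fields are taken from this series; the rest is
+elided):
+
+.. code-block:: c
+
+   typedef struct VFIOContainer {
+       VFIOContainerBase bcontainer; /* common part used by generic code */
+       int fd;                       /* /dev/vfio/vfio container fd */
+       /* ... legacy-specific state ... */
+   } VFIOContainer;
+
+   /* e.g. in hw/vfio/container.c: */
+   container = container_of(bcontainer, VFIOContainer, bcontainer);
+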
+The base container implements generic functionality such as the
+memory_listener and address space management, whereas the derived
+containers implement the callbacks specific to either the legacy or the
+iommufd backend. Each container has its own way to set up a secure
+context and its own DMA management interface. The diagram below shows
+how this looks with both containers.
+
+::
+
+ VFIO AddressSpace/Memory
+ +-------+ +----------+ +-----+ +-----+
+ | pci | | platform | | ap | | ccw |
+ +---+---+ +----+-----+ +--+--+ +--+--+ +----------------------+
+ | | | | | AddressSpace |
+ | | | | +------------+---------+
+ +---V-----------V-----------V--------V----+ /
+ | VFIOAddressSpace | <------------+
+ | | | MemoryListener
+ | VFIOContainerBase list |
+ +-------+----------------------------+----+
+ | |
+ | |
+ +-------V------+ +--------V----------+
+ | iommufd | | vfio legacy |
+ | container | | container |
+ +-------+------+ +--------+----------+
+ | |
+ | /dev/iommu | /dev/vfio/vfio
+ | /dev/vfio/devices/vfioX | /dev/vfio/$group_id
+ Userspace | |
+ ============+============================+===========================
+ Kernel | device fd |
+ +---------------+ | group/container fd
+ | (BIND_IOMMUFD | | (SET_CONTAINER/SET_IOMMU)
+ | ATTACH_IOAS) | | device fd
+ | | |
+ | +-------V------------V-----------------+
+ iommufd | | vfio |
+ (map/unmap | +---------+--------------------+-------+
+ ioas_copy) | | | map/unmap
+ | | |
+ +------V------+ +-----V------+ +------V--------+
+ | iommfd core | | device | | vfio iommu |
+ +-------------+ +------------+ +---------------+
+
+* Secure Context setup
+
+  - iommufd BE: uses the device fd and iommufd to set up a secure context
+    (bind_iommufd, attach_ioas)
+  - vfio legacy BE: uses the group fd and container fd to set up a secure
+    context (set_container, set_iommu)
+
+* Device access
+
+ - iommufd BE: device fd is opened through ``/dev/vfio/devices/vfioX``
+ - vfio legacy BE: device fd is retrieved from group fd ioctl
+
+* DMA Mapping flow
+
+ 1. VFIOAddressSpace receives MemoryRegion add/del via MemoryListener
+  2. VFIO populates DMA map/unmap via the container BEs (see the sketch
+     after this list)
+ * iommufd BE: uses iommufd
+ * vfio legacy BE: uses container fd
+
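+The dispatch itself is a thin indirection through per-backend callbacks.
+A condensed sketch of the helpers added in this series (from
+``hw/vfio/common.c`` and ``hw/vfio/container-base.c``, with assertions
+and error handling trimmed):
+
+.. code-block:: c
+
+   int vfio_attach_device(char *name, VFIODevice *vbasedev,
+                          AddressSpace *as, Error **errp)
+   {
+       const VFIOIOMMUOps *ops = &vfio_legacy_ops;
+
+   #ifdef CONFIG_IOMMUFD
+       if (vbasedev->iommufd) { /* an iommufd object is linked in */
+           ops = &vfio_iommufd_ops;
+       }
+   #endif
+       return ops->attach_device(name, vbasedev, as, errp);
+   }
+
+   int vfio_container_dma_map(VFIOContainerBase *bcontainer,
+                              hwaddr iova, ram_addr_t size,
+                              void *vaddr, bool readonly)
+   {
+       /* legacy or iommufd implementation, chosen at attach time */
+       return bcontainer->ops->dma_map(bcontainer, iova, size,
+                                       vaddr, readonly);
+   }
+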
+Example configuration
+=====================
+
+Step 1: configure the host device
+---------------------------------
+
+It is exactly the same as for a VFIO device with the legacy VFIO container.
+
+Step 2: configure QEMU
+----------------------
+
+Interactions with ``/dev/iommu`` are abstracted by a new iommufd
+object (compiled in with the ``CONFIG_IOMMUFD`` option).
+
+Any QEMU device (e.g. a VFIO device) wishing to use ``/dev/iommu`` must
+be linked with an iommufd object. Such devices gain a new optional
+property named ``iommufd`` which allows an iommufd object to be passed.
+Take the ``vfio-pci`` device as an example:
+
+.. code-block:: bash
+
+ -object iommufd,id=iommufd0
+ -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0
+
+Note that ``/dev/iommu`` and the VFIO cdev can be opened externally by a
+management layer. In that case the fd is passed in; the ``fd`` property
+accepts either a string naming the fd or a number, for example:
+
+.. code-block:: bash
+
+ -object iommufd,id=iommufd0,fd=22
+ -device vfio-pci,iommufd=iommufd0,fd=23
+
+If the ``fd`` property is not passed, the fd is opened by QEMU.
+
+If no ``iommufd`` object is passed to the ``vfio-pci`` device, iommufd
+is not used and the user gets the behavior based on the legacy VFIO
+container:
+
+.. code-block:: bash
+
+ -device vfio-pci,host=0000:02:00.0
+
+Supported platform
+==================
+
+x86, ARM and s390x are currently supported.
+
+Caveats
+=======
+
+Dirty page sync
+---------------
+
+Dirty page sync is not yet supported with the iommufd backend, so live
+migration is disabled by default. It can be force-enabled as shown below,
+although with poor efficiency:
+
+.. code-block:: bash
+
+ -object iommufd,id=iommufd0
+ -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0,enable-migration=on
+
+P2P DMA
+-------
+
+PCI p2p DMA is unsupported because IOMMUFD doesn't yet support mapping
+hardware PCI BAR regions. The warning below is shown for an assigned PCI
+device; it is not a bug.
+
+.. code-block:: none
+
+ qemu-system-x86_64: warning: IOMMU_IOAS_MAP failed: Bad address, PCI BAR?
+ qemu-system-x86_64: vfio_container_dma_map(0x560cb6cb1620, 0xe000000021000, 0x3000, 0x7f32ed55c000) = -14 (Bad address)
+
+FD passing with mdev
+--------------------
+
+The ``vfio-pci`` device checks the sysfsdev property to decide whether the
+backend is an mdev. If FD passing is used, there is no way to know that,
+and the mdev is treated like a real PCI device. The error below is raised
+if the user tries to enable RAM discarding for an mdev:
+
+.. code-block:: none
+
+ qemu-system-x86_64: -device vfio-pci,iommufd=iommufd0,x-balloon-allowed=on,fd=9: vfio VFIO_FD9: x-balloon-allowed only potentially compatible with mdev devices
+
+``vfio-ap`` and ``vfio-ccw`` devices don't have the same issue, as their
+backend devices are always mdevs and RAM discarding is force-enabled.
imply TPM_TIS_SYSBUS
imply TPM_TIS_I2C
imply NVDIMM
+ imply IOMMUFD
select ARM_GIC
select ACPI
select ARM_SMMUV3
if (pmu) {
assert(arm_feature(&ARM_CPU(cpu)->env, ARM_FEATURE_PMU));
if (kvm_irqchip_in_kernel()) {
- kvm_arm_pmu_set_irq(cpu, VIRTUAL_PMU_IRQ);
+ kvm_arm_pmu_set_irq(ARM_CPU(cpu), VIRTUAL_PMU_IRQ);
}
- kvm_arm_pmu_init(cpu);
+ kvm_arm_pmu_init(ARM_CPU(cpu));
}
if (steal_time) {
- kvm_arm_pvtime_init(cpu, pvtime_reg_base +
- cpu->cpu_index * PVTIME_SIZE_PER_CPU);
+ kvm_arm_pvtime_init(ARM_CPU(cpu), pvtime_reg_base
+ + cpu->cpu_index
+ * PVTIME_SIZE_PER_CPU);
}
}
} else {
imply E1000E_PCI_EXPRESS
imply VMPORT
imply VMMOUSE
+ imply IOMMUFD
select PC_PCI
select PC_ACPI
select PCI_EXPRESS_Q35
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/module.h"
+#include "qemu/error-report.h"
#include "hw/intc/arm_gicv3_its_common.h"
#include "hw/qdev-properties.h"
#include "sysemu/runstate.h"
*/
#include "qemu/osdep.h"
+#include "qemu/bitops.h"
+#include "qemu/timer.h"
+#include "migration/vmstate.h"
#include "hw/misc/imx7_snvs.h"
+#include "qemu/cutils.h"
#include "qemu/module.h"
+#include "sysemu/sysemu.h"
+#include "sysemu/rtc.h"
#include "sysemu/runstate.h"
#include "trace.h"
+#define RTC_FREQ 32768ULL
+
+static const VMStateDescription vmstate_imx7_snvs = {
+ .name = TYPE_IMX7_SNVS,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(tick_offset, IMX7SNVSState),
+ VMSTATE_UINT64(lpcr, IMX7SNVSState),
+ VMSTATE_END_OF_LIST()
+ }
+};
+
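+/*
+ * Current value of the secure RTC counter: 32768 Hz ticks derived from
+ * rtc_clock, plus the guest-adjustable tick_offset.
+ */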
+static uint64_t imx7_snvs_get_count(IMX7SNVSState *s)
+{
+ uint64_t ticks = muldiv64(qemu_clock_get_ns(rtc_clock), RTC_FREQ,
+ NANOSECONDS_PER_SECOND);
+ return s->tick_offset + ticks;
+}
+
static uint64_t imx7_snvs_read(void *opaque, hwaddr offset, unsigned size)
{
- trace_imx7_snvs_read(offset, 0);
+ IMX7SNVSState *s = IMX7_SNVS(opaque);
+ uint64_t ret = 0;
+
+ switch (offset) {
+ case SNVS_LPSRTCMR:
+ ret = extract64(imx7_snvs_get_count(s), 32, 15);
+ break;
+ case SNVS_LPSRTCLR:
+ ret = extract64(imx7_snvs_get_count(s), 0, 32);
+ break;
+ case SNVS_LPCR:
+ ret = s->lpcr;
+ break;
+ }
- return 0;
+ trace_imx7_snvs_read(offset, ret, size);
+
+ return ret;
+}
+
+static void imx7_snvs_reset(DeviceState *dev)
+{
+ IMX7SNVSState *s = IMX7_SNVS(dev);
+
+ s->lpcr = 0;
}
static void imx7_snvs_write(void *opaque, hwaddr offset,
uint64_t v, unsigned size)
{
- const uint32_t value = v;
- const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN;
+ trace_imx7_snvs_write(offset, v, size);
+
+ IMX7SNVSState *s = IMX7_SNVS(opaque);
- trace_imx7_snvs_write(offset, value);
+ uint64_t new_value = 0, snvs_count = 0;
- if (offset == SNVS_LPCR && ((value & mask) == mask)) {
- qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) {
+ snvs_count = imx7_snvs_get_count(s);
+ }
+
+ switch (offset) {
+ case SNVS_LPSRTCMR:
+ new_value = deposit64(snvs_count, 32, 32, v);
+ break;
+ case SNVS_LPSRTCLR:
+ new_value = deposit64(snvs_count, 0, 32, v);
+ break;
+ case SNVS_LPCR: {
+ s->lpcr = v;
+
+ const uint32_t mask = SNVS_LPCR_TOP | SNVS_LPCR_DP_EN;
+
+ if ((v & mask) == mask) {
+ qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
+ }
+ break;
+ }
+ }
+
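+    /*
+     * A write to the MR/LR counter registers is modelled by shifting
+     * tick_offset by the difference between the requested and the
+     * current counter value, so the RTC keeps advancing with rtc_clock.
+     */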
+ if (offset == SNVS_LPSRTCMR || offset == SNVS_LPSRTCLR) {
+ s->tick_offset += new_value - snvs_count;
}
}
{
SysBusDevice *sd = SYS_BUS_DEVICE(obj);
IMX7SNVSState *s = IMX7_SNVS(obj);
+ struct tm tm;
memory_region_init_io(&s->mmio, obj, &imx7_snvs_ops, s,
TYPE_IMX7_SNVS, 0x1000);
sysbus_init_mmio(sd, &s->mmio);
+
+ qemu_get_timedate(&tm, 0);
+ s->tick_offset = mktimegm(&tm) -
+ qemu_clock_get_ns(rtc_clock) / NANOSECONDS_PER_SECOND;
}
static void imx7_snvs_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
+ dc->reset = imx7_snvs_reset;
+ dc->vmsd = &vmstate_imx7_snvs;
dc->desc = "i.MX7 Secure Non-Volatile Storage Module";
}
imx7_gpr_write(uint64_t offset, uint64_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx64
# imx7_snvs.c
-imx7_snvs_read(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32
-imx7_snvs_write(uint64_t offset, uint32_t value) "addr 0x%08" PRIx64 "value 0x%08" PRIx32
+imx7_snvs_read(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS read: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u"
+imx7_snvs_write(uint64_t offset, uint64_t value, unsigned size) "i.MX SNVS write: offset 0x%08" PRIx64 " value 0x%08" PRIx64 " size %u"
# mos6522.c
mos6522_set_counter(int index, unsigned int val) "T%d.counter=%d"
imply PCI_DEVICES
imply TEST_DEVICES
imply VIRTIO_VGA
+ imply VFIO_PCI if LINUX # needed by spapr_pci_vfio.c
select NVDIMM
select DIMM
select PCI
select SPAPR_VSCSI
- select VFIO if LINUX # needed by spapr_pci_vfio.c
select XICS
select XIVE
select MSI_NONBROKEN
#include "hw/pci/pci_device.h"
#include "hw/vfio/vfio-common.h"
#include "qemu/error-report.h"
+#include CONFIG_DEVICES /* CONFIG_VFIO_PCI */
/*
* Interfaces for IBM EEH (Enhanced Error Handling)
*/
+#ifdef CONFIG_VFIO_PCI
static bool vfio_eeh_container_ok(VFIOContainer *container)
{
/*
static VFIOContainer *vfio_eeh_as_container(AddressSpace *as)
{
VFIOAddressSpace *space = vfio_get_address_space(as);
- VFIOContainer *container = NULL;
+ VFIOContainerBase *bcontainer = NULL;
if (QLIST_EMPTY(&space->containers)) {
/* No containers to act on */
goto out;
}
- container = QLIST_FIRST(&space->containers);
+ bcontainer = QLIST_FIRST(&space->containers);
- if (QLIST_NEXT(container, next)) {
+ if (QLIST_NEXT(bcontainer, next)) {
/*
* We don't yet have logic to synchronize EEH state across
* multiple containers
*/
- container = NULL;
+ bcontainer = NULL;
goto out;
}
out:
vfio_put_address_space(space);
- return container;
+ return container_of(bcontainer, VFIOContainer, bcontainer);
}
static bool vfio_eeh_as_ok(AddressSpace *as)
return RTAS_OUT_SUCCESS;
}
+
+#else
+
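+/*
+ * Stubs used when CONFIG_VFIO_PCI is compiled out: EEH is reported as
+ * unavailable and the RTAS calls as not supported.
+ */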
+bool spapr_phb_eeh_available(SpaprPhbState *sphb)
+{
+ return false;
+}
+
+void spapr_phb_vfio_reset(DeviceState *qdev)
+{
+}
+
+int spapr_phb_vfio_eeh_set_option(SpaprPhbState *sphb,
+ unsigned int addr, int option)
+{
+ return RTAS_OUT_NOT_SUPPORTED;
+}
+
+int spapr_phb_vfio_eeh_get_state(SpaprPhbState *sphb, int *state)
+{
+ return RTAS_OUT_NOT_SUPPORTED;
+}
+
+int spapr_phb_vfio_eeh_reset(SpaprPhbState *sphb, int option)
+{
+ return RTAS_OUT_NOT_SUPPORTED;
+}
+
+int spapr_phb_vfio_eeh_configure(SpaprPhbState *sphb)
+{
+ return RTAS_OUT_NOT_SUPPORTED;
+}
+
+#endif /* CONFIG_VFIO_PCI */
imply VFIO_CCW
imply WDT_DIAG288
imply PCIE_DEVICES
+ imply IOMMUFD
select PCI_EXPRESS
select S390_FLIC
select S390_FLIC_KVM if KVM
*/
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include "qapi/error.h"
#include "hw/vfio/vfio-common.h"
+#include "sysemu/iommufd.h"
#include "hw/s390x/ap-device.h"
#include "qemu/error-report.h"
#include "qemu/event_notifier.h"
VFIOAPDevice *vapdev = VFIO_AP_DEVICE(dev);
VFIODevice *vbasedev = &vapdev->vdev;
- vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
- vbasedev->ops = &vfio_ap_ops;
- vbasedev->type = VFIO_DEVICE_TYPE_AP;
- vbasedev->dev = dev;
-
- /*
- * vfio-ap devices operate in a way compatible with discarding of
- * memory in RAM blocks, as no pages are pinned in the host.
- * This needs to be set before vfio_get_device() for vfio common to
- * handle ram_block_discard_disable().
- */
- vapdev->vdev.ram_block_discard_allowed = true;
+ if (vfio_device_get_name(vbasedev, errp) < 0) {
+ return;
+ }
ret = vfio_attach_device(vbasedev->name, vbasedev,
&address_space_memory, errp);
static Property vfio_ap_properties[] = {
DEFINE_PROP_STRING("sysfsdev", VFIOAPDevice, vdev.sysfsdev),
+#ifdef CONFIG_IOMMUFD
+ DEFINE_PROP_LINK("iommufd", VFIOAPDevice, vdev.iommufd,
+ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
.unmigratable = 1,
};
+static void vfio_ap_instance_init(Object *obj)
+{
+ VFIOAPDevice *vapdev = VFIO_AP_DEVICE(obj);
+ VFIODevice *vbasedev = &vapdev->vdev;
+
+ /*
+ * vfio-ap devices operate in a way compatible with discarding of
+ * memory in RAM blocks, as no pages are pinned in the host.
+ * This needs to be set before vfio_get_device() for vfio common to
+ * handle ram_block_discard_disable().
+ */
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_AP, &vfio_ap_ops,
+ DEVICE(vapdev), true);
+}
+
+#ifdef CONFIG_IOMMUFD
+static void vfio_ap_set_fd(Object *obj, const char *str, Error **errp)
+{
+ vfio_device_set_fd(&VFIO_AP_DEVICE(obj)->vdev, str, errp);
+}
+#endif
+
static void vfio_ap_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
device_class_set_props(dc, vfio_ap_properties);
+#ifdef CONFIG_IOMMUFD
+ object_class_property_add_str(klass, "fd", NULL, vfio_ap_set_fd);
+#endif
dc->vmsd = &vfio_ap_vmstate;
dc->desc = "VFIO-based AP device assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
.name = TYPE_VFIO_AP_DEVICE,
.parent = TYPE_AP_DEVICE,
.instance_size = sizeof(VFIOAPDevice),
+ .instance_init = vfio_ap_instance_init,
.class_init = vfio_ap_class_init,
};
*/
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>
#include <sys/ioctl.h>
#include "qapi/error.h"
#include "hw/vfio/vfio-common.h"
+#include "sysemu/iommufd.h"
#include "hw/s390x/s390-ccw.h"
#include "hw/s390x/vfio-ccw.h"
#include "hw/qdev-properties.h"
}
}
- vbasedev->ops = &vfio_ccw_ops;
- vbasedev->type = VFIO_DEVICE_TYPE_CCW;
- vbasedev->name = g_strdup_printf("%x.%x.%04x", vcdev->cdev.hostid.cssid,
- vcdev->cdev.hostid.ssid,
- vcdev->cdev.hostid.devid);
- vbasedev->dev = dev;
-
- /*
- * All vfio-ccw devices are believed to operate in a way compatible with
- * discarding of memory in RAM blocks, ie. pages pinned in the host are
- * in the current working set of the guest driver and therefore never
- * overlap e.g., with pages available to the guest balloon driver. This
- * needs to be set before vfio_get_device() for vfio common to handle
- * ram_block_discard_disable().
- */
- vbasedev->ram_block_discard_allowed = true;
+ if (vfio_device_get_name(vbasedev, errp) < 0) {
+ return;
+ }
ret = vfio_attach_device(cdev->mdevid, vbasedev,
&address_space_memory, errp);
static Property vfio_ccw_properties[] = {
DEFINE_PROP_STRING("sysfsdev", VFIOCCWDevice, vdev.sysfsdev),
DEFINE_PROP_BOOL("force-orb-pfch", VFIOCCWDevice, force_orb_pfch, false),
+#ifdef CONFIG_IOMMUFD
+ DEFINE_PROP_LINK("iommufd", VFIOCCWDevice, vdev.iommufd,
+ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
.unmigratable = 1,
};
+static void vfio_ccw_instance_init(Object *obj)
+{
+ VFIOCCWDevice *vcdev = VFIO_CCW(obj);
+ VFIODevice *vbasedev = &vcdev->vdev;
+
+ /*
+ * All vfio-ccw devices are believed to operate in a way compatible with
+ * discarding of memory in RAM blocks, ie. pages pinned in the host are
+ * in the current working set of the guest driver and therefore never
+ * overlap e.g., with pages available to the guest balloon driver. This
+ * needs to be set before vfio_get_device() for vfio common to handle
+ * ram_block_discard_disable().
+ */
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_CCW, &vfio_ccw_ops,
+ DEVICE(vcdev), true);
+}
+
+#ifdef CONFIG_IOMMUFD
+static void vfio_ccw_set_fd(Object *obj, const char *str, Error **errp)
+{
+ vfio_device_set_fd(&VFIO_CCW(obj)->vdev, str, errp);
+}
+#endif
+
static void vfio_ccw_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
S390CCWDeviceClass *cdc = S390_CCW_DEVICE_CLASS(klass);
device_class_set_props(dc, vfio_ccw_properties);
+#ifdef CONFIG_IOMMUFD
+ object_class_property_add_str(klass, "fd", NULL, vfio_ccw_set_fd);
+#endif
dc->vmsd = &vfio_ccw_vmstate;
dc->desc = "VFIO-based subchannel assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
.name = TYPE_VFIO_CCW,
.parent = TYPE_S390_CCW,
.instance_size = sizeof(VFIOCCWDevice),
+ .instance_init = vfio_ccw_instance_init,
.class_init = vfio_ccw_class_init,
};
*/
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <sys/ioctl.h>
#ifdef CONFIG_KVM
#include <linux/kvm.h>
bool vfio_viommu_preset(VFIODevice *vbasedev)
{
- return vbasedev->container->space->as != &address_space_memory;
+ return vbasedev->bcontainer->space->as != &address_space_memory;
}
static void vfio_set_migration_error(int err)
migration->device_state == VFIO_DEVICE_STATE_PRE_COPY_P2P;
}
-static bool vfio_devices_all_dirty_tracking(VFIOContainer *container)
+static bool vfio_devices_all_dirty_tracking(VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
MigrationState *ms = migrate_get_current();
return false;
}
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
VFIOMigration *migration = vbasedev->migration;
if (!migration) {
return true;
}
-bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container)
+bool vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
if (!vbasedev->dirty_pages_supported) {
return false;
}
* Check if all VFIO devices are running and migration is active, which is
* essentially equivalent to the migration being in pre-copy phase.
*/
-bool vfio_devices_all_running_and_mig_active(VFIOContainer *container)
+bool
+vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer)
{
VFIODevice *vbasedev;
return false;
}
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
VFIOMigration *migration = vbasedev->migration;
if (!migration) {
static void vfio_iommu_map_notify(IOMMUNotifier *n, IOMMUTLBEntry *iotlb)
{
VFIOGuestIOMMU *giommu = container_of(n, VFIOGuestIOMMU, n);
- VFIOContainer *container = giommu->container;
+ VFIOContainerBase *bcontainer = giommu->bcontainer;
hwaddr iova = iotlb->iova + giommu->iommu_offset;
void *vaddr;
int ret;
* of vaddr will always be there, even if the memory object is
* destroyed and its backing memory munmap-ed.
*/
- ret = vfio_dma_map(container, iova,
- iotlb->addr_mask + 1, vaddr,
- read_only);
+ ret = vfio_container_dma_map(bcontainer, iova,
+ iotlb->addr_mask + 1, vaddr,
+ read_only);
if (ret) {
- error_report("vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+ error_report("vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx", %p) = %d (%s)",
- container, iova,
+ bcontainer, iova,
iotlb->addr_mask + 1, vaddr, ret, strerror(-ret));
}
} else {
- ret = vfio_dma_unmap(container, iova, iotlb->addr_mask + 1, iotlb);
+ ret = vfio_container_dma_unmap(bcontainer, iova,
+ iotlb->addr_mask + 1, iotlb);
if (ret) {
- error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx") = %d (%s)",
- container, iova,
+ bcontainer, iova,
iotlb->addr_mask + 1, ret, strerror(-ret));
vfio_set_migration_error(ret);
}
{
VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
listener);
+ VFIOContainerBase *bcontainer = vrdl->bcontainer;
const hwaddr size = int128_get64(section->size);
const hwaddr iova = section->offset_within_address_space;
int ret;
/* Unmap with a single call. */
- ret = vfio_dma_unmap(vrdl->container, iova, size , NULL);
+ ret = vfio_container_dma_unmap(bcontainer, iova, size , NULL);
if (ret) {
- error_report("%s: vfio_dma_unmap() failed: %s", __func__,
+ error_report("%s: vfio_container_dma_unmap() failed: %s", __func__,
strerror(-ret));
}
}
{
VFIORamDiscardListener *vrdl = container_of(rdl, VFIORamDiscardListener,
listener);
+ VFIOContainerBase *bcontainer = vrdl->bcontainer;
const hwaddr end = section->offset_within_region +
int128_get64(section->size);
hwaddr start, next, iova;
section->offset_within_address_space;
vaddr = memory_region_get_ram_ptr(section->mr) + start;
- ret = vfio_dma_map(vrdl->container, iova, next - start,
- vaddr, section->readonly);
+ ret = vfio_container_dma_map(bcontainer, iova, next - start,
+ vaddr, section->readonly);
if (ret) {
/* Rollback */
vfio_ram_discard_notify_discard(rdl, section);
return 0;
}
-static void vfio_register_ram_discard_listener(VFIOContainer *container,
+static void vfio_register_ram_discard_listener(VFIOContainerBase *bcontainer,
MemoryRegionSection *section)
{
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
g_assert(QEMU_IS_ALIGNED(int128_get64(section->size), TARGET_PAGE_SIZE));
vrdl = g_new0(VFIORamDiscardListener, 1);
- vrdl->container = container;
+ vrdl->bcontainer = bcontainer;
vrdl->mr = section->mr;
vrdl->offset_within_address_space = section->offset_within_address_space;
vrdl->size = int128_get64(section->size);
section->mr);
g_assert(vrdl->granularity && is_power_of_2(vrdl->granularity));
- g_assert(container->pgsizes &&
- vrdl->granularity >= 1ULL << ctz64(container->pgsizes));
+ g_assert(bcontainer->pgsizes &&
+ vrdl->granularity >= 1ULL << ctz64(bcontainer->pgsizes));
ram_discard_listener_init(&vrdl->listener,
vfio_ram_discard_notify_populate,
vfio_ram_discard_notify_discard, true);
ram_discard_manager_register_listener(rdm, &vrdl->listener, section);
- QLIST_INSERT_HEAD(&container->vrdl_list, vrdl, next);
+ QLIST_INSERT_HEAD(&bcontainer->vrdl_list, vrdl, next);
/*
* Sanity-check if we have a theoretically problematic setup where we could
* number of sections in the address space we could have over time,
* also consuming DMA mappings.
*/
- if (container->dma_max_mappings) {
+ if (bcontainer->dma_max_mappings) {
unsigned int vrdl_count = 0, vrdl_mappings = 0, max_memslots = 512;
#ifdef CONFIG_KVM
}
#endif
- QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
hwaddr start, end;
start = QEMU_ALIGN_DOWN(vrdl->offset_within_address_space,
}
if (vrdl_mappings + max_memslots - vrdl_count >
- container->dma_max_mappings) {
+ bcontainer->dma_max_mappings) {
warn_report("%s: possibly running out of DMA mappings. E.g., try"
" increasing the 'block-size' of virtio-mem devies."
" Maximum possible DMA mappings: %d, Maximum possible"
- " memslots: %d", __func__, container->dma_max_mappings,
+ " memslots: %d", __func__, bcontainer->dma_max_mappings,
max_memslots);
}
}
}
-static void vfio_unregister_ram_discard_listener(VFIOContainer *container,
+static void vfio_unregister_ram_discard_listener(VFIOContainerBase *bcontainer,
MemoryRegionSection *section)
{
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
VFIORamDiscardListener *vrdl = NULL;
- QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
if (vrdl->mr == section->mr &&
vrdl->offset_within_address_space ==
section->offset_within_address_space) {
return true;
}
-static bool vfio_get_section_iova_range(VFIOContainer *container,
+static bool vfio_get_section_iova_range(VFIOContainerBase *bcontainer,
MemoryRegionSection *section,
hwaddr *out_iova, hwaddr *out_end,
Int128 *out_llend)
static void vfio_listener_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
- VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
hwaddr iova, end;
Int128 llend, llsize;
void *vaddr;
return;
}
- if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) {
+ if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
+ &llend)) {
if (memory_region_is_ram_device(section->mr)) {
trace_vfio_listener_region_add_no_dma_map(
memory_region_name(section->mr),
return;
}
- if (vfio_container_add_section_window(container, section, &err)) {
+ if (vfio_container_add_section_window(bcontainer, section, &err)) {
goto fail;
}
giommu->iommu_mr = iommu_mr;
giommu->iommu_offset = section->offset_within_address_space -
section->offset_within_region;
- giommu->container = container;
+ giommu->bcontainer = bcontainer;
llend = int128_add(int128_make64(section->offset_within_region),
section->size);
llend = int128_sub(llend, int128_one());
iommu_idx);
ret = memory_region_iommu_set_page_size_mask(giommu->iommu_mr,
- container->pgsizes,
+ bcontainer->pgsizes,
&err);
if (ret) {
g_free(giommu);
goto fail;
}
- if (container->iova_ranges) {
+ if (bcontainer->iova_ranges) {
ret = memory_region_iommu_set_iova_ranges(giommu->iommu_mr,
- container->iova_ranges, &err);
+ bcontainer->iova_ranges,
+ &err);
if (ret) {
g_free(giommu);
goto fail;
g_free(giommu);
goto fail;
}
- QLIST_INSERT_HEAD(&container->giommu_list, giommu, giommu_next);
+ QLIST_INSERT_HEAD(&bcontainer->giommu_list, giommu, giommu_next);
memory_region_iommu_replay(giommu->iommu_mr, &giommu->n);
return;
* about changes.
*/
if (memory_region_has_ram_discard_manager(section->mr)) {
- vfio_register_ram_discard_listener(container, section);
+ vfio_register_ram_discard_listener(bcontainer, section);
return;
}
llsize = int128_sub(llend, int128_make64(iova));
if (memory_region_is_ram_device(section->mr)) {
- hwaddr pgmask = (1ULL << ctz64(container->pgsizes)) - 1;
+ hwaddr pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
if ((iova & pgmask) || (int128_get64(llsize) & pgmask)) {
trace_vfio_listener_region_add_no_dma_map(
}
}
- ret = vfio_dma_map(container, iova, int128_get64(llsize),
- vaddr, section->readonly);
+ ret = vfio_container_dma_map(bcontainer, iova, int128_get64(llsize),
+ vaddr, section->readonly);
if (ret) {
- error_setg(&err, "vfio_dma_map(%p, 0x%"HWADDR_PRIx", "
+ error_setg(&err, "vfio_container_dma_map(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx", %p) = %d (%s)",
- container, iova, int128_get64(llsize), vaddr, ret,
+ bcontainer, iova, int128_get64(llsize), vaddr, ret,
strerror(-ret));
if (memory_region_is_ram_device(section->mr)) {
/* Allow unexpected mappings not to be fatal for RAM devices */
* can gracefully fail. Runtime, there's not much we can do other
* than throw a hardware error.
*/
- if (!container->initialized) {
- if (!container->error) {
- error_propagate_prepend(&container->error, err,
+ if (!bcontainer->initialized) {
+ if (!bcontainer->error) {
+ error_propagate_prepend(&bcontainer->error, err,
"Region %s: ",
memory_region_name(section->mr));
} else {
static void vfio_listener_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
- VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
hwaddr iova, end;
Int128 llend, llsize;
int ret;
if (memory_region_is_iommu(section->mr)) {
VFIOGuestIOMMU *giommu;
- QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
+ QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
giommu->n.start == section->offset_within_region) {
memory_region_unregister_iommu_notifier(section->mr,
*/
}
- if (!vfio_get_section_iova_range(container, section, &iova, &end, &llend)) {
+ if (!vfio_get_section_iova_range(bcontainer, section, &iova, &end,
+ &llend)) {
return;
}
if (memory_region_is_ram_device(section->mr)) {
hwaddr pgmask;
- pgmask = (1ULL << ctz64(container->pgsizes)) - 1;
+ pgmask = (1ULL << ctz64(bcontainer->pgsizes)) - 1;
try_unmap = !((iova & pgmask) || (int128_get64(llsize) & pgmask));
} else if (memory_region_has_ram_discard_manager(section->mr)) {
- vfio_unregister_ram_discard_listener(container, section);
+ vfio_unregister_ram_discard_listener(bcontainer, section);
/* Unregistering will trigger an unmap. */
try_unmap = false;
}
if (int128_eq(llsize, int128_2_64())) {
/* The unmap ioctl doesn't accept a full 64-bit span. */
llsize = int128_rshift(llsize, 1);
- ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
+ ret = vfio_container_dma_unmap(bcontainer, iova,
+ int128_get64(llsize), NULL);
if (ret) {
- error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx") = %d (%s)",
- container, iova, int128_get64(llsize), ret,
+ bcontainer, iova, int128_get64(llsize), ret,
strerror(-ret));
}
iova += int128_get64(llsize);
}
- ret = vfio_dma_unmap(container, iova, int128_get64(llsize), NULL);
+ ret = vfio_container_dma_unmap(bcontainer, iova,
+ int128_get64(llsize), NULL);
if (ret) {
- error_report("vfio_dma_unmap(%p, 0x%"HWADDR_PRIx", "
+ error_report("vfio_container_dma_unmap(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx") = %d (%s)",
- container, iova, int128_get64(llsize), ret,
+ bcontainer, iova, int128_get64(llsize), ret,
strerror(-ret));
}
}
memory_region_unref(section->mr);
- vfio_container_del_section_window(container, section);
+ vfio_container_del_section_window(bcontainer, section);
}
typedef struct VFIODirtyRanges {
} VFIODirtyRanges;
typedef struct VFIODirtyRangesListener {
- VFIOContainer *container;
+ VFIOContainerBase *bcontainer;
VFIODirtyRanges ranges;
MemoryListener listener;
} VFIODirtyRangesListener;
static bool vfio_section_is_vfio_pci(MemoryRegionSection *section,
- VFIOContainer *container)
+ VFIOContainerBase *bcontainer)
{
VFIOPCIDevice *pcidev;
VFIODevice *vbasedev;
owner = memory_region_owner(section->mr);
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
if (vbasedev->type != VFIO_DEVICE_TYPE_PCI) {
continue;
}
hwaddr iova, end, *min, *max;
if (!vfio_listener_valid_section(section, "tracking_update") ||
- !vfio_get_section_iova_range(dirty->container, section,
+ !vfio_get_section_iova_range(dirty->bcontainer, section,
&iova, &end, NULL)) {
return;
}
* The alternative would be an IOVATree but that has a much bigger runtime
* overhead and unnecessary complexity.
*/
- if (vfio_section_is_vfio_pci(section, dirty->container) &&
+ if (vfio_section_is_vfio_pci(section, dirty->bcontainer) &&
iova >= UINT32_MAX) {
min = &range->minpci64;
max = &range->maxpci64;
.region_add = vfio_dirty_tracking_update,
};
-static void vfio_dirty_tracking_init(VFIOContainer *container,
+static void vfio_dirty_tracking_init(VFIOContainerBase *bcontainer,
VFIODirtyRanges *ranges)
{
VFIODirtyRangesListener dirty;
dirty.ranges.min64 = UINT64_MAX;
dirty.ranges.minpci64 = UINT64_MAX;
dirty.listener = vfio_dirty_tracking_listener;
- dirty.container = container;
+ dirty.bcontainer = bcontainer;
memory_listener_register(&dirty.listener,
- container->space->as);
+ bcontainer->space->as);
*ranges = dirty.ranges;
memory_listener_unregister(&dirty.listener);
}
-static void vfio_devices_dma_logging_stop(VFIOContainer *container)
+static void vfio_devices_dma_logging_stop(VFIOContainerBase *bcontainer)
{
uint64_t buf[DIV_ROUND_UP(sizeof(struct vfio_device_feature),
sizeof(uint64_t))] = {};
feature->flags = VFIO_DEVICE_FEATURE_SET |
VFIO_DEVICE_FEATURE_DMA_LOGGING_STOP;
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
if (!vbasedev->dirty_tracking) {
continue;
}
}
static struct vfio_device_feature *
-vfio_device_feature_dma_logging_start_create(VFIOContainer *container,
+vfio_device_feature_dma_logging_start_create(VFIOContainerBase *bcontainer,
VFIODirtyRanges *tracking)
{
struct vfio_device_feature *feature;
g_free(feature);
}
-static int vfio_devices_dma_logging_start(VFIOContainer *container)
+static int vfio_devices_dma_logging_start(VFIOContainerBase *bcontainer)
{
struct vfio_device_feature *feature;
VFIODirtyRanges ranges;
VFIODevice *vbasedev;
int ret = 0;
- vfio_dirty_tracking_init(container, &ranges);
- feature = vfio_device_feature_dma_logging_start_create(container,
+ vfio_dirty_tracking_init(bcontainer, &ranges);
+ feature = vfio_device_feature_dma_logging_start_create(bcontainer,
&ranges);
if (!feature) {
return -errno;
}
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
if (vbasedev->dirty_tracking) {
continue;
}
out:
if (ret) {
- vfio_devices_dma_logging_stop(container);
+ vfio_devices_dma_logging_stop(bcontainer);
}
vfio_device_feature_dma_logging_start_destroy(feature);
static void vfio_listener_log_global_start(MemoryListener *listener)
{
- VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
int ret;
- if (vfio_devices_all_device_dirty_tracking(container)) {
- ret = vfio_devices_dma_logging_start(container);
+ if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
+ ret = vfio_devices_dma_logging_start(bcontainer);
} else {
- ret = vfio_set_dirty_page_tracking(container, true);
+ ret = vfio_container_set_dirty_page_tracking(bcontainer, true);
}
if (ret) {
static void vfio_listener_log_global_stop(MemoryListener *listener)
{
- VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
int ret = 0;
- if (vfio_devices_all_device_dirty_tracking(container)) {
- vfio_devices_dma_logging_stop(container);
+ if (vfio_devices_all_device_dirty_tracking(bcontainer)) {
+ vfio_devices_dma_logging_stop(bcontainer);
} else {
- ret = vfio_set_dirty_page_tracking(container, false);
+ ret = vfio_container_set_dirty_page_tracking(bcontainer, false);
}
if (ret) {
return 0;
}
-int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
+int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
VFIOBitmap *vbmap, hwaddr iova,
hwaddr size)
{
VFIODevice *vbasedev;
int ret;
- QLIST_FOREACH(vbasedev, &container->device_list, container_next) {
+ QLIST_FOREACH(vbasedev, &bcontainer->device_list, container_next) {
ret = vfio_device_dma_logging_report(vbasedev, iova, size,
vbmap->bitmap);
if (ret) {
return 0;
}
-int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
+int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
uint64_t size, ram_addr_t ram_addr)
{
bool all_device_dirty_tracking =
- vfio_devices_all_device_dirty_tracking(container);
+ vfio_devices_all_device_dirty_tracking(bcontainer);
uint64_t dirty_pages;
VFIOBitmap vbmap;
int ret;
- if (!container->dirty_pages_supported && !all_device_dirty_tracking) {
+ if (!bcontainer->dirty_pages_supported && !all_device_dirty_tracking) {
cpu_physical_memory_set_dirty_range(ram_addr, size,
tcg_enabled() ? DIRTY_CLIENTS_ALL :
DIRTY_CLIENTS_NOCODE);
}
if (all_device_dirty_tracking) {
- ret = vfio_devices_query_dirty_bitmap(container, &vbmap, iova, size);
+ ret = vfio_devices_query_dirty_bitmap(bcontainer, &vbmap, iova, size);
} else {
- ret = vfio_query_dirty_bitmap(container, &vbmap, iova, size);
+ ret = vfio_container_query_dirty_bitmap(bcontainer, &vbmap, iova, size);
}
if (ret) {
dirty_pages = cpu_physical_memory_set_dirty_lebitmap(vbmap.bitmap, ram_addr,
vbmap.pages);
- trace_vfio_get_dirty_bitmap(container->fd, iova, size, vbmap.size,
- ram_addr, dirty_pages);
+ trace_vfio_get_dirty_bitmap(iova, size, vbmap.size, ram_addr, dirty_pages);
out:
g_free(vbmap.bitmap);
vfio_giommu_dirty_notifier *gdn = container_of(n,
vfio_giommu_dirty_notifier, n);
VFIOGuestIOMMU *giommu = gdn->giommu;
- VFIOContainer *container = giommu->container;
+ VFIOContainerBase *bcontainer = giommu->bcontainer;
hwaddr iova = iotlb->iova + giommu->iommu_offset;
ram_addr_t translated_addr;
int ret = -EINVAL;
rcu_read_lock();
if (vfio_get_xlat_addr(iotlb, NULL, &translated_addr, NULL)) {
- ret = vfio_get_dirty_bitmap(container, iova, iotlb->addr_mask + 1,
+ ret = vfio_get_dirty_bitmap(bcontainer, iova, iotlb->addr_mask + 1,
translated_addr);
if (ret) {
error_report("vfio_iommu_map_dirty_notify(%p, 0x%"HWADDR_PRIx", "
"0x%"HWADDR_PRIx") = %d (%s)",
- container, iova, iotlb->addr_mask + 1, ret,
+ bcontainer, iova, iotlb->addr_mask + 1, ret,
strerror(-ret));
}
}
* Sync the whole mapped region (spanning multiple individual mappings)
* in one go.
*/
- return vfio_get_dirty_bitmap(vrdl->container, iova, size, ram_addr);
+ return vfio_get_dirty_bitmap(vrdl->bcontainer, iova, size, ram_addr);
}
-static int vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainer *container,
- MemoryRegionSection *section)
+static int
+vfio_sync_ram_discard_listener_dirty_bitmap(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
{
RamDiscardManager *rdm = memory_region_get_ram_discard_manager(section->mr);
VFIORamDiscardListener *vrdl = NULL;
- QLIST_FOREACH(vrdl, &container->vrdl_list, next) {
+ QLIST_FOREACH(vrdl, &bcontainer->vrdl_list, next) {
if (vrdl->mr == section->mr &&
vrdl->offset_within_address_space ==
section->offset_within_address_space) {
&vrdl);
}
-static int vfio_sync_dirty_bitmap(VFIOContainer *container,
+static int vfio_sync_dirty_bitmap(VFIOContainerBase *bcontainer,
MemoryRegionSection *section)
{
ram_addr_t ram_addr;
if (memory_region_is_iommu(section->mr)) {
VFIOGuestIOMMU *giommu;
- QLIST_FOREACH(giommu, &container->giommu_list, giommu_next) {
+ QLIST_FOREACH(giommu, &bcontainer->giommu_list, giommu_next) {
if (MEMORY_REGION(giommu->iommu_mr) == section->mr &&
giommu->n.start == section->offset_within_region) {
Int128 llend;
}
return 0;
} else if (memory_region_has_ram_discard_manager(section->mr)) {
- return vfio_sync_ram_discard_listener_dirty_bitmap(container, section);
+ return vfio_sync_ram_discard_listener_dirty_bitmap(bcontainer, section);
}
ram_addr = memory_region_get_ram_addr(section->mr) +
section->offset_within_region;
- return vfio_get_dirty_bitmap(container,
+ return vfio_get_dirty_bitmap(bcontainer,
REAL_HOST_PAGE_ALIGN(section->offset_within_address_space),
int128_get64(section->size), ram_addr);
}
static void vfio_listener_log_sync(MemoryListener *listener,
MemoryRegionSection *section)
{
- VFIOContainer *container = container_of(listener, VFIOContainer, listener);
+ VFIOContainerBase *bcontainer = container_of(listener, VFIOContainerBase,
+ listener);
int ret;
if (vfio_listener_skipped_section(section)) {
return;
}
- if (vfio_devices_all_dirty_tracking(container)) {
- ret = vfio_sync_dirty_bitmap(container, section);
+ if (vfio_devices_all_dirty_tracking(bcontainer)) {
+ ret = vfio_sync_dirty_bitmap(bcontainer, section);
if (ret) {
error_report("vfio: Failed to sync dirty bitmap, err: %d (%s)", ret,
strerror(-ret));
void vfio_put_address_space(VFIOAddressSpace *space)
{
- if (QLIST_EMPTY(&space->containers)) {
- QLIST_REMOVE(space, list);
- g_free(space);
+ if (!QLIST_EMPTY(&space->containers)) {
+ return;
}
+
+ QLIST_REMOVE(space, list);
+ g_free(space);
+
if (QLIST_EMPTY(&vfio_address_spaces)) {
qemu_unregister_reset(vfio_reset_handler, NULL);
}
return info;
}
+
+int vfio_attach_device(char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp)
+{
+ const VFIOIOMMUOps *ops = &vfio_legacy_ops;
+
+#ifdef CONFIG_IOMMUFD
+ if (vbasedev->iommufd) {
+ ops = &vfio_iommufd_ops;
+ }
+#endif
+ return ops->attach_device(name, vbasedev, as, errp);
+}
+
+void vfio_detach_device(VFIODevice *vbasedev)
+{
+ if (!vbasedev->bcontainer) {
+ return;
+ }
+ vbasedev->bcontainer->ops->detach_device(vbasedev);
+}
--- /dev/null
+/*
+ * VFIO BASE CONTAINER
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include "qapi/error.h"
+#include "qemu/error-report.h"
+#include "hw/vfio/vfio-container-base.h"
+
+int vfio_container_dma_map(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly)
+{
+ g_assert(bcontainer->ops->dma_map);
+ return bcontainer->ops->dma_map(bcontainer, iova, size, vaddr, readonly);
+}
+
+int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
+{
+ g_assert(bcontainer->ops->dma_unmap);
+ return bcontainer->ops->dma_unmap(bcontainer, iova, size, iotlb);
+}
+
+int vfio_container_add_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp)
+{
+ if (!bcontainer->ops->add_window) {
+ return 0;
+ }
+
+ return bcontainer->ops->add_window(bcontainer, section, errp);
+}
+
+void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
+{
+ if (!bcontainer->ops->del_window) {
+ return;
+ }
+
+ return bcontainer->ops->del_window(bcontainer, section);
+}
+
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start)
+{
+ if (!bcontainer->dirty_pages_supported) {
+ return 0;
+ }
+
+ g_assert(bcontainer->ops->set_dirty_page_tracking);
+ return bcontainer->ops->set_dirty_page_tracking(bcontainer, start);
+}
+
+int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size)
+{
+ g_assert(bcontainer->ops->query_dirty_bitmap);
+ return bcontainer->ops->query_dirty_bitmap(bcontainer, vbmap, iova, size);
+}
+
+void vfio_container_init(VFIOContainerBase *bcontainer, VFIOAddressSpace *space,
+ const VFIOIOMMUOps *ops)
+{
+ bcontainer->ops = ops;
+ bcontainer->space = space;
+ bcontainer->error = NULL;
+ bcontainer->dirty_pages_supported = false;
+ bcontainer->dma_max_mappings = 0;
+ bcontainer->iova_ranges = NULL;
+ QLIST_INIT(&bcontainer->giommu_list);
+ QLIST_INIT(&bcontainer->vrdl_list);
+}
+
+void vfio_container_destroy(VFIOContainerBase *bcontainer)
+{
+ VFIOGuestIOMMU *giommu, *tmp;
+
+ QLIST_REMOVE(bcontainer, next);
+
+ QLIST_FOREACH_SAFE(giommu, &bcontainer->giommu_list, giommu_next, tmp) {
+ memory_region_unregister_iommu_notifier(
+ MEMORY_REGION(giommu->iommu_mr), &giommu->n);
+ QLIST_REMOVE(giommu, giommu_next);
+ g_free(giommu);
+ }
+
+ g_list_free_full(bcontainer->iova_ranges, g_free);
+}
#include "trace.h"
#include "qapi/error.h"
#include "migration/migration.h"
+#include "pci.h"
VFIOGroupList vfio_group_list =
QLIST_HEAD_INITIALIZER(vfio_group_list);
}
}
-static int vfio_dma_unmap_bitmap(VFIOContainer *container,
+static int vfio_dma_unmap_bitmap(const VFIOContainer *container,
hwaddr iova, ram_addr_t size,
IOMMUTLBEntry *iotlb)
{
+ const VFIOContainerBase *bcontainer = &container->bcontainer;
struct vfio_iommu_type1_dma_unmap *unmap;
struct vfio_bitmap *bitmap;
VFIOBitmap vbmap;
bitmap->size = vbmap.size;
bitmap->data = (__u64 *)vbmap.bitmap;
- if (vbmap.size > container->max_dirty_bitmap_size) {
+ if (vbmap.size > bcontainer->max_dirty_bitmap_size) {
error_report("UNMAP: Size of bitmap too big 0x%"PRIx64, vbmap.size);
ret = -E2BIG;
goto unmap_exit;
/*
* DMA - Mapping and unmapping for the "type1" IOMMU interface used on x86
*/
-int vfio_dma_unmap(VFIOContainer *container, hwaddr iova,
- ram_addr_t size, IOMMUTLBEntry *iotlb)
+static int vfio_legacy_dma_unmap(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
{
+ const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
struct vfio_iommu_type1_dma_unmap unmap = {
.argsz = sizeof(unmap),
.flags = 0,
bool need_dirty_sync = false;
int ret;
- if (iotlb && vfio_devices_all_running_and_mig_active(container)) {
- if (!vfio_devices_all_device_dirty_tracking(container) &&
- container->dirty_pages_supported) {
+ if (iotlb && vfio_devices_all_running_and_mig_active(bcontainer)) {
+ if (!vfio_devices_all_device_dirty_tracking(bcontainer) &&
+ bcontainer->dirty_pages_supported) {
return vfio_dma_unmap_bitmap(container, iova, size, iotlb);
}
*/
if (errno == EINVAL && unmap.size && !(unmap.iova + unmap.size) &&
container->iommu_type == VFIO_TYPE1v2_IOMMU) {
- trace_vfio_dma_unmap_overflow_workaround();
- unmap.size -= 1ULL << ctz64(container->pgsizes);
+ trace_vfio_legacy_dma_unmap_overflow_workaround();
+ unmap.size -= 1ULL << ctz64(bcontainer->pgsizes);
continue;
}
error_report("VFIO_UNMAP_DMA failed: %s", strerror(errno));
}
if (need_dirty_sync) {
- ret = vfio_get_dirty_bitmap(container, iova, size,
+ ret = vfio_get_dirty_bitmap(bcontainer, iova, size,
iotlb->translated_addr);
if (ret) {
return ret;
return 0;
}
-int vfio_dma_map(VFIOContainer *container, hwaddr iova,
- ram_addr_t size, void *vaddr, bool readonly)
+static int vfio_legacy_dma_map(const VFIOContainerBase *bcontainer, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly)
{
+ const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
struct vfio_iommu_type1_dma_map map = {
.argsz = sizeof(map),
.flags = VFIO_DMA_MAP_FLAG_READ,
* the VGA ROM space.
*/
if (ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0 ||
- (errno == EBUSY && vfio_dma_unmap(container, iova, size, NULL) == 0 &&
+ (errno == EBUSY &&
+ vfio_legacy_dma_unmap(bcontainer, iova, size, NULL) == 0 &&
ioctl(container->fd, VFIO_IOMMU_MAP_DMA, &map) == 0)) {
return 0;
}
return -errno;
}
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start)
+static int
+vfio_legacy_set_dirty_page_tracking(const VFIOContainerBase *bcontainer,
+ bool start)
{
+ const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
int ret;
struct vfio_iommu_type1_dirty_bitmap dirty = {
.argsz = sizeof(dirty),
};
- if (!container->dirty_pages_supported) {
- return 0;
- }
-
if (start) {
dirty.flags = VFIO_IOMMU_DIRTY_PAGES_FLAG_START;
} else {
return ret;
}
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
- hwaddr iova, hwaddr size)
+static int vfio_legacy_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size)
{
+ const VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
struct vfio_iommu_type1_dirty_bitmap *dbitmap;
struct vfio_iommu_type1_dirty_bitmap_get *range;
int ret;
}
static bool vfio_get_info_iova_range(struct vfio_iommu_type1_info *info,
- VFIOContainer *container)
+ VFIOContainerBase *bcontainer)
{
struct vfio_info_cap_header *hdr;
struct vfio_iommu_type1_info_cap_iova_range *cap;
range_set_bounds(range, cap->iova_ranges[i].start,
cap->iova_ranges[i].end);
- container->iova_ranges =
- range_list_insert(container->iova_ranges, range);
+ bcontainer->iova_ranges =
+ range_list_insert(bcontainer->iova_ranges, range);
}
return true;
{
struct vfio_info_cap_header *hdr;
struct vfio_iommu_type1_info_cap_migration *cap_mig;
+ VFIOContainerBase *bcontainer = &container->bcontainer;
hdr = vfio_get_iommu_info_cap(info, VFIO_IOMMU_TYPE1_INFO_CAP_MIGRATION);
if (!hdr) {
* qemu_real_host_page_size to mark those dirty.
*/
if (cap_mig->pgsize_bitmap & qemu_real_host_page_size()) {
- container->dirty_pages_supported = true;
- container->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
- container->dirty_pgsizes = cap_mig->pgsize_bitmap;
+ bcontainer->dirty_pages_supported = true;
+ bcontainer->max_dirty_bitmap_size = cap_mig->max_dirty_bitmap_size;
+ bcontainer->dirty_pgsizes = cap_mig->pgsize_bitmap;
}
}
-static void vfio_free_container(VFIOContainer *container)
-{
- g_list_free_full(container->iova_ranges, g_free);
- g_free(container);
-}
-
static int vfio_connect_container(VFIOGroup *group, AddressSpace *as,
Error **errp)
{
VFIOContainer *container;
+ VFIOContainerBase *bcontainer;
int ret, fd;
VFIOAddressSpace *space;
* details once we know which type of IOMMU we are using.
*/
- QLIST_FOREACH(container, &space->containers, next) {
+ QLIST_FOREACH(bcontainer, &space->containers, next) {
+ container = container_of(bcontainer, VFIOContainer, bcontainer);
if (!ioctl(group->fd, VFIO_GROUP_SET_CONTAINER, &container->fd)) {
ret = vfio_ram_block_discard_disable(container, true);
if (ret) {
}
container = g_malloc0(sizeof(*container));
- container->space = space;
container->fd = fd;
- container->error = NULL;
- container->dirty_pages_supported = false;
- container->dma_max_mappings = 0;
- container->iova_ranges = NULL;
- QLIST_INIT(&container->giommu_list);
- QLIST_INIT(&container->vrdl_list);
+ bcontainer = &container->bcontainer;
+ vfio_container_init(bcontainer, space, &vfio_legacy_ops);
ret = vfio_init_container(container, group->fd, errp);
if (ret) {
}
if (info->flags & VFIO_IOMMU_INFO_PGSIZES) {
- container->pgsizes = info->iova_pgsizes;
+ bcontainer->pgsizes = info->iova_pgsizes;
} else {
- container->pgsizes = qemu_real_host_page_size();
+ bcontainer->pgsizes = qemu_real_host_page_size();
}
- if (!vfio_get_info_dma_avail(info, &container->dma_max_mappings)) {
- container->dma_max_mappings = 65535;
+ if (!vfio_get_info_dma_avail(info, &bcontainer->dma_max_mappings)) {
+ bcontainer->dma_max_mappings = 65535;
}
- vfio_get_info_iova_range(info, container);
+ vfio_get_info_iova_range(info, bcontainer);
vfio_get_iommu_info_migration(container, info);
g_free(info);
vfio_kvm_device_add_group(group);
QLIST_INIT(&container->group_list);
- QLIST_INSERT_HEAD(&space->containers, container, next);
+ QLIST_INSERT_HEAD(&space->containers, bcontainer, next);
group->container = container;
QLIST_INSERT_HEAD(&container->group_list, group, container_next);
- container->listener = vfio_memory_listener;
+ bcontainer->listener = vfio_memory_listener;
+ memory_listener_register(&bcontainer->listener, bcontainer->space->as);
- memory_listener_register(&container->listener, container->space->as);
-
- if (container->error) {
+ if (bcontainer->error) {
ret = -1;
- error_propagate_prepend(errp, container->error,
+ error_propagate_prepend(errp, bcontainer->error,
"memory listener initialization failed: ");
goto listener_release_exit;
}
- container->initialized = true;
+ bcontainer->initialized = true;
return 0;
listener_release_exit:
QLIST_REMOVE(group, container_next);
- QLIST_REMOVE(container, next);
+ QLIST_REMOVE(bcontainer, next);
vfio_kvm_device_del_group(group);
- memory_listener_unregister(&container->listener);
+ memory_listener_unregister(&bcontainer->listener);
if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU ||
container->iommu_type == VFIO_SPAPR_TCE_IOMMU) {
vfio_spapr_container_deinit(container);
vfio_ram_block_discard_disable(container, false);
free_container_exit:
- vfio_free_container(container);
+ g_free(container);
close_fd_exit:
close(fd);
static void vfio_disconnect_container(VFIOGroup *group)
{
VFIOContainer *container = group->container;
+ VFIOContainerBase *bcontainer = &container->bcontainer;
QLIST_REMOVE(group, container_next);
group->container = NULL;
* group.
*/
if (QLIST_EMPTY(&container->group_list)) {
- memory_listener_unregister(&container->listener);
+ memory_listener_unregister(&bcontainer->listener);
if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU ||
container->iommu_type == VFIO_SPAPR_TCE_IOMMU) {
vfio_spapr_container_deinit(container);
}
if (QLIST_EMPTY(&container->group_list)) {
- VFIOAddressSpace *space = container->space;
- VFIOGuestIOMMU *giommu, *tmp;
+ VFIOAddressSpace *space = bcontainer->space;
- QLIST_REMOVE(container, next);
-
- QLIST_FOREACH_SAFE(giommu, &container->giommu_list, giommu_next, tmp) {
- memory_region_unregister_iommu_notifier(
- MEMORY_REGION(giommu->iommu_mr), &giommu->n);
- QLIST_REMOVE(giommu, giommu_next);
- g_free(giommu);
- }
+ vfio_container_destroy(bcontainer);
trace_vfio_disconnect_container(container->fd);
close(container->fd);
- vfio_free_container(container);
+ g_free(container);
vfio_put_address_space(space);
}
QLIST_FOREACH(group, &vfio_group_list, next) {
if (group->groupid == groupid) {
/* Found it. Now is it already in the right context? */
- if (group->container->space->as == as) {
+ if (group->container->bcontainer.space->as == as) {
return group;
} else {
error_setg(errp, "group %d used in multiple address spaces",
* @name and @vbasedev->name are likely to be different depending
* on the type of the device, hence the need for passing @name
*/
-int vfio_attach_device(char *name, VFIODevice *vbasedev,
- AddressSpace *as, Error **errp)
+static int vfio_legacy_attach_device(const char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp)
{
int groupid = vfio_device_groupid(vbasedev, errp);
VFIODevice *vbasedev_iter;
VFIOGroup *group;
- VFIOContainer *container;
+ VFIOContainerBase *bcontainer;
int ret;
if (groupid < 0) {
return ret;
}
- container = group->container;
- vbasedev->container = container;
- QLIST_INSERT_HEAD(&container->device_list, vbasedev, container_next);
+ bcontainer = &group->container->bcontainer;
+ vbasedev->bcontainer = bcontainer;
+ QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);
return ret;
}
-void vfio_detach_device(VFIODevice *vbasedev)
+static void vfio_legacy_detach_device(VFIODevice *vbasedev)
{
VFIOGroup *group = vbasedev->group;
- if (!vbasedev->container) {
- return;
- }
-
QLIST_REMOVE(vbasedev, global_next);
QLIST_REMOVE(vbasedev, container_next);
- vbasedev->container = NULL;
+ vbasedev->bcontainer = NULL;
trace_vfio_detach_device(vbasedev->name, group->groupid);
vfio_put_base_device(vbasedev);
vfio_put_group(group);
}
+
+static int vfio_legacy_pci_hot_reset(VFIODevice *vbasedev, bool single)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ VFIOGroup *group;
+ struct vfio_pci_hot_reset_info *info = NULL;
+ struct vfio_pci_dependent_device *devices;
+ struct vfio_pci_hot_reset *reset;
+ int32_t *fds;
+ int ret, i, count;
+ bool multi = false;
+
+ trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
+
+ if (!single) {
+ vfio_pci_pre_reset(vdev);
+ }
+ vdev->vbasedev.needs_reset = false;
+
+ ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);
+
+ if (ret) {
+ goto out_single;
+ }
+ devices = &info->devices[0];
+
+ trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
+
+ /* Verify that we have all the groups required */
+ for (i = 0; i < info->count; i++) {
+ PCIHostDeviceAddress host;
+ VFIOPCIDevice *tmp;
+ VFIODevice *vbasedev_iter;
+
+ host.domain = devices[i].segment;
+ host.bus = devices[i].bus;
+ host.slot = PCI_SLOT(devices[i].devfn);
+ host.function = PCI_FUNC(devices[i].devfn);
+
+ trace_vfio_pci_hot_reset_dep_devices(host.domain,
+ host.bus, host.slot, host.function, devices[i].group_id);
+
+ if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
+ continue;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == devices[i].group_id) {
+ break;
+ }
+ }
+
+ if (!group) {
+ if (!vdev->has_pm_reset) {
+ error_report("vfio: Cannot reset device %s, "
+ "depends on group %d which is not owned.",
+ vdev->vbasedev.name, devices[i].group_id);
+ }
+ ret = -EPERM;
+ goto out;
+ }
+
+ /* Prep dependent devices for reset and clear our marker. */
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
+ if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
+ if (single) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+ vfio_pci_pre_reset(tmp);
+ tmp->vbasedev.needs_reset = false;
+ multi = true;
+ break;
+ }
+ }
+ }
+
+ if (!single && !multi) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+
+ /* Determine how many group fds need to be passed */
+ count = 0;
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ for (i = 0; i < info->count; i++) {
+ if (group->groupid == devices[i].group_id) {
+ count++;
+ break;
+ }
+ }
+ }
+
+ reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
+ reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
+ fds = &reset->group_fds[0];
+
+ /* Fill in group fds */
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ for (i = 0; i < info->count; i++) {
+ if (group->groupid == devices[i].group_id) {
+ fds[reset->count++] = group->fd;
+ break;
+ }
+ }
+ }
+
+ /* Bus reset! */
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
+ g_free(reset);
+ if (ret) {
+ ret = -errno;
+ }
+
+ trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
+ ret ? strerror(errno) : "Success");
+
+out:
+ /* Re-enable INTx on affected devices */
+ for (i = 0; i < info->count; i++) {
+ PCIHostDeviceAddress host;
+ VFIOPCIDevice *tmp;
+ VFIODevice *vbasedev_iter;
+
+ host.domain = devices[i].segment;
+ host.bus = devices[i].bus;
+ host.slot = PCI_SLOT(devices[i].devfn);
+ host.function = PCI_FUNC(devices[i].devfn);
+
+ if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
+ continue;
+ }
+
+ QLIST_FOREACH(group, &vfio_group_list, next) {
+ if (group->groupid == devices[i].group_id) {
+ break;
+ }
+ }
+
+ if (!group) {
+ break;
+ }
+
+ QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
+ if (!vbasedev_iter->dev->realized ||
+ vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
+ continue;
+ }
+ tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
+ if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
+ vfio_pci_post_reset(tmp);
+ break;
+ }
+ }
+ }
+out_single:
+ if (!single) {
+ vfio_pci_post_reset(vdev);
+ }
+ g_free(info);
+
+ return ret;
+}
+
+const VFIOIOMMUOps vfio_legacy_ops = {
+ .dma_map = vfio_legacy_dma_map,
+ .dma_unmap = vfio_legacy_dma_unmap,
+ .attach_device = vfio_legacy_attach_device,
+ .detach_device = vfio_legacy_detach_device,
+ .set_dirty_page_tracking = vfio_legacy_set_dirty_page_tracking,
+ .query_dirty_bitmap = vfio_legacy_query_dirty_bitmap,
+ .pci_hot_reset = vfio_legacy_pci_hot_reset,
+};
#include "trace.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
+#include "monitor/monitor.h"
/*
* Common VFIO interrupt disable
return ret;
}
+
+int vfio_device_get_name(VFIODevice *vbasedev, Error **errp)
+{
+ struct stat st;
+
+ if (vbasedev->fd < 0) {
+ if (stat(vbasedev->sysfsdev, &st) < 0) {
+ error_setg_errno(errp, errno, "no such host device");
+ error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
+ return -errno;
+ }
+ /* The user may specify a name, e.g. for a VFIO platform device */
+ if (!vbasedev->name) {
+ vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
+ }
+ } else {
+ if (!vbasedev->iommufd) {
+ error_setg(errp, "Use FD passing only with iommufd backend");
+ return -EINVAL;
+ }
+ /*
+ * Derive a name from the fd so that any function printing out
+ * vbasedev->name does not break.
+ */
+ if (!vbasedev->name) {
+ vbasedev->name = g_strdup_printf("VFIO_FD%d", vbasedev->fd);
+ }
+ }
+
+ return 0;
+}
+
+void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp)
+{
+ int fd = monitor_fd_param(monitor_cur(), str, errp);
+
+ if (fd < 0) {
+ error_prepend(errp, "Could not parse remote object fd %s:", str);
+ return;
+ }
+ vbasedev->fd = fd;
+}
+
+void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
+ DeviceState *dev, bool ram_discard)
+{
+ vbasedev->type = type;
+ vbasedev->ops = ops;
+ vbasedev->dev = dev;
+ vbasedev->fd = -1;
+
+ vbasedev->ram_block_discard_allowed = ram_discard;
+}
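+
+/*
+ * A minimal usage sketch, mirroring what the vfio-pci and vfio-platform
+ * realize paths do with these helpers: initialize the device from
+ * instance_init, optionally accept a pre-opened fd through the "fd"
+ * property, and derive a printable name before attaching.
+ *
+ * vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops,
+ * DEVICE(vdev), false);
+ * ...
+ * if (vfio_device_get_name(vbasedev, errp) < 0) {
+ * return;
+ * }
+ */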
--- /dev/null
+/*
+ * iommufd container backend
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#include "qemu/osdep.h"
+#include <sys/ioctl.h>
+#include <linux/vfio.h>
+#include <linux/iommufd.h>
+
+#include "hw/vfio/vfio-common.h"
+#include "qemu/error-report.h"
+#include "trace.h"
+#include "qapi/error.h"
+#include "sysemu/iommufd.h"
+#include "hw/qdev-core.h"
+#include "sysemu/reset.h"
+#include "qemu/cutils.h"
+#include "qemu/chardev_open.h"
+#include "pci.h"
+
+static int iommufd_cdev_map(const VFIOContainerBase *bcontainer, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly)
+{
+ const VFIOIOMMUFDContainer *container =
+ container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
+
+ return iommufd_backend_map_dma(container->be,
+ container->ioas_id,
+ iova, size, vaddr, readonly);
+}
+
+static int iommufd_cdev_unmap(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb)
+{
+ const VFIOIOMMUFDContainer *container =
+ container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
+
+ /* TODO: Handle dma_unmap_bitmap with iotlb args (migration) */
+ return iommufd_backend_unmap_dma(container->be,
+ container->ioas_id, iova, size);
+}
+
+static int iommufd_cdev_kvm_device_add(VFIODevice *vbasedev, Error **errp)
+{
+ return vfio_kvm_device_add_fd(vbasedev->fd, errp);
+}
+
+static void iommufd_cdev_kvm_device_del(VFIODevice *vbasedev)
+{
+ Error *err = NULL;
+
+ if (vfio_kvm_device_del_fd(vbasedev->fd, &err)) {
+ error_report_err(err);
+ }
+}
+
+static int iommufd_cdev_connect_and_bind(VFIODevice *vbasedev, Error **errp)
+{
+ IOMMUFDBackend *iommufd = vbasedev->iommufd;
+ struct vfio_device_bind_iommufd bind = {
+ .argsz = sizeof(bind),
+ .flags = 0,
+ };
+ int ret;
+
+ ret = iommufd_backend_connect(iommufd, errp);
+ if (ret) {
+ return ret;
+ }
+
+ /*
+ * Add the device to kvm-vfio so KVM can track it. Some emulated
+ * devices in particular require the KVM information at device
+ * open time.
+ */
+ ret = iommufd_cdev_kvm_device_add(vbasedev, errp);
+ if (ret) {
+ goto err_kvm_device_add;
+ }
+
+ /* Bind device to iommufd */
+ bind.iommufd = iommufd->fd;
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_BIND_IOMMUFD, &bind);
+ if (ret) {
+ error_setg_errno(errp, errno, "error bind device fd=%d to iommufd=%d",
+ vbasedev->fd, bind.iommufd);
+ goto err_bind;
+ }
+
+ vbasedev->devid = bind.out_devid;
+ trace_iommufd_cdev_connect_and_bind(bind.iommufd, vbasedev->name,
+ vbasedev->fd, vbasedev->devid);
+ return ret;
+err_bind:
+ iommufd_cdev_kvm_device_del(vbasedev);
+err_kvm_device_add:
+ iommufd_backend_disconnect(iommufd);
+ return ret;
+}
+
+static void iommufd_cdev_unbind_and_disconnect(VFIODevice *vbasedev)
+{
+ /* Unbinding happens automatically when the device fd is closed */
+ iommufd_cdev_kvm_device_del(vbasedev);
+ iommufd_backend_disconnect(vbasedev->iommufd);
+}
+
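+/*
+ * Resolve a VFIO cdev fd from a sysfs path. As a hypothetical example,
+ * "/sys/bus/pci/devices/0000:02:00.0" is expected to contain
+ * "vfio-dev/vfioX/dev" holding "major:minor", which designates the
+ * character device "/dev/vfio/devices/vfioX" opened via open_cdev().
+ */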
+static int iommufd_cdev_getfd(const char *sysfs_path, Error **errp)
+{
+ long int ret = -ENOTTY;
+ char *path, *vfio_dev_path = NULL, *vfio_path = NULL;
+ DIR *dir = NULL;
+ struct dirent *dent;
+ gchar *contents;
+ struct stat st;
+ gsize length;
+ int major, minor;
+ dev_t vfio_devt;
+
+ path = g_strdup_printf("%s/vfio-dev", sysfs_path);
+ if (stat(path, &st) < 0) {
+ error_setg_errno(errp, errno, "no such host device");
+ goto out_free_path;
+ }
+
+ dir = opendir(path);
+ if (!dir) {
+ error_setg_errno(errp, errno, "couldn't open directory %s", path);
+ goto out_free_path;
+ }
+
+ while ((dent = readdir(dir))) {
+ if (!strncmp(dent->d_name, "vfio", 4)) {
+ vfio_dev_path = g_strdup_printf("%s/%s/dev", path, dent->d_name);
+ break;
+ }
+ }
+
+ if (!vfio_dev_path) {
+ error_setg(errp, "failed to find vfio-dev/vfioX/dev");
+ goto out_close_dir;
+ }
+
+ if (!g_file_get_contents(vfio_dev_path, &contents, &length, NULL)) {
+ error_setg(errp, "failed to load \"%s\"", vfio_dev_path);
+ goto out_free_dev_path;
+ }
+
+ if (sscanf(contents, "%d:%d", &major, &minor) != 2) {
+ error_setg(errp, "failed to get major:minor for \"%s\"", vfio_dev_path);
+ g_free(contents);
+ goto out_free_dev_path;
+ }
+ g_free(contents);
+ vfio_devt = makedev(major, minor);
+
+ vfio_path = g_strdup_printf("/dev/vfio/devices/%s", dent->d_name);
+ ret = open_cdev(vfio_path, vfio_devt);
+ if (ret < 0) {
+ error_setg(errp, "Failed to open %s", vfio_path);
+ }
+
+ trace_iommufd_cdev_getfd(vfio_path, ret);
+ g_free(vfio_path);
+
+out_free_dev_path:
+ g_free(vfio_dev_path);
+out_close_dir:
+ closedir(dir);
+out_free_path:
+ if (*errp) {
+ error_prepend(errp, VFIO_MSG_PREFIX, path);
+ }
+ g_free(path);
+
+ return ret;
+}
+
+static int iommufd_cdev_attach_ioas_hwpt(VFIODevice *vbasedev, uint32_t id,
+ Error **errp)
+{
+ int ret, iommufd = vbasedev->iommufd->fd;
+ struct vfio_device_attach_iommufd_pt attach_data = {
+ .argsz = sizeof(attach_data),
+ .flags = 0,
+ .pt_id = id,
+ };
+
+ /* Attach device to an IOAS or hwpt within iommufd */
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_ATTACH_IOMMUFD_PT, &attach_data);
+ if (ret) {
+ error_setg_errno(errp, errno,
+ "[iommufd=%d] error attach %s (%d) to id=%d",
+ iommufd, vbasedev->name, vbasedev->fd, id);
+ } else {
+ trace_iommufd_cdev_attach_ioas_hwpt(iommufd, vbasedev->name,
+ vbasedev->fd, id);
+ }
+ return ret;
+}
+
+static int iommufd_cdev_detach_ioas_hwpt(VFIODevice *vbasedev, Error **errp)
+{
+ int ret, iommufd = vbasedev->iommufd->fd;
+ struct vfio_device_detach_iommufd_pt detach_data = {
+ .argsz = sizeof(detach_data),
+ .flags = 0,
+ };
+
+ ret = ioctl(vbasedev->fd, VFIO_DEVICE_DETACH_IOMMUFD_PT, &detach_data);
+ if (ret) {
+ error_setg_errno(errp, errno, "detach %s failed", vbasedev->name);
+ } else {
+ trace_iommufd_cdev_detach_ioas_hwpt(iommufd, vbasedev->name);
+ }
+ return ret;
+}
+
+static int iommufd_cdev_attach_container(VFIODevice *vbasedev,
+ VFIOIOMMUFDContainer *container,
+ Error **errp)
+{
+ return iommufd_cdev_attach_ioas_hwpt(vbasedev, container->ioas_id, errp);
+}
+
+static void iommufd_cdev_detach_container(VFIODevice *vbasedev,
+ VFIOIOMMUFDContainer *container)
+{
+ Error *err = NULL;
+
+ if (iommufd_cdev_detach_ioas_hwpt(vbasedev, &err)) {
+ error_report_err(err);
+ }
+}
+
+static void iommufd_cdev_container_destroy(VFIOIOMMUFDContainer *container)
+{
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+
+ if (!QLIST_EMPTY(&bcontainer->device_list)) {
+ return;
+ }
+ memory_listener_unregister(&bcontainer->listener);
+ vfio_container_destroy(bcontainer);
+ iommufd_backend_free_id(container->be, container->ioas_id);
+ g_free(container);
+}
+
+static int iommufd_cdev_ram_block_discard_disable(bool state)
+{
+ /*
+ * We support coordinated discarding of RAM via the RamDiscardManager.
+ */
+ return ram_block_uncoordinated_discard_disable(state);
+}
+
+static int iommufd_cdev_get_info_iova_range(VFIOIOMMUFDContainer *container,
+ uint32_t ioas_id, Error **errp)
+{
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+ struct iommu_ioas_iova_ranges *info;
+ struct iommu_iova_range *iova_ranges;
+ int ret, sz, fd = container->be->fd;
+
+ info = g_malloc0(sizeof(*info));
+ info->size = sizeof(*info);
+ info->ioas_id = ioas_id;
+
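+ /*
+ * First call with num_iovas == 0: the kernel reports the required
+ * number of ranges in num_iovas and fails with errno == EMSGSIZE,
+ * which is tolerated here; the array is reallocated and queried again.
+ */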
+ ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
+ if (ret && errno != EMSGSIZE) {
+ goto error;
+ }
+
+ sz = info->num_iovas * sizeof(struct iommu_iova_range);
+ info = g_realloc(info, sizeof(*info) + sz);
+ info->allowed_iovas = (uintptr_t)(info + 1);
+
+ ret = ioctl(fd, IOMMU_IOAS_IOVA_RANGES, info);
+ if (ret) {
+ goto error;
+ }
+
+ iova_ranges = (struct iommu_iova_range *)(uintptr_t)info->allowed_iovas;
+
+ for (int i = 0; i < info->num_iovas; i++) {
+ Range *range = g_new(Range, 1);
+
+ range_set_bounds(range, iova_ranges[i].start, iova_ranges[i].last);
+ bcontainer->iova_ranges =
+ range_list_insert(bcontainer->iova_ranges, range);
+ }
+ bcontainer->pgsizes = info->out_iova_alignment;
+
+ g_free(info);
+ return 0;
+
+error:
+ ret = -errno;
+ g_free(info);
+ error_setg_errno(errp, errno, "Cannot get IOVA ranges");
+ return ret;
+}
+
+static int iommufd_cdev_attach(const char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp)
+{
+ VFIOContainerBase *bcontainer;
+ VFIOIOMMUFDContainer *container;
+ VFIOAddressSpace *space;
+ struct vfio_device_info dev_info = { .argsz = sizeof(dev_info) };
+ int ret, devfd;
+ uint32_t ioas_id;
+ Error *err = NULL;
+
+ if (vbasedev->fd < 0) {
+ devfd = iommufd_cdev_getfd(vbasedev->sysfsdev, errp);
+ if (devfd < 0) {
+ return devfd;
+ }
+ vbasedev->fd = devfd;
+ } else {
+ devfd = vbasedev->fd;
+ }
+
+ ret = iommufd_cdev_connect_and_bind(vbasedev, errp);
+ if (ret) {
+ goto err_connect_bind;
+ }
+
+ space = vfio_get_address_space(as);
+
+ /* Try to attach to an existing container in this space */
+ QLIST_FOREACH(bcontainer, &space->containers, next) {
+ container = container_of(bcontainer, VFIOIOMMUFDContainer, bcontainer);
+ if (bcontainer->ops != &vfio_iommufd_ops ||
+ vbasedev->iommufd != container->be) {
+ continue;
+ }
+ if (iommufd_cdev_attach_container(vbasedev, container, &err)) {
+ const char *msg = error_get_pretty(err);
+
+ trace_iommufd_cdev_fail_attach_existing_container(msg);
+ error_free(err);
+ err = NULL;
+ } else {
+ ret = iommufd_cdev_ram_block_discard_disable(true);
+ if (ret) {
+ error_setg(errp,
+ "Cannot set discarding of RAM broken (%d)", ret);
+ goto err_discard_disable;
+ }
+ goto found_container;
+ }
+ }
+
+ /* Need to allocate a new dedicated container */
+ ret = iommufd_backend_alloc_ioas(vbasedev->iommufd, &ioas_id, errp);
+ if (ret < 0) {
+ goto err_alloc_ioas;
+ }
+
+ trace_iommufd_cdev_alloc_ioas(vbasedev->iommufd->fd, ioas_id);
+
+ container = g_malloc0(sizeof(*container));
+ container->be = vbasedev->iommufd;
+ container->ioas_id = ioas_id;
+
+ bcontainer = &container->bcontainer;
+ vfio_container_init(bcontainer, space, &vfio_iommufd_ops);
+ QLIST_INSERT_HEAD(&space->containers, bcontainer, next);
+
+ ret = iommufd_cdev_attach_container(vbasedev, container, errp);
+ if (ret) {
+ goto err_attach_container;
+ }
+
+ ret = iommufd_cdev_ram_block_discard_disable(true);
+ if (ret) {
+ goto err_discard_disable;
+ }
+
+ ret = iommufd_cdev_get_info_iova_range(container, ioas_id, &err);
+ if (ret) {
+ error_append_hint(&err,
+ "Fallback to default 64bit IOVA range and 4K page size\n");
+ warn_report_err(err);
+ err = NULL;
+ bcontainer->pgsizes = qemu_real_host_page_size();
+ }
+
+ bcontainer->listener = vfio_memory_listener;
+ memory_listener_register(&bcontainer->listener, bcontainer->space->as);
+
+ if (bcontainer->error) {
+ ret = -1;
+ error_propagate_prepend(errp, bcontainer->error,
+ "memory listener initialization failed: ");
+ goto err_listener_register;
+ }
+
+ bcontainer->initialized = true;
+
+found_container:
+ ret = ioctl(devfd, VFIO_DEVICE_GET_INFO, &dev_info);
+ if (ret) {
+ error_setg_errno(errp, errno, "error getting device info");
+ goto err_listener_register;
+ }
+
+ /*
+ * TODO: examine RAM_BLOCK_DISCARD handling: should the discard
+ * incompatibility check also be done at group level?
+ */
+ if (vbasedev->ram_block_discard_allowed) {
+ iommufd_cdev_ram_block_discard_disable(false);
+ }
+
+ vbasedev->group = 0;
+ vbasedev->num_irqs = dev_info.num_irqs;
+ vbasedev->num_regions = dev_info.num_regions;
+ vbasedev->flags = dev_info.flags;
+ vbasedev->reset_works = !!(dev_info.flags & VFIO_DEVICE_FLAGS_RESET);
+ vbasedev->bcontainer = bcontainer;
+ QLIST_INSERT_HEAD(&bcontainer->device_list, vbasedev, container_next);
+ QLIST_INSERT_HEAD(&vfio_device_list, vbasedev, global_next);
+
+ trace_iommufd_cdev_device_info(vbasedev->name, devfd, vbasedev->num_irqs,
+ vbasedev->num_regions, vbasedev->flags);
+ return 0;
+
+err_listener_register:
+ iommufd_cdev_ram_block_discard_disable(false);
+err_discard_disable:
+ iommufd_cdev_detach_container(vbasedev, container);
+err_attach_container:
+ iommufd_cdev_container_destroy(container);
+err_alloc_ioas:
+ vfio_put_address_space(space);
+ iommufd_cdev_unbind_and_disconnect(vbasedev);
+err_connect_bind:
+ close(vbasedev->fd);
+ return ret;
+}
+
+static void iommufd_cdev_detach(VFIODevice *vbasedev)
+{
+ VFIOContainerBase *bcontainer = vbasedev->bcontainer;
+ VFIOAddressSpace *space = bcontainer->space;
+ VFIOIOMMUFDContainer *container = container_of(bcontainer,
+ VFIOIOMMUFDContainer,
+ bcontainer);
+ QLIST_REMOVE(vbasedev, global_next);
+ QLIST_REMOVE(vbasedev, container_next);
+ vbasedev->bcontainer = NULL;
+
+ if (!vbasedev->ram_block_discard_allowed) {
+ iommufd_cdev_ram_block_discard_disable(false);
+ }
+
+ iommufd_cdev_detach_container(vbasedev, container);
+ iommufd_cdev_container_destroy(container);
+ vfio_put_address_space(space);
+
+ iommufd_cdev_unbind_and_disconnect(vbasedev);
+ close(vbasedev->fd);
+}
+
+static VFIODevice *iommufd_cdev_pci_find_by_devid(__u32 devid)
+{
+ VFIODevice *vbasedev_iter;
+
+ QLIST_FOREACH(vbasedev_iter, &vfio_device_list, global_next) {
+ if (vbasedev_iter->bcontainer->ops != &vfio_iommufd_ops) {
+ continue;
+ }
+ if (devid == vbasedev_iter->devid) {
+ return vbasedev_iter;
+ }
+ }
+ return NULL;
+}
+
+static VFIOPCIDevice *
+iommufd_cdev_dep_get_realized_vpdev(struct vfio_pci_dependent_device *dep_dev,
+ VFIODevice *reset_dev)
+{
+ VFIODevice *vbasedev_tmp;
+
+ if (dep_dev->devid == reset_dev->devid ||
+ dep_dev->devid == VFIO_PCI_DEVID_OWNED) {
+ return NULL;
+ }
+
+ vbasedev_tmp = iommufd_cdev_pci_find_by_devid(dep_dev->devid);
+ if (!vbasedev_tmp || !vbasedev_tmp->dev->realized ||
+ vbasedev_tmp->type != VFIO_DEVICE_TYPE_PCI) {
+ return NULL;
+ }
+
+ return container_of(vbasedev_tmp, VFIOPCIDevice, vbasedev);
+}
+
+static int iommufd_cdev_pci_hot_reset(VFIODevice *vbasedev, bool single)
+{
+ VFIOPCIDevice *vdev = container_of(vbasedev, VFIOPCIDevice, vbasedev);
+ struct vfio_pci_hot_reset_info *info = NULL;
+ struct vfio_pci_dependent_device *devices;
+ struct vfio_pci_hot_reset *reset;
+ int ret, i;
+ bool multi = false;
+
+ trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
+
+ if (!single) {
+ vfio_pci_pre_reset(vdev);
+ }
+ vdev->vbasedev.needs_reset = false;
+
+ ret = vfio_pci_get_pci_hot_reset_info(vdev, &info);
+
+ if (ret) {
+ goto out_single;
+ }
+
+ assert(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID);
+
+ devices = &info->devices[0];
+
+ if (!(info->flags & VFIO_PCI_HOT_RESET_FLAG_DEV_ID_OWNED)) {
+ if (!vdev->has_pm_reset) {
+ for (i = 0; i < info->count; i++) {
+ if (devices[i].devid == VFIO_PCI_DEVID_NOT_OWNED) {
+ error_report("vfio: Cannot reset device %s, "
+ "depends on device %04x:%02x:%02x.%x "
+ "which is not owned.",
+ vdev->vbasedev.name, devices[i].segment,
+ devices[i].bus, PCI_SLOT(devices[i].devfn),
+ PCI_FUNC(devices[i].devfn));
+ }
+ }
+ }
+ ret = -EPERM;
+ goto out_single;
+ }
+
+ trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
+
+ for (i = 0; i < info->count; i++) {
+ VFIOPCIDevice *tmp;
+
+ trace_iommufd_cdev_pci_hot_reset_dep_devices(devices[i].segment,
+ devices[i].bus,
+ PCI_SLOT(devices[i].devfn),
+ PCI_FUNC(devices[i].devfn),
+ devices[i].devid);
+
+ /*
+ * If a VFIO cdev device is resettable, all the dependent devices
+ * are either bound to the same iommufd or within the same
+ * iommu_group as one of the iommufd-bound devices.
+ */
+ assert(devices[i].devid != VFIO_PCI_DEVID_NOT_OWNED);
+
+ tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev);
+ if (!tmp) {
+ continue;
+ }
+
+ if (single) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+ vfio_pci_pre_reset(tmp);
+ tmp->vbasedev.needs_reset = false;
+ multi = true;
+ }
+
+ if (!single && !multi) {
+ ret = -EINVAL;
+ goto out_single;
+ }
+
+ /* Use a zero-length array for hot reset with the iommufd backend */
+ reset = g_malloc0(sizeof(*reset));
+ reset->argsz = sizeof(*reset);
+
+ /* Bus reset! */
+ ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
+ g_free(reset);
+ if (ret) {
+ ret = -errno;
+ }
+
+ trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
+ ret ? strerror(errno) : "Success");
+
+ /* Re-enable INTx on affected devices */
+ for (i = 0; i < info->count; i++) {
+ VFIOPCIDevice *tmp;
+
+ tmp = iommufd_cdev_dep_get_realized_vpdev(&devices[i], &vdev->vbasedev);
+ if (!tmp) {
+ continue;
+ }
+ vfio_pci_post_reset(tmp);
+ }
+out_single:
+ if (!single) {
+ vfio_pci_post_reset(vdev);
+ }
+ g_free(info);
+
+ return ret;
+}
+
+const VFIOIOMMUOps vfio_iommufd_ops = {
+ .dma_map = iommufd_cdev_map,
+ .dma_unmap = iommufd_cdev_unmap,
+ .attach_device = iommufd_cdev_attach,
+ .detach_device = iommufd_cdev_detach,
+ .pci_hot_reset = iommufd_cdev_pci_hot_reset,
+};
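+
+/*
+ * A hypothetical command line exercising this backend (the device
+ * address is a placeholder):
+ *
+ * -object iommufd,id=iommufd0
+ * -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0
+ *
+ * Alternatively a pre-opened cdev fd can be passed through the "fd"
+ * property added elsewhere in this series.
+ */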
vfio_ss.add(files(
'helpers.c',
'common.c',
+ 'container-base.c',
'container.c',
'spapr.c',
'migration.c',
))
+vfio_ss.add(when: 'CONFIG_IOMMUFD', if_true: files(
+ 'iommufd.c',
+))
vfio_ss.add(when: 'CONFIG_VFIO_PCI', if_true: files(
'display.c',
'pci-quirks.c',
*/
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include <linux/vfio.h>
#include <sys/ioctl.h>
#include "qapi/error.h"
#include "migration/blocker.h"
#include "migration/qemu-file.h"
+#include "sysemu/iommufd.h"
#define TYPE_VFIO_PCI_NOHOTPLUG "vfio-pci-nohotplug"
return 0;
}
-static void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
+void vfio_pci_pre_reset(VFIOPCIDevice *vdev)
{
PCIDevice *pdev = &vdev->pdev;
uint16_t cmd;
vfio_pci_write_config(pdev, PCI_COMMAND, cmd, 2);
}
-static void vfio_pci_post_reset(VFIOPCIDevice *vdev)
+void vfio_pci_post_reset(VFIOPCIDevice *vdev)
{
Error *err = NULL;
int nr;
vfio_quirk_reset(vdev);
}
-static bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
+bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name)
{
char tmp[13];
return (strcmp(tmp, name) == 0);
}
-static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
+int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
+ struct vfio_pci_hot_reset_info **info_p)
{
- VFIOGroup *group;
struct vfio_pci_hot_reset_info *info;
- struct vfio_pci_dependent_device *devices;
- struct vfio_pci_hot_reset *reset;
- int32_t *fds;
- int ret, i, count;
- bool multi = false;
+ int ret, count;
- trace_vfio_pci_hot_reset(vdev->vbasedev.name, single ? "one" : "multi");
-
- if (!single) {
- vfio_pci_pre_reset(vdev);
- }
- vdev->vbasedev.needs_reset = false;
+ assert(info_p && !*info_p);
info = g_malloc0(sizeof(*info));
info->argsz = sizeof(*info);
ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
if (ret && errno != ENOSPC) {
ret = -errno;
+ g_free(info);
if (!vdev->has_pm_reset) {
error_report("vfio: Cannot reset device %s, "
"no available reset mechanism.", vdev->vbasedev.name);
}
- goto out_single;
+ return ret;
}
count = info->count;
- info = g_realloc(info, sizeof(*info) + (count * sizeof(*devices)));
- info->argsz = sizeof(*info) + (count * sizeof(*devices));
- devices = &info->devices[0];
+ info = g_realloc(info, sizeof(*info) + (count * sizeof(info->devices[0])));
+ info->argsz = sizeof(*info) + (count * sizeof(info->devices[0]));
ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_GET_PCI_HOT_RESET_INFO, info);
if (ret) {
ret = -errno;
+ g_free(info);
error_report("vfio: hot reset info failed: %m");
- goto out_single;
- }
-
- trace_vfio_pci_hot_reset_has_dep_devices(vdev->vbasedev.name);
-
- /* Verify that we have all the groups required */
- for (i = 0; i < info->count; i++) {
- PCIHostDeviceAddress host;
- VFIOPCIDevice *tmp;
- VFIODevice *vbasedev_iter;
-
- host.domain = devices[i].segment;
- host.bus = devices[i].bus;
- host.slot = PCI_SLOT(devices[i].devfn);
- host.function = PCI_FUNC(devices[i].devfn);
-
- trace_vfio_pci_hot_reset_dep_devices(host.domain,
- host.bus, host.slot, host.function, devices[i].group_id);
-
- if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
- continue;
- }
-
- QLIST_FOREACH(group, &vfio_group_list, next) {
- if (group->groupid == devices[i].group_id) {
- break;
- }
- }
-
- if (!group) {
- if (!vdev->has_pm_reset) {
- error_report("vfio: Cannot reset device %s, "
- "depends on group %d which is not owned.",
- vdev->vbasedev.name, devices[i].group_id);
- }
- ret = -EPERM;
- goto out;
- }
-
- /* Prep dependent devices for reset and clear our marker. */
- QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (!vbasedev_iter->dev->realized ||
- vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
- continue;
- }
- tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
- if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
- if (single) {
- ret = -EINVAL;
- goto out_single;
- }
- vfio_pci_pre_reset(tmp);
- tmp->vbasedev.needs_reset = false;
- multi = true;
- break;
- }
- }
- }
-
- if (!single && !multi) {
- ret = -EINVAL;
- goto out_single;
- }
-
- /* Determine how many group fds need to be passed */
- count = 0;
- QLIST_FOREACH(group, &vfio_group_list, next) {
- for (i = 0; i < info->count; i++) {
- if (group->groupid == devices[i].group_id) {
- count++;
- break;
- }
- }
- }
-
- reset = g_malloc0(sizeof(*reset) + (count * sizeof(*fds)));
- reset->argsz = sizeof(*reset) + (count * sizeof(*fds));
- fds = &reset->group_fds[0];
-
- /* Fill in group fds */
- QLIST_FOREACH(group, &vfio_group_list, next) {
- for (i = 0; i < info->count; i++) {
- if (group->groupid == devices[i].group_id) {
- fds[reset->count++] = group->fd;
- break;
- }
- }
+ return ret;
}
- /* Bus reset! */
- ret = ioctl(vdev->vbasedev.fd, VFIO_DEVICE_PCI_HOT_RESET, reset);
- g_free(reset);
-
- trace_vfio_pci_hot_reset_result(vdev->vbasedev.name,
- ret ? strerror(errno) : "Success");
-
-out:
- /* Re-enable INTx on affected devices */
- for (i = 0; i < info->count; i++) {
- PCIHostDeviceAddress host;
- VFIOPCIDevice *tmp;
- VFIODevice *vbasedev_iter;
-
- host.domain = devices[i].segment;
- host.bus = devices[i].bus;
- host.slot = PCI_SLOT(devices[i].devfn);
- host.function = PCI_FUNC(devices[i].devfn);
-
- if (vfio_pci_host_match(&host, vdev->vbasedev.name)) {
- continue;
- }
-
- QLIST_FOREACH(group, &vfio_group_list, next) {
- if (group->groupid == devices[i].group_id) {
- break;
- }
- }
-
- if (!group) {
- break;
- }
+ *info_p = info;
+ return 0;
+}
- QLIST_FOREACH(vbasedev_iter, &group->device_list, next) {
- if (!vbasedev_iter->dev->realized ||
- vbasedev_iter->type != VFIO_DEVICE_TYPE_PCI) {
- continue;
- }
- tmp = container_of(vbasedev_iter, VFIOPCIDevice, vbasedev);
- if (vfio_pci_host_match(&host, tmp->vbasedev.name)) {
- vfio_pci_post_reset(tmp);
- break;
- }
- }
- }
-out_single:
- if (!single) {
- vfio_pci_post_reset(vdev);
- }
- g_free(info);
+static int vfio_pci_hot_reset(VFIOPCIDevice *vdev, bool single)
+{
+ VFIODevice *vbasedev = &vdev->vbasedev;
+ const VFIOIOMMUOps *ops = vbasedev->bcontainer->ops;
- return ret;
+ return ops->pci_hot_reset(vbasedev, single);
}
/*
VFIODevice *vbasedev = &vdev->vbasedev;
char *tmp, *subsys;
Error *err = NULL;
- struct stat st;
int i, ret;
bool is_mdev;
char uuid[UUID_STR_LEN];
char *name;
- if (!vbasedev->sysfsdev) {
+ if (vbasedev->fd < 0 && !vbasedev->sysfsdev) {
if (!(~vdev->host.domain || ~vdev->host.bus ||
~vdev->host.slot || ~vdev->host.function)) {
error_setg(errp, "No provided host device");
error_append_hint(errp, "Use -device vfio-pci,host=DDDD:BB:DD.F "
+#ifdef CONFIG_IOMMUFD
+ "or -device vfio-pci,fd=DEVICE_FD "
+#endif
"or -device vfio-pci,sysfsdev=PATH_TO_DEVICE\n");
return;
}
vdev->host.slot, vdev->host.function);
}
- if (stat(vbasedev->sysfsdev, &st) < 0) {
- error_setg_errno(errp, errno, "no such host device");
- error_prepend(errp, VFIO_MSG_PREFIX, vbasedev->sysfsdev);
+ if (vfio_device_get_name(vbasedev, errp) < 0) {
return;
}
- vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
- vbasedev->ops = &vfio_pci_ops;
- vbasedev->type = VFIO_DEVICE_TYPE_PCI;
- vbasedev->dev = DEVICE(vdev);
-
/*
* Mediated devices *might* operate compatibly with discarding of RAM, but
* we cannot know for certain, it depends on whether the mdev vendor driver
{
PCIDevice *pci_dev = PCI_DEVICE(obj);
VFIOPCIDevice *vdev = VFIO_PCI(obj);
+ VFIODevice *vbasedev = &vdev->vbasedev;
device_add_bootindex_property(obj, &vdev->bootindex,
"bootindex", NULL,
vdev->host.slot = ~0U;
vdev->host.function = ~0U;
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PCI, &vfio_pci_ops,
+ DEVICE(vdev), false);
+
vdev->nv_gpudirect_clique = 0xFF;
/* QEMU_PCI_CAP_EXPRESS initialization does not depend on QEMU command
qdev_prop_nv_gpudirect_clique, uint8_t),
DEFINE_PROP_OFF_AUTO_PCIBAR("x-msix-relocation", VFIOPCIDevice, msix_relo,
OFF_AUTOPCIBAR_OFF),
- /*
- * TODO - support passed fds... is this necessary?
- * DEFINE_PROP_STRING("vfiofd", VFIOPCIDevice, vfiofd_name),
- * DEFINE_PROP_STRING("vfiogroupfd, VFIOPCIDevice, vfiogroupfd_name),
- */
+#ifdef CONFIG_IOMMUFD
+ DEFINE_PROP_LINK("iommufd", VFIOPCIDevice, vbasedev.iommufd,
+ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
+#ifdef CONFIG_IOMMUFD
+static void vfio_pci_set_fd(Object *obj, const char *str, Error **errp)
+{
+ vfio_device_set_fd(&VFIO_PCI(obj)->vbasedev, str, errp);
+}
+#endif
+
static void vfio_pci_dev_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->reset = vfio_pci_reset;
device_class_set_props(dc, vfio_pci_dev_properties);
+#ifdef CONFIG_IOMMUFD
+ object_class_property_add_str(klass, "fd", NULL, vfio_pci_set_fd);
+#endif
dc->desc = "VFIO-based PCI device assignment";
set_bit(DEVICE_CATEGORY_MISC, dc->categories);
pdc->realize = vfio_realize;
extern const PropertyInfo qdev_prop_nv_gpudirect_clique;
+void vfio_pci_pre_reset(VFIOPCIDevice *vdev);
+void vfio_pci_post_reset(VFIOPCIDevice *vdev);
+bool vfio_pci_host_match(PCIHostDeviceAddress *addr, const char *name);
+int vfio_pci_get_pci_hot_reset_info(VFIOPCIDevice *vdev,
+ struct vfio_pci_hot_reset_info **info_p);
+
int vfio_populate_vga(VFIOPCIDevice *vdev, Error **errp);
int vfio_pci_igd_opregion_init(VFIOPCIDevice *vdev,
*/
#include "qemu/osdep.h"
+#include CONFIG_DEVICES /* CONFIG_IOMMUFD */
#include "qapi/error.h"
#include <sys/ioctl.h>
#include <linux/vfio.h>
#include "hw/vfio/vfio-platform.h"
+#include "sysemu/iommufd.h"
#include "migration/vmstate.h"
#include "qemu/error-report.h"
#include "qemu/lockable.h"
*/
static int vfio_base_device_init(VFIODevice *vbasedev, Error **errp)
{
- struct stat st;
int ret;
- /* @sysfsdev takes precedence over @host */
- if (vbasedev->sysfsdev) {
+ /* @fd takes precedence over @sysfsdev which takes precedence over @host */
+ if (vbasedev->fd < 0 && vbasedev->sysfsdev) {
g_free(vbasedev->name);
vbasedev->name = g_path_get_basename(vbasedev->sysfsdev);
- } else {
+ } else if (vbasedev->fd < 0) {
if (!vbasedev->name || strchr(vbasedev->name, '/')) {
error_setg(errp, "wrong host device name");
return -EINVAL;
vbasedev->name);
}
- if (stat(vbasedev->sysfsdev, &st) < 0) {
- error_setg_errno(errp, errno,
- "failed to get the sysfs host device file status");
- return -errno;
+ ret = vfio_device_get_name(vbasedev, errp);
+ if (ret) {
+ return ret;
}
ret = vfio_attach_device(vbasedev->name, vbasedev,
VFIODevice *vbasedev = &vdev->vbasedev;
int i, ret;
- vbasedev->type = VFIO_DEVICE_TYPE_PLATFORM;
- vbasedev->dev = dev;
- vbasedev->ops = &vfio_platform_ops;
-
qemu_mutex_init(&vdev->intp_mutex);
trace_vfio_platform_realize(vbasedev->sysfsdev ?
DEFINE_PROP_UINT32("mmap-timeout-ms", VFIOPlatformDevice,
mmap_timeout, 1100),
DEFINE_PROP_BOOL("x-irqfd", VFIOPlatformDevice, irqfd_allowed, true),
+#ifdef CONFIG_IOMMUFD
+ DEFINE_PROP_LINK("iommufd", VFIOPlatformDevice, vbasedev.iommufd,
+ TYPE_IOMMUFD_BACKEND, IOMMUFDBackend *),
+#endif
DEFINE_PROP_END_OF_LIST(),
};
+static void vfio_platform_instance_init(Object *obj)
+{
+ VFIOPlatformDevice *vdev = VFIO_PLATFORM_DEVICE(obj);
+ VFIODevice *vbasedev = &vdev->vbasedev;
+
+ vfio_device_init(vbasedev, VFIO_DEVICE_TYPE_PLATFORM, &vfio_platform_ops,
+ DEVICE(vdev), false);
+}
+
+#ifdef CONFIG_IOMMUFD
+static void vfio_platform_set_fd(Object *obj, const char *str, Error **errp)
+{
+ vfio_device_set_fd(&VFIO_PLATFORM_DEVICE(obj)->vbasedev, str, errp);
+}
+#endif
+
static void vfio_platform_class_init(ObjectClass *klass, void *data)
{
DeviceClass *dc = DEVICE_CLASS(klass);
dc->realize = vfio_platform_realize;
device_class_set_props(dc, vfio_platform_dev_properties);
+#ifdef CONFIG_IOMMUFD
+ object_class_property_add_str(klass, "fd", NULL, vfio_platform_set_fd);
+#endif
dc->vmsd = &vfio_platform_vmstate;
dc->desc = "VFIO-based platform device assignment";
sbc->connect_irq_notifier = vfio_start_irqfd_injection;
.name = TYPE_VFIO_PLATFORM,
.parent = TYPE_SYS_BUS_DEVICE,
.instance_size = sizeof(VFIOPlatformDevice),
+ .instance_init = vfio_platform_instance_init,
.class_init = vfio_platform_class_init,
.class_size = sizeof(VFIOPlatformDeviceClass),
};
#include "qapi/error.h"
#include "trace.h"
+typedef struct VFIOSpaprContainer {
+ VFIOContainer container;
+ MemoryListener prereg_listener;
+ QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
+} VFIOSpaprContainer;
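+
+/*
+ * The sPAPR state embeds the legacy VFIOContainer; the helpers below
+ * recover it from a VFIOContainer pointer with container_of(), e.g.:
+ *
+ * VFIOSpaprContainer *scontainer =
+ * container_of(container, VFIOSpaprContainer, container);
+ */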
+
static bool vfio_prereg_listener_skipped_section(MemoryRegionSection *section)
{
if (memory_region_is_iommu(section->mr)) {
static void vfio_prereg_listener_region_add(MemoryListener *listener,
MemoryRegionSection *section)
{
- VFIOContainer *container = container_of(listener, VFIOContainer,
- prereg_listener);
+ VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer,
+ prereg_listener);
+ VFIOContainer *container = &scontainer->container;
+ VFIOContainerBase *bcontainer = &container->bcontainer;
const hwaddr gpa = section->offset_within_address_space;
hwaddr end;
int ret;
* can gracefully fail. Runtime, there's not much we can do other
* than throw a hardware error.
*/
- if (!container->initialized) {
- if (!container->error) {
- error_setg_errno(&container->error, -ret,
+ if (!bcontainer->initialized) {
+ if (!bcontainer->error) {
+ error_setg_errno(&bcontainer->error, -ret,
"Memory registering failed");
}
} else {
static void vfio_prereg_listener_region_del(MemoryListener *listener,
MemoryRegionSection *section)
{
- VFIOContainer *container = container_of(listener, VFIOContainer,
- prereg_listener);
+ VFIOSpaprContainer *scontainer = container_of(listener, VFIOSpaprContainer,
+ prereg_listener);
+ VFIOContainer *container = &scontainer->container;
const hwaddr gpa = section->offset_within_address_space;
hwaddr end;
int ret;
.region_del = vfio_prereg_listener_region_del,
};
-static void vfio_host_win_add(VFIOContainer *container, hwaddr min_iova,
+static void vfio_host_win_add(VFIOSpaprContainer *scontainer, hwaddr min_iova,
hwaddr max_iova, uint64_t iova_pgsizes)
{
VFIOHostDMAWindow *hostwin;
- QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) {
if (ranges_overlap(hostwin->min_iova,
hostwin->max_iova - hostwin->min_iova + 1,
min_iova,
hostwin->min_iova = min_iova;
hostwin->max_iova = max_iova;
hostwin->iova_pgsizes = iova_pgsizes;
- QLIST_INSERT_HEAD(&container->hostwin_list, hostwin, hostwin_next);
+ QLIST_INSERT_HEAD(&scontainer->hostwin_list, hostwin, hostwin_next);
}
-static int vfio_host_win_del(VFIOContainer *container,
+static int vfio_host_win_del(VFIOSpaprContainer *scontainer,
hwaddr min_iova, hwaddr max_iova)
{
VFIOHostDMAWindow *hostwin;
- QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) {
if (hostwin->min_iova == min_iova && hostwin->max_iova == max_iova) {
QLIST_REMOVE(hostwin, hostwin_next);
g_free(hostwin);
return -1;
}
-static VFIOHostDMAWindow *vfio_find_hostwin(VFIOContainer *container,
+static VFIOHostDMAWindow *vfio_find_hostwin(VFIOSpaprContainer *container,
hwaddr iova, hwaddr end)
{
VFIOHostDMAWindow *hostwin;
hwaddr *pgsize)
{
int ret = 0;
+ VFIOContainerBase *bcontainer = &container->bcontainer;
IOMMUMemoryRegion *iommu_mr = IOMMU_MEMORY_REGION(section->mr);
uint64_t pagesize = memory_region_iommu_get_min_page_size(iommu_mr), pgmask;
unsigned entries, bits_total, bits_per_level, max_levels;
if (pagesize > rampagesize) {
pagesize = rampagesize;
}
- pgmask = container->pgsizes & (pagesize | (pagesize - 1));
+ pgmask = bcontainer->pgsizes & (pagesize | (pagesize - 1));
pagesize = pgmask ? (1ULL << (63 - clz64(pgmask))) : 0;
if (!pagesize) {
error_report("Host doesn't support page size 0x%"PRIx64
", the supported mask is 0x%lx",
memory_region_iommu_get_min_page_size(iommu_mr),
- container->pgsizes);
+ bcontainer->pgsizes);
return -EINVAL;
}
return 0;
}
-int vfio_container_add_section_window(VFIOContainer *container,
- MemoryRegionSection *section,
- Error **errp)
+static int
+vfio_spapr_container_add_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp)
{
+ VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
+ VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
+ container);
VFIOHostDMAWindow *hostwin;
hwaddr pgsize = 0;
int ret;
iova = section->offset_within_address_space;
end = iova + int128_get64(section->size) - 1;
- if (!vfio_find_hostwin(container, iova, end)) {
+ if (!vfio_find_hostwin(scontainer, iova, end)) {
error_setg(errp, "Container %p can't map guest IOVA region"
" 0x%"HWADDR_PRIx"..0x%"HWADDR_PRIx, container,
iova, end);
}
/* For now intersections are not allowed, we may relax this later */
- QLIST_FOREACH(hostwin, &container->hostwin_list, hostwin_next) {
+ QLIST_FOREACH(hostwin, &scontainer->hostwin_list, hostwin_next) {
if (ranges_overlap(hostwin->min_iova,
hostwin->max_iova - hostwin->min_iova + 1,
section->offset_within_address_space,
return ret;
}
- vfio_host_win_add(container, section->offset_within_address_space,
+ vfio_host_win_add(scontainer, section->offset_within_address_space,
section->offset_within_address_space +
int128_get64(section->size) - 1, pgsize);
#ifdef CONFIG_KVM
return 0;
}
-void vfio_container_del_section_window(VFIOContainer *container,
- MemoryRegionSection *section)
+static void
+vfio_spapr_container_del_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section)
{
+ VFIOContainer *container = container_of(bcontainer, VFIOContainer,
+ bcontainer);
+ VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
+ container);
+
if (container->iommu_type != VFIO_SPAPR_TCE_v2_IOMMU) {
return;
}
vfio_spapr_remove_window(container,
section->offset_within_address_space);
- if (vfio_host_win_del(container,
+ if (vfio_host_win_del(scontainer,
section->offset_within_address_space,
section->offset_within_address_space +
int128_get64(section->size) - 1) < 0) {
}
}
+static VFIOIOMMUOps vfio_iommu_spapr_ops;
+
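+/*
+ * sPAPR reuses the backend's ops table but overrides the window hooks:
+ * copy the current ops, patch in the sPAPR add/del window callbacks and
+ * point the container at the patched copy.
+ */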
+static void setup_spapr_ops(VFIOContainerBase *bcontainer)
+{
+ vfio_iommu_spapr_ops = *bcontainer->ops;
+ vfio_iommu_spapr_ops.add_window = vfio_spapr_container_add_section_window;
+ vfio_iommu_spapr_ops.del_window = vfio_spapr_container_del_section_window;
+ bcontainer->ops = &vfio_iommu_spapr_ops;
+}
+
int vfio_spapr_container_init(VFIOContainer *container, Error **errp)
{
+ VFIOContainerBase *bcontainer = &container->bcontainer;
+ VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
+ container);
struct vfio_iommu_spapr_tce_info info;
bool v2 = container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU;
int ret, fd = container->fd;
- QLIST_INIT(&container->hostwin_list);
+ QLIST_INIT(&scontainer->hostwin_list);
/*
* The host kernel code implementing VFIO_IOMMU_DISABLE is called
return -errno;
}
} else {
- container->prereg_listener = vfio_prereg_listener;
+ scontainer->prereg_listener = vfio_prereg_listener;
- memory_listener_register(&container->prereg_listener,
+ memory_listener_register(&scontainer->prereg_listener,
&address_space_memory);
- if (container->error) {
+ if (bcontainer->error) {
ret = -1;
- error_propagate_prepend(errp, container->error,
+ error_propagate_prepend(errp, bcontainer->error,
"RAM memory listener initialization failed: ");
goto listener_unregister_exit;
}
}
if (v2) {
- container->pgsizes = info.ddw.pgsizes;
+ bcontainer->pgsizes = info.ddw.pgsizes;
/*
* There is a default window in just created container.
* To make region_add/del simpler, we better remove this
}
} else {
/* The default table uses 4K pages */
- container->pgsizes = 0x1000;
- vfio_host_win_add(container, info.dma32_window_start,
+ bcontainer->pgsizes = 0x1000;
+ vfio_host_win_add(scontainer, info.dma32_window_start,
info.dma32_window_start +
info.dma32_window_size - 1,
0x1000);
}
+ setup_spapr_ops(bcontainer);
+
return 0;
listener_unregister_exit:
if (v2) {
- memory_listener_unregister(&container->prereg_listener);
+ memory_listener_unregister(&scontainer->prereg_listener);
}
return ret;
}
void vfio_spapr_container_deinit(VFIOContainer *container)
{
+ VFIOSpaprContainer *scontainer = container_of(container, VFIOSpaprContainer,
+ container);
VFIOHostDMAWindow *hostwin, *next;
if (container->iommu_type == VFIO_SPAPR_TCE_v2_IOMMU) {
- memory_listener_unregister(&container->prereg_listener);
+ memory_listener_unregister(&scontainer->prereg_listener);
}
- QLIST_FOREACH_SAFE(hostwin, &container->hostwin_list, hostwin_next,
+ QLIST_FOREACH_SAFE(hostwin, &scontainer->hostwin_list, hostwin_next,
next) {
QLIST_REMOVE(hostwin, hostwin_next);
g_free(hostwin);
vfio_region_sparse_mmap_header(const char *name, int index, int nr_areas) "Device %s region %d: %d sparse mmap entries"
vfio_region_sparse_mmap_entry(int i, unsigned long start, unsigned long end) "sparse entry %d [0x%lx - 0x%lx]"
vfio_get_dev_region(const char *name, int index, uint32_t type, uint32_t subtype) "%s index %d, %08x/%08x"
-vfio_dma_unmap_overflow_workaround(void) ""
-vfio_get_dirty_bitmap(int fd, uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "container fd=%d, iova=0x%"PRIx64" size= 0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64
+vfio_legacy_dma_unmap_overflow_workaround(void) ""
+vfio_get_dirty_bitmap(uint64_t iova, uint64_t size, uint64_t bitmap_size, uint64_t start, uint64_t dirty_pages) "iova=0x%"PRIx64" size=0x%"PRIx64" bitmap_size=0x%"PRIx64" start=0x%"PRIx64" dirty_pages=%"PRIu64
vfio_iommu_map_dirty_notify(uint64_t iova_start, uint64_t iova_end) "iommu dirty @ 0x%"PRIx64" - 0x%"PRIx64
# platform.c
vfio_state_pending_exact(const char *name, uint64_t precopy, uint64_t postcopy, uint64_t stopcopy_size, uint64_t precopy_init_size, uint64_t precopy_dirty_size) " (%s) precopy 0x%"PRIx64" postcopy 0x%"PRIx64" stopcopy size 0x%"PRIx64" precopy initial size 0x%"PRIx64" precopy dirty size 0x%"PRIx64
vfio_vmstate_change(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s"
vfio_vmstate_change_prepare(const char *name, int running, const char *reason, const char *dev_state) " (%s) running %d reason %s device state %s"
+
+# iommufd.c
+
+iommufd_cdev_connect_and_bind(int iommufd, const char *name, int devfd, int devid) " [iommufd=%d] Successfully bound device %s (fd=%d): output devid=%d"
+iommufd_cdev_getfd(const char *dev, int devfd) " %s (fd=%d)"
+iommufd_cdev_attach_ioas_hwpt(int iommufd, const char *name, int devfd, int id) " [iommufd=%d] Successfully attached device %s (%d) to id=%d"
+iommufd_cdev_detach_ioas_hwpt(int iommufd, const char *name) " [iommufd=%d] Successfully detached %s"
+iommufd_cdev_fail_attach_existing_container(const char *msg) " %s"
+iommufd_cdev_alloc_ioas(int iommufd, int ioas_id) " [iommufd=%d] new IOMMUFD container with ioasid=%d"
+iommufd_cdev_device_info(const char *name, int devfd, int num_irqs, int num_regions, int flags) " %s (%d) num_irqs=%d num_regions=%d flags=%d"
+iommufd_cdev_pci_hot_reset_dep_devices(int domain, int bus, int slot, int function, int dev_id) "\t%04x:%02x:%02x.%x devid %d"
enum IMX7SNVSRegisters {
SNVS_LPCR = 0x38,
SNVS_LPCR_TOP = BIT(6),
- SNVS_LPCR_DP_EN = BIT(5)
+ SNVS_LPCR_DP_EN = BIT(5),
+ SNVS_LPSRTCMR = 0x050, /* Secure Real Time Counter MSB Register */
+ SNVS_LPSRTCLR = 0x054, /* Secure Real Time Counter LSB Register */
};
#define TYPE_IMX7_SNVS "imx7.snvs"
SysBusDevice parent_obj;
MemoryRegion mmio;
+
+ uint64_t tick_offset;
+ uint64_t lpcr;
};
#endif /* IMX7_SNVS_H */
#include <linux/vfio.h>
#endif
#include "sysemu/sysemu.h"
+#include "hw/vfio/vfio-container-base.h"
#define VFIO_MSG_PREFIX "vfio %s: "
bool initial_data_sent;
} VFIOMigration;
-typedef struct VFIOAddressSpace {
- AddressSpace *as;
- QLIST_HEAD(, VFIOContainer) containers;
- QLIST_ENTRY(VFIOAddressSpace) list;
-} VFIOAddressSpace;
-
struct VFIOGroup;
typedef struct VFIOContainer {
- VFIOAddressSpace *space;
+ VFIOContainerBase bcontainer;
int fd; /* /dev/vfio/vfio, empowered by the attached groups */
- MemoryListener listener;
- MemoryListener prereg_listener;
unsigned iommu_type;
- Error *error;
- bool initialized;
- bool dirty_pages_supported;
- uint64_t dirty_pgsizes;
- uint64_t max_dirty_bitmap_size;
- unsigned long pgsizes;
- unsigned int dma_max_mappings;
- QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
- QLIST_HEAD(, VFIOHostDMAWindow) hostwin_list;
QLIST_HEAD(, VFIOGroup) group_list;
- QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
- QLIST_ENTRY(VFIOContainer) next;
- QLIST_HEAD(, VFIODevice) device_list;
- GList *iova_ranges;
} VFIOContainer;
-typedef struct VFIOGuestIOMMU {
- VFIOContainer *container;
- IOMMUMemoryRegion *iommu_mr;
- hwaddr iommu_offset;
- IOMMUNotifier n;
- QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
-} VFIOGuestIOMMU;
-
-typedef struct VFIORamDiscardListener {
- VFIOContainer *container;
- MemoryRegion *mr;
- hwaddr offset_within_address_space;
- hwaddr size;
- uint64_t granularity;
- RamDiscardListener listener;
- QLIST_ENTRY(VFIORamDiscardListener) next;
-} VFIORamDiscardListener;
-
typedef struct VFIOHostDMAWindow {
hwaddr min_iova;
hwaddr max_iova;
QLIST_ENTRY(VFIOHostDMAWindow) hostwin_next;
} VFIOHostDMAWindow;
+typedef struct IOMMUFDBackend IOMMUFDBackend;
+
+typedef struct VFIOIOMMUFDContainer {
+ VFIOContainerBase bcontainer;
+ IOMMUFDBackend *be;
+ uint32_t ioas_id;
+} VFIOIOMMUFDContainer;
+
typedef struct VFIODeviceOps VFIODeviceOps;
typedef struct VFIODevice {
QLIST_ENTRY(VFIODevice) container_next;
QLIST_ENTRY(VFIODevice) global_next;
struct VFIOGroup *group;
- VFIOContainer *container;
+ VFIOContainerBase *bcontainer;
char *sysfsdev;
char *name;
DeviceState *dev;
OnOffAuto pre_copy_dirty_page_tracking;
bool dirty_pages_supported;
bool dirty_tracking;
+ int devid;
+ IOMMUFDBackend *iommufd;
} VFIODevice;
struct VFIODeviceOps {
} dmabuf;
} VFIODisplay;
-typedef struct {
- unsigned long *bitmap;
- hwaddr size;
- hwaddr pages;
-} VFIOBitmap;
-
VFIOAddressSpace *vfio_get_address_space(AddressSpace *as);
void vfio_put_address_space(VFIOAddressSpace *space);
-bool vfio_devices_all_running_and_saving(VFIOContainer *container);
-
-/* container->fd */
-int vfio_dma_unmap(VFIOContainer *container, hwaddr iova,
- ram_addr_t size, IOMMUTLBEntry *iotlb);
-int vfio_dma_map(VFIOContainer *container, hwaddr iova,
- ram_addr_t size, void *vaddr, bool readonly);
-int vfio_set_dirty_page_tracking(VFIOContainer *container, bool start);
-int vfio_query_dirty_bitmap(VFIOContainer *container, VFIOBitmap *vbmap,
- hwaddr iova, hwaddr size);
/* SPAPR specific */
-int vfio_container_add_section_window(VFIOContainer *container,
- MemoryRegionSection *section,
- Error **errp);
-void vfio_container_del_section_window(VFIOContainer *container,
- MemoryRegionSection *section);
int vfio_spapr_container_init(VFIOContainer *container, Error **errp);
void vfio_spapr_container_deinit(VFIOContainer *container);
typedef QLIST_HEAD(VFIODeviceList, VFIODevice) VFIODeviceList;
extern VFIOGroupList vfio_group_list;
extern VFIODeviceList vfio_device_list;
-
+extern const VFIOIOMMUOps vfio_legacy_ops;
+extern const VFIOIOMMUOps vfio_iommufd_ops;
extern const MemoryListener vfio_memory_listener;
extern int vfio_kvm_device_fd;
void vfio_migration_exit(VFIODevice *vbasedev);
int vfio_bitmap_alloc(VFIOBitmap *vbmap, hwaddr size);
-bool vfio_devices_all_running_and_mig_active(VFIOContainer *container);
-bool vfio_devices_all_device_dirty_tracking(VFIOContainer *container);
-int vfio_devices_query_dirty_bitmap(VFIOContainer *container,
+bool
+vfio_devices_all_running_and_mig_active(const VFIOContainerBase *bcontainer);
+bool
+vfio_devices_all_device_dirty_tracking(const VFIOContainerBase *bcontainer);
+int vfio_devices_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
VFIOBitmap *vbmap, hwaddr iova,
hwaddr size);
-int vfio_get_dirty_bitmap(VFIOContainer *container, uint64_t iova,
- uint64_t size, ram_addr_t ram_addr);
+int vfio_get_dirty_bitmap(const VFIOContainerBase *bcontainer, uint64_t iova,
+ uint64_t size, ram_addr_t ram_addr);
+
+/* Returns 0 on success, or a negative errno. */
+int vfio_device_get_name(VFIODevice *vbasedev, Error **errp);
+void vfio_device_set_fd(VFIODevice *vbasedev, const char *str, Error **errp);
+void vfio_device_init(VFIODevice *vbasedev, int type, VFIODeviceOps *ops,
+ DeviceState *dev, bool ram_discard);
#endif /* HW_VFIO_VFIO_COMMON_H */
--- /dev/null
+/*
+ * VFIO BASE CONTAINER
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ * Copyright Red Hat, Inc. 2023
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ * Eric Auger <eric.auger@redhat.com>
+ *
+ * SPDX-License-Identifier: GPL-2.0-or-later
+ */
+
+#ifndef HW_VFIO_VFIO_CONTAINER_BASE_H
+#define HW_VFIO_VFIO_CONTAINER_BASE_H
+
+#include "exec/memory.h"
+
+typedef struct VFIODevice VFIODevice;
+typedef struct VFIOIOMMUOps VFIOIOMMUOps;
+
+typedef struct {
+ unsigned long *bitmap;
+ hwaddr size;
+ hwaddr pages;
+} VFIOBitmap;
+
+typedef struct VFIOAddressSpace {
+ AddressSpace *as;
+ QLIST_HEAD(, VFIOContainerBase) containers;
+ QLIST_ENTRY(VFIOAddressSpace) list;
+} VFIOAddressSpace;
+
+/*
+ * This is the base object for vfio container backends
+ */
+typedef struct VFIOContainerBase {
+ const VFIOIOMMUOps *ops;
+ VFIOAddressSpace *space;
+ MemoryListener listener;
+ Error *error;
+ bool initialized;
+ uint64_t dirty_pgsizes;
+ uint64_t max_dirty_bitmap_size;
+ unsigned long pgsizes;
+ unsigned int dma_max_mappings;
+ bool dirty_pages_supported;
+ QLIST_HEAD(, VFIOGuestIOMMU) giommu_list;
+ QLIST_HEAD(, VFIORamDiscardListener) vrdl_list;
+ QLIST_ENTRY(VFIOContainerBase) next;
+ QLIST_HEAD(, VFIODevice) device_list;
+ GList *iova_ranges;
+} VFIOContainerBase;
+
+typedef struct VFIOGuestIOMMU {
+ VFIOContainerBase *bcontainer;
+ IOMMUMemoryRegion *iommu_mr;
+ hwaddr iommu_offset;
+ IOMMUNotifier n;
+ QLIST_ENTRY(VFIOGuestIOMMU) giommu_next;
+} VFIOGuestIOMMU;
+
+typedef struct VFIORamDiscardListener {
+ VFIOContainerBase *bcontainer;
+ MemoryRegion *mr;
+ hwaddr offset_within_address_space;
+ hwaddr size;
+ uint64_t granularity;
+ RamDiscardListener listener;
+ QLIST_ENTRY(VFIORamDiscardListener) next;
+} VFIORamDiscardListener;
+
+int vfio_container_dma_map(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly);
+int vfio_container_dma_unmap(VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb);
+int vfio_container_add_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp);
+void vfio_container_del_section_window(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+int vfio_container_set_dirty_page_tracking(VFIOContainerBase *bcontainer,
+ bool start);
+int vfio_container_query_dirty_bitmap(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
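+
+/*
+ * Each helper above dispatches through the container's backend ops
+ * table. As an illustrative sketch, vfio_container_dma_map() is
+ * expected to reduce to roughly:
+ *
+ *     return bcontainer->ops->dma_map(bcontainer, iova, size,
+ *                                     vaddr, readonly);
+ */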
+
+void vfio_container_init(VFIOContainerBase *bcontainer,
+ VFIOAddressSpace *space,
+ const VFIOIOMMUOps *ops);
+void vfio_container_destroy(VFIOContainerBase *bcontainer);
+
+struct VFIOIOMMUOps {
+ /* basic feature */
+ int (*dma_map)(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ void *vaddr, bool readonly);
+ int (*dma_unmap)(const VFIOContainerBase *bcontainer,
+ hwaddr iova, ram_addr_t size,
+ IOMMUTLBEntry *iotlb);
+ int (*attach_device)(const char *name, VFIODevice *vbasedev,
+ AddressSpace *as, Error **errp);
+ void (*detach_device)(VFIODevice *vbasedev);
+ /* migration feature */
+ int (*set_dirty_page_tracking)(const VFIOContainerBase *bcontainer,
+ bool start);
+ int (*query_dirty_bitmap)(const VFIOContainerBase *bcontainer,
+ VFIOBitmap *vbmap,
+ hwaddr iova, hwaddr size);
+ /* PCI specific */
+ int (*pci_hot_reset)(VFIODevice *vbasedev, bool single);
+
+ /* SPAPR specific */
+ int (*add_window)(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section,
+ Error **errp);
+ void (*del_window)(VFIOContainerBase *bcontainer,
+ MemoryRegionSection *section);
+};
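+
+/*
+ * A backend exports a concrete instance of these ops (see the
+ * vfio_legacy_ops and vfio_iommufd_ops declarations elsewhere in this
+ * series). As a sketch, with hypothetical callback names:
+ *
+ *     const VFIOIOMMUOps vfio_legacy_ops = {
+ *         .dma_map = vfio_legacy_dma_map,
+ *         .dma_unmap = vfio_legacy_dma_unmap,
+ *         .attach_device = vfio_legacy_attach_device,
+ *         .detach_device = vfio_legacy_detach_device,
+ *         ...
+ *     };
+ */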
+#endif /* HW_VFIO_VFIO_CONTAINER_BASE_H */
--- /dev/null
+/*
+ * QEMU Chardev Helper
+ *
+ * Copyright (C) 2023 Intel Corporation.
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ *
+ * This work is licensed under the terms of the GNU GPL, version 2. See
+ * the COPYING file in the top-level directory.
+ */
+
+#ifndef QEMU_CHARDEV_OPEN_H
+#define QEMU_CHARDEV_OPEN_H
+
+int open_cdev(const char *devpath, dev_t cdev);
+#endif
--- /dev/null
+#ifndef SYSEMU_IOMMUFD_H
+#define SYSEMU_IOMMUFD_H
+
+#include "qom/object.h"
+#include "qemu/thread.h"
+#include "exec/hwaddr.h"
+#include "exec/cpu-common.h"
+
+#define TYPE_IOMMUFD_BACKEND "iommufd"
+OBJECT_DECLARE_TYPE(IOMMUFDBackend, IOMMUFDBackendClass, IOMMUFD_BACKEND)
+
+struct IOMMUFDBackendClass {
+ ObjectClass parent_class;
+};
+
+struct IOMMUFDBackend {
+ Object parent;
+
+ /*< protected >*/
+ int fd; /* /dev/iommu file descriptor */
+ bool owned; /* is the /dev/iommu opened internally */
+ QemuMutex lock;
+ uint32_t users;
+
+ /*< public >*/
+};
+
+int iommufd_backend_connect(IOMMUFDBackend *be, Error **errp);
+void iommufd_backend_disconnect(IOMMUFDBackend *be);
+
+int iommufd_backend_alloc_ioas(IOMMUFDBackend *be, uint32_t *ioas_id,
+ Error **errp);
+void iommufd_backend_free_id(IOMMUFDBackend *be, uint32_t id);
+int iommufd_backend_map_dma(IOMMUFDBackend *be, uint32_t ioas_id, hwaddr iova,
+ ram_addr_t size, void *vaddr, bool readonly);
+int iommufd_backend_unmap_dma(IOMMUFDBackend *be, uint32_t ioas_id,
+ hwaddr iova, ram_addr_t size);
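+
+/*
+ * Typical call flow for a user of this backend (illustrative sketch,
+ * error handling elided):
+ *
+ *     uint32_t ioas_id;
+ *
+ *     iommufd_backend_connect(be, errp);
+ *     iommufd_backend_alloc_ioas(be, &ioas_id, errp);
+ *     iommufd_backend_map_dma(be, ioas_id, iova, size, vaddr, readonly);
+ *     ...
+ *     iommufd_backend_unmap_dma(be, ioas_id, iova, size);
+ *     iommufd_backend_free_id(be, ioas_id);
+ *     iommufd_backend_disconnect(be);
+ */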
+#endif
{ 'struct': 'VfioUserServerProperties',
'data': { 'socket': 'SocketAddress', 'device': 'str' } }
+##
+# @IOMMUFDProperties:
+#
+# Properties for iommufd objects.
+#
+# @fd: file descriptor name previously passed via 'getfd' command,
+# which represents a pre-opened /dev/iommu. This allows the
+#     iommufd object to be shared across several subsystems
+#     (VFIO, VDPA, ...), and the file descriptor to be shared
+#     with other processes, e.g. DPDK. (default: QEMU opens
+# /dev/iommu by itself)
+#
+# Since: 9.0
+##
+{ 'struct': 'IOMMUFDProperties',
+ 'data': { '*fd': 'str' } }
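+
+# Illustrative example: an iommufd object can be created at runtime
+# via object-add:
+#
+#   { "execute": "object-add",
+#     "arguments": { "qom-type": "iommufd", "id": "iommufd0" } }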
+
##
# @RngProperties:
#
'input-barrier',
{ 'name': 'input-linux',
'if': 'CONFIG_LINUX' },
+ 'iommufd',
'iothread',
'main-loop',
{ 'name': 'memory-backend-epc',
'input-barrier': 'InputBarrierProperties',
'input-linux': { 'type': 'InputLinuxProperties',
'if': 'CONFIG_LINUX' },
+ 'iommufd': 'IOMMUFDProperties',
'iothread': 'IothreadProperties',
'main-loop': 'MainLoopProperties',
'memory-backend-epc': { 'type': 'MemoryBackendEpcProperties',
The ``share`` boolean option is on by default with memfd.
+ ``-object iommufd,id=id[,fd=fd]``
+ Creates an iommufd backend which allows control of DMA mapping
+ through the ``/dev/iommu`` device.
+
+ The ``id`` parameter is a unique ID which frontends (such as
+ vfio-pci or vdpa) will use to connect with the iommufd backend.
+
+ The ``fd`` parameter is an optional pre-opened file descriptor
+ obtained by opening ``/dev/iommu``. Usually the iommufd is shared
+ across all subsystems, bringing the benefit of centralized
+ reference counting.
+
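+ A minimal invocation might look like the following sketch, where
+ the vfio-pci device references the backend by id (the PCI host
+ address is illustrative)::
+
+     -object iommufd,id=iommufd0 \
+     -device vfio-pci,host=0000:02:00.0,iommufd=iommufd0
+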
``-object rng-builtin,id=id``
Creates a random number generator backend which obtains entropy
from QEMU builtin functions. The ``id`` parameter is a unique ID
}
if (kvm_enabled()) {
- kvm_arm_add_vcpu_properties(obj);
+ kvm_arm_add_vcpu_properties(cpu);
}
#ifndef CONFIG_USER_ONLY
*/
if (kvm_enabled()) {
if (kvm_arm_sve_supported()) {
- cpu->sve_vq.supported = kvm_arm_sve_get_vls(CPU(cpu));
+ cpu->sve_vq.supported = kvm_arm_sve_get_vls(cpu);
vq_supported = cpu->sve_vq.supported;
} else {
assert(!cpu_isar_feature(aa64_sve, cpu));
.cp = 14, .opc1 = 0, .crn = 0, .crm = 7, .opc2 = 0,
.access = PL1_RW, .accessfn = access_tda,
.type = ARM_CP_NOP },
- /*
- * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
- * to save and restore a 32-bit guest's DBGVCR)
- */
- { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
- .access = PL2_RW, .accessfn = access_tda,
- .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
/*
* Dummy MDCCINT_EL1, since we don't implement the Debug Communications
* Channel but Linux may try to access this register. The 32-bit
.fieldoffset = offsetof(CPUARMState, cp15.dbgclaim) },
};
+/* These are present only when EL1 supports AArch32 */
+static const ARMCPRegInfo debug_aa32_el1_reginfo[] = {
+ /*
+ * Dummy DBGVCR32_EL2 (which is only for a 64-bit hypervisor
+ * to save and restore a 32-bit guest's DBGVCR)
+ */
+ { .name = "DBGVCR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 2, .opc1 = 4, .crn = 0, .crm = 7, .opc2 = 0,
+ .access = PL2_RW, .accessfn = access_tda,
+ .type = ARM_CP_NOP | ARM_CP_EL3_NO_EL2_KEEP },
+};
+
static const ARMCPRegInfo debug_lpae_cp_reginfo[] = {
/* 64 bit access versions of the (dummy) debug registers */
{ .name = "DBGDRAR", .cp = 14, .crm = 1, .opc1 = 0,
assert(ctx_cmps <= brps);
define_arm_cp_regs(cpu, debug_cp_reginfo);
+ if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ define_arm_cp_regs(cpu, debug_aa32_el1_reginfo);
+ }
if (arm_feature(&cpu->env, ARM_FEATURE_LPAE)) {
define_arm_cp_regs(cpu, debug_lpae_cp_reginfo);
pmu_op_finish(env);
}
+static uint64_t pmcr_read(CPUARMState *env, const ARMCPRegInfo *ri)
+{
+ uint64_t pmcr = env->cp15.c9_pmcr;
+
+ /*
+ * If EL2 is implemented and enabled for the current security state, reads
+ * of PMCR.N from EL1 or EL0 return the value of MDCR_EL2.HPMN or HDCR.HPMN.
+ */
+ if (arm_current_el(env) <= 1 && arm_is_el2_enabled(env)) {
+ pmcr &= ~PMCRN_MASK;
+ pmcr |= (env->cp15.mdcr_el2 & MDCR_HPMN) << PMCRN_SHIFT;
+ }
+
+ return pmcr;
+}
+
static void pmswinc_write(CPUARMState *env, const ARMCPRegInfo *ri,
uint64_t value)
{
.opc0 = 3, .opc1 = 0, .crn = 4, .crm = 2, .opc2 = 0,
.type = ARM_CP_NO_RAW,
.access = PL1_RW, .readfn = spsel_read, .writefn = spsel_write },
- { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
- .access = PL2_RW,
- .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
- .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
- { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
- .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
- .writefn = dacr_write, .raw_writefn = raw_write,
- .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
- { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
- .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
- .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
- .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
{ .name = "SPSR_IRQ", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_ALIAS,
.opc0 = 3, .opc1 = 4, .crn = 4, .crm = 3, .opc2 = 0,
.fieldoffset = offsetoflow32(CPUARMState, cp15.mdcr_el3) },
};
+/* These are present only when EL1 supports AArch32 */
+static const ARMCPRegInfo v8_aa32_el1_reginfo[] = {
+ { .name = "FPEXC32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 3, .opc2 = 0,
+ .access = PL2_RW,
+ .type = ARM_CP_ALIAS | ARM_CP_FPU | ARM_CP_EL3_NO_EL2_KEEP,
+ .fieldoffset = offsetof(CPUARMState, vfp.xregs[ARM_VFP_FPEXC]) },
+ { .name = "DACR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 3, .crm = 0, .opc2 = 0,
+ .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
+ .writefn = dacr_write, .raw_writefn = raw_write,
+ .fieldoffset = offsetof(CPUARMState, cp15.dacr32_el2) },
+ { .name = "IFSR32_EL2", .state = ARM_CP_STATE_AA64,
+ .opc0 = 3, .opc1 = 4, .crn = 5, .crm = 0, .opc2 = 1,
+ .access = PL2_RW, .resetvalue = 0, .type = ARM_CP_EL3_NO_EL2_KEEP,
+ .fieldoffset = offsetof(CPUARMState, cp15.ifsr32_el2) },
+};
+
static void do_hcr_write(CPUARMState *env, uint64_t value, uint64_t valid_mask)
{
ARMCPU *cpu = env_archcpu(env);
.fgt = FGT_PMCR_EL0,
.type = ARM_CP_IO | ARM_CP_ALIAS,
.fieldoffset = offsetoflow32(CPUARMState, cp15.c9_pmcr),
- .accessfn = pmreg_access, .writefn = pmcr_write,
- .raw_writefn = raw_write,
+ .accessfn = pmreg_access,
+ .readfn = pmcr_read, .raw_readfn = raw_read,
+ .writefn = pmcr_write, .raw_writefn = raw_write,
};
ARMCPRegInfo pmcr64 = {
.name = "PMCR_EL0", .state = ARM_CP_STATE_AA64,
.type = ARM_CP_IO,
.fieldoffset = offsetof(CPUARMState, cp15.c9_pmcr),
.resetvalue = cpu->isar.reset_pmcr_el0,
+ .readfn = pmcr_read, .raw_readfn = raw_read,
.writefn = pmcr_write, .raw_writefn = raw_write,
};
static void dccvap_writefn(CPUARMState *env, const ARMCPRegInfo *opaque,
uint64_t value)
{
+#ifdef CONFIG_TCG
ARMCPU *cpu = env_archcpu(env);
/* CTR_EL0 System register -> DminLine, bits [19:16] */
uint64_t dline_size = 4 << ((cpu->ctr >> 16) & 0xF);
}
#endif /*CONFIG_USER_ONLY*/
}
+#else
+ /* Handled by hardware accelerator. */
+ g_assert_not_reached();
+#endif /* CONFIG_TCG */
}
static const ARMCPRegInfo dcpop_reg[] = {
}
define_arm_cp_regs(cpu, v8_idregs);
define_arm_cp_regs(cpu, v8_cp_reginfo);
+ if (cpu_isar_feature(aa64_aa32_el1, cpu)) {
+ define_arm_cp_regs(cpu, v8_aa32_el1_reginfo);
+ }
for (i = 4; i < 16; i++) {
/*
}
}
-/* Sign/zero extend */
-uint32_t HELPER(sxtb16)(uint32_t x)
-{
- uint32_t res;
- res = (uint16_t)(int8_t)x;
- res |= (uint32_t)(int8_t)(x >> 16) << 16;
- return res;
-}
-
-static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
-{
- /*
- * Take a division-by-zero exception if necessary; otherwise return
- * to get the usual non-trapping division behaviour (result of 0)
- */
- if (arm_feature(env, ARM_FEATURE_M)
- && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
- raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
- }
-}
-
-uint32_t HELPER(uxtb16)(uint32_t x)
-{
- uint32_t res;
- res = (uint16_t)(uint8_t)x;
- res |= (uint32_t)(uint8_t)(x >> 16) << 16;
- return res;
-}
-
-int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
-{
- if (den == 0) {
- handle_possible_div0_trap(env, GETPC());
- return 0;
- }
- if (num == INT_MIN && den == -1) {
- return INT_MIN;
- }
- return num / den;
-}
-
-uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
-{
- if (den == 0) {
- handle_possible_div0_trap(env, GETPC());
- return 0;
- }
- return num / den;
-}
-
-uint32_t HELPER(rbit)(uint32_t x)
-{
- return revbit32(x);
-}
-
#ifdef CONFIG_USER_ONLY
static void switch_mode(CPUARMState *env, int mode)
* ARM implementation of KVM hooks
*
* Copyright Christoffer Dall 2009-2010
+ * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
+ * Copyright Alex Bennée 2014, Linaro
*
* This work is licensed under the terms of the GNU GPL, version 2 or later.
* See the COPYING file in the top-level directory.
#include "qom/object.h"
#include "qapi/error.h"
#include "sysemu/sysemu.h"
+#include "sysemu/runstate.h"
#include "sysemu/kvm.h"
#include "sysemu/kvm_int.h"
#include "kvm_arm.h"
#include "hw/pci/pci.h"
#include "exec/memattrs.h"
#include "exec/address-spaces.h"
+#include "exec/gdbstub.h"
#include "hw/boards.h"
#include "hw/irq.h"
#include "qapi/visitor.h"
#include "qemu/log.h"
+#include "hw/acpi/acpi.h"
+#include "hw/acpi/ghes.h"
const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
KVM_CAP_LAST_INFO
static bool cap_has_inject_serror_esr;
static bool cap_has_inject_ext_dabt;
+/**
+ * ARMHostCPUFeatures: information about the host CPU (identified
+ * by asking the host kernel)
+ */
+typedef struct ARMHostCPUFeatures {
+ ARMISARegisters isar;
+ uint64_t features;
+ uint32_t target;
+ const char *dtb_compatible;
+} ARMHostCPUFeatures;
+
static ARMHostCPUFeatures arm_host_cpu_features;
-int kvm_arm_vcpu_init(CPUState *cs)
+/**
+ * kvm_arm_vcpu_init:
+ * @cpu: ARMCPU
+ *
+ * Initialize (or reinitialize) the VCPU by invoking the
+ * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature
+ * bitmask specified in the CPUState.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+static int kvm_arm_vcpu_init(ARMCPU *cpu)
{
- ARMCPU *cpu = ARM_CPU(cs);
struct kvm_vcpu_init init;
init.target = cpu->kvm_target;
memcpy(init.features, cpu->kvm_init_features, sizeof(init.features));
- return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_INIT, &init);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_INIT, &init);
}
-int kvm_arm_vcpu_finalize(CPUState *cs, int feature)
-{
- return kvm_vcpu_ioctl(cs, KVM_ARM_VCPU_FINALIZE, &feature);
-}
-
-void kvm_arm_init_serror_injection(CPUState *cs)
+/**
+ * kvm_arm_vcpu_finalize:
+ * @cpu: ARMCPU
+ * @feature: feature to finalize
+ *
+ * Finalizes the configuration of the specified VCPU feature by
+ * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
+ * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
+ * KVM's API documentation.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+static int kvm_arm_vcpu_finalize(ARMCPU *cpu, int feature)
{
- cap_has_inject_serror_esr = kvm_check_extension(cs->kvm_state,
- KVM_CAP_ARM_INJECT_SERROR_ESR);
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_ARM_VCPU_FINALIZE, &feature);
}
bool kvm_arm_create_scratch_host_vcpu(const uint32_t *cpus_to_try,
}
}
+static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
+{
+ uint64_t ret;
+ struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
+ int err;
+
+ assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
+ err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
+ if (err < 0) {
+ return -1;
+ }
+ *pret = ret;
+ return 0;
+}
+
+static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
+{
+ struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };
+
+ assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
+ return ioctl(fd, KVM_GET_ONE_REG, &idreg);
+}
+
+static bool kvm_arm_pauth_supported(void)
+{
+ return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
+ kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
+}
+
+static bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
+{
+ /* Identify the feature bits corresponding to the host CPU, and
+ * fill out the ARMHostCPUFeatures fields accordingly. To do this
+ * we have to create a scratch VM, create a single CPU inside it,
+ * and then query that CPU for the relevant ID registers.
+ */
+ int fdarray[3];
+ bool sve_supported;
+ bool pmu_supported = false;
+ uint64_t features = 0;
+ int err;
+
+ /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
+ * we know these will only support creating one kind of guest CPU,
+ * which is its preferred CPU type. Fortunately these old kernels
+ * support only a very limited number of CPUs.
+ */
+ static const uint32_t cpus_to_try[] = {
+ KVM_ARM_TARGET_AEM_V8,
+ KVM_ARM_TARGET_FOUNDATION_V8,
+ KVM_ARM_TARGET_CORTEX_A57,
+ QEMU_KVM_ARM_TARGET_NONE
+ };
+ /*
+ * target = -1 informs kvm_arm_create_scratch_host_vcpu()
+ * to use the preferred target
+ */
+ struct kvm_vcpu_init init = { .target = -1, };
+
+ /*
+ * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
+ * which is otherwise RAZ.
+ */
+ sve_supported = kvm_arm_sve_supported();
+ if (sve_supported) {
+ init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
+ }
+
+ /*
+ * Ask for Pointer Authentication if supported, so that we get
+ * the unsanitized field values for AA64ISAR1_EL1.
+ */
+ if (kvm_arm_pauth_supported()) {
+ init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
+ 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
+ }
+
+ if (kvm_arm_pmu_supported()) {
+ init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
+ pmu_supported = true;
+ }
+
+ if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
+ return false;
+ }
+
+ ahcf->target = init.target;
+ ahcf->dtb_compatible = "arm,arm-v8";
+
+ err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 0));
+ if (unlikely(err < 0)) {
+ /*
+ * Before v4.15, the kernel only exposed a limited number of system
+ * registers, not including any of the interesting AArch64 ID regs.
+ * For the most part we could leave these fields as zero with minimal
+ * effect, since this does not affect the values seen by the guest.
+ *
+ * However, it could cause problems down the line for QEMU,
+ * so provide a minimal v8.0 default.
+ *
+ * ??? Could read MIDR and use knowledge from cpu64.c.
+ * ??? Could map a page of memory into our temp guest and
+ * run the tiniest of hand-crafted kernels to extract
+ * the values seen by the guest.
+ * ??? Either of these sounds like too much effort just
+ * to work around running a modern host kernel.
+ */
+ ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
+ err = 0;
+ } else {
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
+ ARM64_SYS_REG(3, 0, 0, 4, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 5));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
+ ARM64_SYS_REG(3, 0, 0, 5, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
+ ARM64_SYS_REG(3, 0, 0, 5, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
+ ARM64_SYS_REG(3, 0, 0, 6, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
+ ARM64_SYS_REG(3, 0, 0, 6, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
+ ARM64_SYS_REG(3, 0, 0, 6, 2));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
+ ARM64_SYS_REG(3, 0, 0, 7, 0));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
+ ARM64_SYS_REG(3, 0, 0, 7, 1));
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
+ ARM64_SYS_REG(3, 0, 0, 7, 2));
+
+ /*
+ * Note that if AArch32 support is not present in the host,
+ * the AArch32 sysregs are present to be read, but will
+ * return UNKNOWN values. This is neither better nor worse
+ * than skipping the reads and leaving 0, as we must avoid
+ * considering the values in every case.
+ */
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
+ ARM64_SYS_REG(3, 0, 0, 1, 0));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
+ ARM64_SYS_REG(3, 0, 0, 1, 1));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
+ ARM64_SYS_REG(3, 0, 0, 1, 2));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
+ ARM64_SYS_REG(3, 0, 0, 1, 4));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
+ ARM64_SYS_REG(3, 0, 0, 1, 5));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
+ ARM64_SYS_REG(3, 0, 0, 1, 6));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
+ ARM64_SYS_REG(3, 0, 0, 1, 7));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
+ ARM64_SYS_REG(3, 0, 0, 2, 0));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
+ ARM64_SYS_REG(3, 0, 0, 2, 1));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
+ ARM64_SYS_REG(3, 0, 0, 2, 2));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
+ ARM64_SYS_REG(3, 0, 0, 2, 3));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
+ ARM64_SYS_REG(3, 0, 0, 2, 4));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
+ ARM64_SYS_REG(3, 0, 0, 2, 5));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
+ ARM64_SYS_REG(3, 0, 0, 2, 6));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
+ ARM64_SYS_REG(3, 0, 0, 2, 7));
+
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
+ ARM64_SYS_REG(3, 0, 0, 3, 0));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
+ ARM64_SYS_REG(3, 0, 0, 3, 1));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
+ ARM64_SYS_REG(3, 0, 0, 3, 2));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
+ ARM64_SYS_REG(3, 0, 0, 3, 4));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
+ ARM64_SYS_REG(3, 0, 0, 3, 5));
+ err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
+ ARM64_SYS_REG(3, 0, 0, 3, 6));
+
+ /*
+ * DBGDIDR is a bit complicated because the kernel doesn't
+ * provide an accessor for it in 64-bit mode, which is what this
+ * scratch VM is in, and there's no architected "64-bit sysreg
+ * which reads the same as the 32-bit register" the way there is
+ * for other ID registers. Instead we synthesize a value from the
+ * AArch64 ID_AA64DFR0, the same way the kernel code in
+ * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
+ * We only do this if the CPU supports AArch32 at EL1.
+ */
+ if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
+ int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
+ int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
+ int ctx_cmps =
+ FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
+ int version = 6; /* ARMv8 debug architecture */
+ bool has_el3 =
+ !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
+ uint32_t dbgdidr = 0;
+
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
+ dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
+ dbgdidr |= (1 << 15); /* RES1 bit */
+ ahcf->isar.dbgdidr = dbgdidr;
+ }
+
+ if (pmu_supported) {
+ /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
+ ARM64_SYS_REG(3, 3, 9, 12, 0));
+ }
+
+ if (sve_supported) {
+ /*
+ * There is a range of kernels between kernel commit 73433762fcae
+ * and f81cb2c3ad41 which have a bug where the kernel doesn't
+ * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
+ * enabled SVE support, which resulted in an error rather than RAZ.
+ * So only read the register if we set KVM_ARM_VCPU_SVE above.
+ */
+ err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
+ ARM64_SYS_REG(3, 0, 0, 4, 4));
+ }
+ }
+
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+
+ if (err < 0) {
+ return false;
+ }
+
+ /*
+ * We can assume any KVM supporting CPU is at least a v8
+ * with VFPv4+Neon; this in turn implies most of the other
+ * feature bits.
+ */
+ features |= 1ULL << ARM_FEATURE_V8;
+ features |= 1ULL << ARM_FEATURE_NEON;
+ features |= 1ULL << ARM_FEATURE_AARCH64;
+ features |= 1ULL << ARM_FEATURE_PMU;
+ features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
+
+ ahcf->features = features;
+
+ return true;
+}
+
void kvm_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
}
/* KVM VCPU properties should be prefixed with "kvm-". */
-void kvm_arm_add_vcpu_properties(Object *obj)
+void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
{
- ARMCPU *cpu = ARM_CPU(obj);
CPUARMState *env = &cpu->env;
+ Object *obj = OBJECT(cpu);
if (arm_feature(env, ARM_FEATURE_GENERIC_TIMER)) {
cpu->kvm_adjvtime = true;
cap_has_mp_state = kvm_check_extension(s, KVM_CAP_MP_STATE);
+ /* Check whether user space can specify guest syndrome value */
+ cap_has_inject_serror_esr =
+ kvm_check_extension(s, KVM_CAP_ARM_INJECT_SERROR_ESR);
+
if (ms->smp.cpus > 256 &&
!kvm_check_extension(s, KVM_CAP_ARM_IRQ_LINE_LAYOUT_2)) {
error_report("Using more than 256 vcpus requires a host kernel "
}
}
- kvm_arm_init_debug(s);
+ max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS);
+ hw_watchpoints = g_array_sized_new(true, true,
+ sizeof(HWWatchpoint), max_hw_wps);
+
+ max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS);
+ hw_breakpoints = g_array_sized_new(true, true,
+ sizeof(HWBreakpoint), max_hw_bps);
return ret;
}
return &cpu->cpreg_values[res - cpu->cpreg_indexes];
}
-/* Initialize the ARMCPU cpreg list according to the kernel's
+/**
+ * kvm_arm_reg_syncs_via_cpreg_list:
+ * @regidx: KVM register index
+ *
+ * Return true if this KVM register should be synchronized via the
+ * cpreg list of arbitrary system registers, false if it is synchronized
+ * by hand using code in kvm_arch_get/put_registers().
+ */
+static bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
+{
+ switch (regidx & KVM_REG_ARM_COPROC_MASK) {
+ case KVM_REG_ARM_CORE:
+ case KVM_REG_ARM64_SVE:
+ return false;
+ default:
+ return true;
+ }
+}
+
+/**
+ * kvm_arm_init_cpreg_list:
+ * @cpu: ARMCPU
+ *
+ * Initialize the ARMCPU cpreg list according to the kernel's
* definition of what CPU registers it knows about (and throw away
* the previous TCG-created cpreg list).
+ *
+ * Returns: 0 if success, else < 0 error code
*/
-int kvm_arm_init_cpreg_list(ARMCPU *cpu)
+static int kvm_arm_init_cpreg_list(ARMCPU *cpu)
{
struct kvm_reg_list rl;
struct kvm_reg_list *rlp;
return ret;
}
+/**
+ * kvm_arm_cpreg_level:
+ * @regidx: KVM register index
+ *
+ * Return the level of this coprocessor/system register. Return value is
+ * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
+ */
+static int kvm_arm_cpreg_level(uint64_t regidx)
+{
+ /*
+ * All system registers are assumed to be level KVM_PUT_RUNTIME_STATE.
+ * If a register should be written less often, you must add it here
+ * with a state of either KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
+ */
+ switch (regidx) {
+ case KVM_REG_ARM_TIMER_CNT:
+ case KVM_REG_ARM_PTIMER_CNT:
+ return KVM_PUT_FULL_STATE;
+ }
+ return KVM_PUT_RUNTIME_STATE;
+}
+
bool write_kvmstate_to_list(ARMCPU *cpu)
{
CPUState *cs = CPU(cpu);
/* Re-init VCPU so that all registers are set to
* their respective reset values.
*/
- ret = kvm_arm_vcpu_init(CPU(cpu));
+ ret = kvm_arm_vcpu_init(cpu);
if (ret < 0) {
fprintf(stderr, "kvm_arm_vcpu_init failed: %s\n", strerror(-ret));
abort();
/*
* Update KVM's MP_STATE based on what QEMU thinks it is
*/
-int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
+static int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu)
{
if (cap_has_mp_state) {
struct kvm_mp_state mp_state = {
.mp_state = (cpu->power_state == PSCI_OFF) ?
KVM_MP_STATE_STOPPED : KVM_MP_STATE_RUNNABLE
};
- int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
- if (ret) {
- fprintf(stderr, "%s: failed to set MP_STATE %d/%s\n",
- __func__, ret, strerror(-ret));
- return -1;
- }
+ return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
}
-
return 0;
}
/*
* Sync the KVM MP_STATE into QEMU
*/
-int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
+static int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu)
{
if (cap_has_mp_state) {
struct kvm_mp_state mp_state;
int ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_MP_STATE, &mp_state);
if (ret) {
- fprintf(stderr, "%s: failed to get MP_STATE %d/%s\n",
- __func__, ret, strerror(-ret));
- abort();
+ return ret;
}
cpu->power_state = (mp_state.mp_state == KVM_MP_STATE_STOPPED) ?
PSCI_OFF : PSCI_ON;
}
-
return 0;
}
-void kvm_arm_get_virtual_time(CPUState *cs)
+/**
+ * kvm_arm_get_virtual_time:
+ * @cpu: ARMCPU
+ *
+ * Gets the VCPU's virtual counter and stores it in the KVM CPU state.
+ */
+static void kvm_arm_get_virtual_time(ARMCPU *cpu)
{
- ARMCPU *cpu = ARM_CPU(cs);
int ret;
if (cpu->kvm_vtime_dirty) {
return;
}
- ret = kvm_get_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
+ ret = kvm_get_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
if (ret) {
error_report("Failed to get KVM_REG_ARM_TIMER_CNT");
abort();
cpu->kvm_vtime_dirty = true;
}
-void kvm_arm_put_virtual_time(CPUState *cs)
+/**
+ * kvm_arm_put_virtual_time:
+ * @cpu: ARMCPU
+ *
+ * Sets the VCPU's virtual counter to the value stored in the KVM CPU state.
+ */
+static void kvm_arm_put_virtual_time(ARMCPU *cpu)
{
- ARMCPU *cpu = ARM_CPU(cs);
int ret;
if (!cpu->kvm_vtime_dirty) {
return;
}
- ret = kvm_set_one_reg(cs, KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
+ ret = kvm_set_one_reg(CPU(cpu), KVM_REG_ARM_TIMER_CNT, &cpu->kvm_vtime);
if (ret) {
error_report("Failed to set KVM_REG_ARM_TIMER_CNT");
abort();
cpu->kvm_vtime_dirty = false;
}
-int kvm_put_vcpu_events(ARMCPU *cpu)
+/**
+ * kvm_put_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Put VCPU related state to kvm.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+static int kvm_put_vcpu_events(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
struct kvm_vcpu_events events;
return ret;
}
-int kvm_get_vcpu_events(ARMCPU *cpu)
+/**
+ * kvm_get_vcpu_events:
+ * @cpu: ARMCPU
+ *
+ * Get VCPU related state from kvm.
+ *
+ * Returns: 0 if success else < 0 error code
+ */
+static int kvm_get_vcpu_events(ARMCPU *cpu)
{
CPUARMState *env = &cpu->env;
struct kvm_vcpu_events events;
return 0;
}
+#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
+#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
+
+/*
+ * ESR_EL1
+ * ISS encoding
+ * AARCH64: DFSC, bits [5:0]
+ * AARCH32:
+ * TTBCR.EAE == 0
+ * FS[4] - DFSR[10]
+ * FS[3:0] - DFSR[3:0]
+ * TTBCR.EAE == 1
+ * FS, bits [5:0]
+ */
+#define ESR_DFSC(aarch64, lpae, v) \
+ ((aarch64 || (lpae)) ? ((v) & 0x3F) \
+ : (((v) >> 6) | ((v) & 0x1F)))
+
+#define ESR_DFSC_EXTABT(aarch64, lpae) \
+ ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
+
+/**
+ * kvm_arm_verify_ext_dabt_pending:
+ * @cpu: ARMCPU
+ *
+ * Verify the fault status code wrt the Ext DABT injection
+ *
+ * Returns: true if the fault status code is as expected, false otherwise
+ */
+static bool kvm_arm_verify_ext_dabt_pending(ARMCPU *cpu)
+{
+ CPUState *cs = CPU(cpu);
+ uint64_t dfsr_val;
+
+ if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
+ CPUARMState *env = &cpu->env;
+ int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
+ int lpae = 0;
+
+ if (!aarch64_mode) {
+ uint64_t ttbcr;
+
+ if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
+ lpae = arm_feature(env, ARM_FEATURE_LPAE)
+ && (ttbcr & TTBCR_EAE);
+ }
+ }
+ /*
+ * The verification here is based on the DFSC bits
+ * of the ESR_EL1 reg only
+ */
+ return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
+ ESR_DFSC_EXTABT(aarch64_mode, lpae));
+ }
+ return false;
+}
+
void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
{
ARMCPU *cpu = ARM_CPU(cs);
* an IMPLEMENTATION DEFINED exception (for 32-bit EL1)
*/
if (!arm_feature(env, ARM_FEATURE_AARCH64) &&
- unlikely(!kvm_arm_verify_ext_dabt_pending(cs))) {
+ unlikely(!kvm_arm_verify_ext_dabt_pending(cpu))) {
error_report("Data abort exception with no valid ISS generated by "
"guest memory access. KVM unable to emulate faulting "
return MEMTXATTRS_UNSPECIFIED;
}
-void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
+static void kvm_arm_vm_state_change(void *opaque, bool running, RunState state)
{
- CPUState *cs = opaque;
- ARMCPU *cpu = ARM_CPU(cs);
+ ARMCPU *cpu = opaque;
if (running) {
if (cpu->kvm_adjvtime) {
- kvm_arm_put_virtual_time(cs);
+ kvm_arm_put_virtual_time(cpu);
}
} else {
if (cpu->kvm_adjvtime) {
- kvm_arm_get_virtual_time(cs);
+ kvm_arm_get_virtual_time(cpu);
}
}
}
/**
* kvm_arm_handle_dabt_nisv:
- * @cs: CPUState
+ * @cpu: ARMCPU
* @esr_iss: ISS encoding (limited) for the exception from Data Abort
* ISV bit set to '0b0' -> no valid instruction syndrome
* @fault_ipa: faulting address for the synchronous data abort
*
* Returns: 0 if the exception has been handled, < 0 otherwise
*/
-static int kvm_arm_handle_dabt_nisv(CPUState *cs, uint64_t esr_iss,
+static int kvm_arm_handle_dabt_nisv(ARMCPU *cpu, uint64_t esr_iss,
uint64_t fault_ipa)
{
- ARMCPU *cpu = ARM_CPU(cs);
CPUARMState *env = &cpu->env;
/*
* Request KVM to inject the external data abort into the guest
*/
events.exception.ext_dabt_pending = 1;
/* KVM_CAP_ARM_INJECT_EXT_DABT implies KVM_CAP_VCPU_EVENTS */
- if (!kvm_vcpu_ioctl(cs, KVM_SET_VCPU_EVENTS, &events)) {
+ if (!kvm_vcpu_ioctl(CPU(cpu), KVM_SET_VCPU_EVENTS, &events)) {
env->ext_dabt_raised = 1;
return 0;
}
return -1;
}
+/**
+ * kvm_arm_handle_debug:
+ * @cpu: ARMCPU
+ * @debug_exit: debug part of the KVM exit structure
+ *
+ * Returns: TRUE if the debug exception was handled.
+ *
+ * See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
+ *
+ * To minimise translating between kernel and user-space the kernel
+ * ABI just provides user-space with the full exception syndrome
+ * register value to be decoded in QEMU.
+ */
+static bool kvm_arm_handle_debug(ARMCPU *cpu,
+ struct kvm_debug_exit_arch *debug_exit)
+{
+ int hsr_ec = syn_get_ec(debug_exit->hsr);
+ CPUState *cs = CPU(cpu);
+ CPUARMState *env = &cpu->env;
+
+ /* Ensure PC is synchronised */
+ kvm_cpu_synchronize_state(cs);
+
+ switch (hsr_ec) {
+ case EC_SOFTWARESTEP:
+ if (cs->singlestep_enabled) {
+ return true;
+ } else {
+ /*
+ * The kernel should have suppressed the guest's ability to
+ * single step at this point so something has gone wrong.
+ */
+ error_report("%s: guest single-step while debugging unsupported"
+ " (%"PRIx64", %"PRIx32")",
+ __func__, env->pc, debug_exit->hsr);
+ return false;
+ }
+ break;
+ case EC_AA64_BKPT:
+ if (kvm_find_sw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_BREAKPOINT:
+ if (find_hw_breakpoint(cs, env->pc)) {
+ return true;
+ }
+ break;
+ case EC_WATCHPOINT:
+ {
+ CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
+ if (wp) {
+ cs->watchpoint_hit = wp;
+ return true;
+ }
+ break;
+ }
+ default:
+ error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
+ __func__, debug_exit->hsr, env->pc);
+ }
+
+ /* If we are not handling the debug exception it must belong to
+ * the guest. Let's re-use the existing TCG interrupt code to set
+ * everything up properly.
+ */
+ cs->exception_index = EXCP_BKPT;
+ env->exception.syndrome = debug_exit->hsr;
+ env->exception.vaddress = debug_exit->far;
+ env->exception.target_el = 1;
+ qemu_mutex_lock_iothread();
+ arm_cpu_do_interrupt(cs);
+ qemu_mutex_unlock_iothread();
+
+ return false;
+}
+
int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
+ ARMCPU *cpu = ARM_CPU(cs);
int ret = 0;
switch (run->exit_reason) {
case KVM_EXIT_DEBUG:
- if (kvm_arm_handle_debug(cs, &run->debug.arch)) {
+ if (kvm_arm_handle_debug(cpu, &run->debug.arch)) {
ret = EXCP_DEBUG;
} /* otherwise return to guest */
break;
case KVM_EXIT_ARM_NISV:
/* External DABT with no valid iss to decode */
- ret = kvm_arm_handle_dabt_nisv(cs, run->arm_nisv.esr_iss,
+ ret = kvm_arm_handle_dabt_nisv(cpu, run->arm_nisv.esr_iss,
run->arm_nisv.fault_ipa);
break;
default:
return 0;
}
+/**
+ * kvm_arm_hw_debug_active:
+ * @cpu: ARMCPU
+ *
+ * Return: TRUE if any hardware breakpoints are in use.
+ */
+static bool kvm_arm_hw_debug_active(ARMCPU *cpu)
+{
+ return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
+}
+
+/**
+ * kvm_arm_copy_hw_debug_data:
+ * @ptr: kvm_guest_debug_arch structure
+ *
+ * Copy the architecture specific debug registers into the
+ * kvm_guest_debug ioctl structure.
+ */
+static void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
+{
+ int i;
+ memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
+
+ for (i = 0; i < max_hw_wps; i++) {
+ HWWatchpoint *wp = get_hw_wp(i);
+ ptr->dbg_wcr[i] = wp->wcr;
+ ptr->dbg_wvr[i] = wp->wvr;
+ }
+ for (i = 0; i < max_hw_bps; i++) {
+ HWBreakpoint *bp = get_hw_bp(i);
+ ptr->dbg_bcr[i] = bp->bcr;
+ ptr->dbg_bvr[i] = bp->bvr;
+ }
+}
+
void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
{
if (kvm_sw_breakpoints_active(cs)) {
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
}
- if (kvm_arm_hw_debug_active(cs)) {
+ if (kvm_arm_hw_debug_active(ARM_CPU(cs))) {
dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW;
kvm_arm_copy_hw_debug_data(&dbg->arch);
}
object_class_property_set_description(oc, "eager-split-size",
"Eager Page Split chunk size for hugepages. (default: 0, disabled)");
}
+
+int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return insert_hw_breakpoint(addr);
+ break;
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return insert_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
+{
+ switch (type) {
+ case GDB_BREAKPOINT_HW:
+ return delete_hw_breakpoint(addr);
+ case GDB_WATCHPOINT_READ:
+ case GDB_WATCHPOINT_WRITE:
+ case GDB_WATCHPOINT_ACCESS:
+ return delete_hw_watchpoint(addr, len, type);
+ default:
+ return -ENOSYS;
+ }
+}
+
+void kvm_arch_remove_all_hw_breakpoints(void)
+{
+ if (cur_hw_wps > 0) {
+ g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
+ }
+ if (cur_hw_bps > 0) {
+ g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
+ }
+}
+
+static bool kvm_arm_set_device_attr(ARMCPU *cpu, struct kvm_device_attr *attr,
+ const char *name)
+{
+ int err;
+
+ err = kvm_vcpu_ioctl(CPU(cpu), KVM_HAS_DEVICE_ATTR, attr);
+ if (err != 0) {
+ error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
+ return false;
+ }
+
+ err = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_DEVICE_ATTR, attr);
+ if (err != 0) {
+ error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
+ return false;
+ }
+
+ return true;
+}
+
+void kvm_arm_pmu_init(ARMCPU *cpu)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .attr = KVM_ARM_VCPU_PMU_V3_INIT,
+ };
+
+ if (!cpu->has_pmu) {
+ return;
+ }
+ if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {
+ error_report("failed to init PMU");
+ abort();
+ }
+}
+
+void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PMU_V3_CTRL,
+ .addr = (intptr_t)&irq,
+ .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
+ };
+
+ if (!cpu->has_pmu) {
+ return;
+ }
+ if (!kvm_arm_set_device_attr(cpu, &attr, "PMU")) {
+ error_report("failed to set irq for PMU");
+ abort();
+ }
+}
+
+void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
+{
+ struct kvm_device_attr attr = {
+ .group = KVM_ARM_VCPU_PVTIME_CTRL,
+ .attr = KVM_ARM_VCPU_PVTIME_IPA,
+ .addr = (uint64_t)&ipa,
+ };
+
+ if (cpu->kvm_steal_time == ON_OFF_AUTO_OFF) {
+ return;
+ }
+ if (!kvm_arm_set_device_attr(cpu, &attr, "PVTIME IPA")) {
+ error_report("failed to init PVTIME IPA");
+ abort();
+ }
+}
+
+void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
+{
+ bool has_steal_time = kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
+
+ if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
+ if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
+ } else {
+ cpu->kvm_steal_time = ON_OFF_AUTO_ON;
+ }
+ } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
+ if (!has_steal_time) {
+ error_setg(errp, "'kvm-steal-time' cannot be enabled "
+ "on this host");
+ return;
+ } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ /*
+ * DEN0057A chapter 2 says "This specification only covers
+ * systems in which the Execution state of the hypervisor
+ * as well as EL1 of virtual machines is AArch64.". And,
+ * to ensure that, the smc/hvc calls are only specified as
+ * smc64/hvc64.
+ */
+ error_setg(errp, "'kvm-steal-time' cannot be enabled "
+ "for AArch32 guests");
+ return;
+ }
+ }
+}
+
+bool kvm_arm_aarch32_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
+}
+
+bool kvm_arm_sve_supported(void)
+{
+ return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
+}
+
+QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
+
+uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
+{
+ /* Only call this function if kvm_arm_sve_supported() returns true. */
+ static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
+ static bool probed;
+ uint32_t vq = 0;
+ int i;
+
+ /*
+ * KVM ensures all host CPUs support the same set of vector lengths.
+ * So we only need to create the scratch VCPUs once and then cache
+ * the results.
+ */
+ if (!probed) {
+ struct kvm_vcpu_init init = {
+ .target = -1,
+ .features[0] = (1 << KVM_ARM_VCPU_SVE),
+ };
+ struct kvm_one_reg reg = {
+ .id = KVM_REG_ARM64_SVE_VLS,
+ .addr = (uint64_t)&vls[0],
+ };
+ int fdarray[3], ret;
+
+ probed = true;
+
+ if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
+ error_report("failed to create scratch VCPU with SVE enabled");
+ abort();
+ }
+ ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
+ kvm_arm_destroy_scratch_host_vcpu(fdarray);
+ if (ret) {
+ error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
+ strerror(errno));
+ abort();
+ }
+
+ for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
+ if (vls[i]) {
+ vq = 64 - clz64(vls[i]) + i * 64;
+ break;
+ }
+ }
+ if (vq > ARM_MAX_VQ) {
+ warn_report("KVM supports vector lengths larger than "
+ "QEMU can enable");
+ vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
+ }
+ }
+
+ return vls[0];
+}
+
+static int kvm_arm_sve_set_vls(ARMCPU *cpu)
+{
+ uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
+
+ assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
+
+ return kvm_set_one_reg(CPU(cpu), KVM_REG_ARM64_SVE_VLS, &vls[0]);
+}
+
+#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
+
+int kvm_arch_init_vcpu(CPUState *cs)
+{
+ int ret;
+ uint64_t mpidr;
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t psciver;
+
+ if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
+ !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
+ error_report("KVM is not supported for this guest CPU type");
+ return -EINVAL;
+ }
+
+ qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cpu);
+
+ /* Determine init features for this CPU */
+ memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
+ if (cs->start_powered_off) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
+ }
+ if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
+ cpu->psci_version = QEMU_PSCI_VERSION_0_2;
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
+ }
+ if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
+ }
+ if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
+ cpu->has_pmu = false;
+ }
+ if (cpu->has_pmu) {
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
+ } else {
+ env->features &= ~(1ULL << ARM_FEATURE_PMU);
+ }
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ assert(kvm_arm_sve_supported());
+ cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
+ }
+ if (cpu_isar_feature(aa64_pauth, cpu)) {
+ cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
+ 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
+ }
+
+ /* Do KVM_ARM_VCPU_INIT ioctl */
+ ret = kvm_arm_vcpu_init(cpu);
+ if (ret) {
+ return ret;
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = kvm_arm_sve_set_vls(cpu);
+ if (ret) {
+ return ret;
+ }
+ ret = kvm_arm_vcpu_finalize(cpu, KVM_ARM_VCPU_SVE);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /*
+ * KVM reports the exact PSCI version it is implementing via a
+ * special sysreg. If it is present, use its contents to determine
+ * what to report to the guest in the dtb (it is the PSCI version,
+ * in the same 15-bits major 16-bits minor format that PSCI_VERSION
+ * returns).
+ */
+ if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
+ cpu->psci_version = psciver;
+ }
+
+ /*
+ * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
+ * Currently KVM has its own idea about MPIDR assignment, so we
+ * override our defaults with what we get from KVM.
+ */
+ ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
+ if (ret) {
+ return ret;
+ }
+ cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
+
+ return kvm_arm_init_cpreg_list(cpu);
+}
+
+int kvm_arch_destroy_vcpu(CPUState *cs)
+{
+ return 0;
+}
+
+/* Callers must hold the iothread mutex lock */
+static void kvm_inject_arm_sea(CPUState *c)
+{
+ ARMCPU *cpu = ARM_CPU(c);
+ CPUARMState *env = &cpu->env;
+ uint32_t esr;
+ bool same_el;
+
+ c->exception_index = EXCP_DATA_ABORT;
+ env->exception.target_el = 1;
+
+ /*
+ * Set the DFSC to synchronous external abort and set FnV to not valid,
+ * this will tell guest the FAR_ELx is UNKNOWN for this abort.
+ */
+ same_el = arm_current_el(env) == env->exception.target_el;
+ esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);
+
+ env->exception.syndrome = esr;
+
+ arm_cpu_do_interrupt(c);
+}
+
+#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
+ KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
+
+static int kvm_arch_put_fpsimd(CPUState *cs)
+{
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ int i, ret;
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+#if HOST_BIG_ENDIAN
+ uint64_t fp_val[2] = { q[1], q[0] };
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
+ fp_val);
+#else
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
+#endif
+ if (ret) {
+ return ret;
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
+ * code the slice index to zero for now as it's unlikely we'll need more than
+ * one slice for quite some time.
+ */
+static int kvm_arch_put_sve(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t tmp[ARM_MAX_VQ * 2];
+ uint64_t *r;
+ int n, ret;
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
+ r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
+ r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
+ DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
+ if (ret) {
+ return ret;
+ }
+
+ return 0;
+}
+
+int kvm_arch_put_registers(CPUState *cs, int level)
+{
+ uint64_t val;
+ uint32_t fpr;
+ int i, ret;
+ unsigned int el;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
+ * AArch64 registers before pushing them out to 64-bit KVM.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_32_to_64(env);
+ }
+
+ for (i = 0; i < 31; i++) {
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
+ &env->xregs[i]);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ aarch64_save_sp(env, 1);
+
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
+ if (ret) {
+ return ret;
+ }
+
+ /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
+ if (is_a64(env)) {
+ val = pstate_read(env);
+ } else {
+ val = cpsr_read(env);
+ }
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
+ if (ret) {
+ return ret;
+ }
+
+ /* Saved Program State Registers
+ *
+ * Before we restore from the banked_spsr[] array we need to
+ * ensure that any modifications to env->spsr are correctly
+ * reflected in the banks.
+ */
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->banked_spsr[i] = env->spsr;
+ }
+
+ /* KVM 0-4 map to QEMU banks 1-5 */
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
+ &env->banked_spsr[i + 1]);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = kvm_arch_put_sve(cs);
+ } else {
+ ret = kvm_arch_put_fpsimd(cs);
+ }
+ if (ret) {
+ return ret;
+ }
+
+ fpr = vfp_get_fpsr(env);
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
+ if (ret) {
+ return ret;
+ }
+
+ fpr = vfp_get_fpcr(env);
+ ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
+ if (ret) {
+ return ret;
+ }
+
+ write_cpustate_to_list(cpu, true);
+
+ if (!write_list_to_kvmstate(cpu, level)) {
+ return -EINVAL;
+ }
+
+ /*
+ * Setting VCPU events should be triggered after syncing the registers
+ * to avoid overwriting potential changes made by KVM upon calling
+ * KVM_SET_VCPU_EVENTS ioctl
+ */
+ ret = kvm_put_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
+ return kvm_arm_sync_mpstate_to_kvm(cpu);
+}
+
+static int kvm_arch_get_fpsimd(CPUState *cs)
+{
+ CPUARMState *env = &ARM_CPU(cs)->env;
+ int i, ret;
+
+ for (i = 0; i < 32; i++) {
+ uint64_t *q = aa64_vfp_qreg(env, i);
+ ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
+ if (ret) {
+ return ret;
+ } else {
+#if HOST_BIG_ENDIAN
+ uint64_t t;
+ t = q[0], q[0] = q[1], q[1] = t;
+#endif
+ }
+ }
+
+ return 0;
+}
+
+/*
+ * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
+ * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
+ * code the slice index to zero for now as it's unlikely we'll need more than
+ * one slice for quite some time.
+ */
+static int kvm_arch_get_sve(CPUState *cs)
+{
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+ uint64_t *r;
+ int n, ret;
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
+ r = &env->vfp.zregs[n].d[0];
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
+ if (ret) {
+ return ret;
+ }
+ sve_bswap64(r, r, cpu->sve_max_vq * 2);
+ }
+
+ for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
+ r = &env->vfp.pregs[n].p[0];
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
+ if (ret) {
+ return ret;
+ }
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+ }
+
+ r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
+ ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
+ if (ret) {
+ return ret;
+ }
+ sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
+
+ return 0;
+}
+
+int kvm_arch_get_registers(CPUState *cs)
+{
+ uint64_t val;
+ unsigned int el;
+ uint32_t fpr;
+ int i, ret;
+
+ ARMCPU *cpu = ARM_CPU(cs);
+ CPUARMState *env = &cpu->env;
+
+ for (i = 0; i < 31; i++) {
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
+ &env->xregs[i]);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
+ if (ret) {
+ return ret;
+ }
+
+ env->aarch64 = ((val & PSTATE_nRW) == 0);
+ if (is_a64(env)) {
+ pstate_write(env, val);
+ } else {
+ cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
+ }
+
+ /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
+ * QEMU side we keep the current SP in xregs[31] as well.
+ */
+ aarch64_restore_sp(env, 1);
+
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
+ if (ret) {
+ return ret;
+ }
+
+ /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
+ * incoming AArch64 regs received from 64-bit KVM.
+ * We must perform this after all of the registers have been acquired from
+ * the kernel.
+ */
+ if (!is_a64(env)) {
+ aarch64_sync_64_to_32(env);
+ }
+
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
+ if (ret) {
+ return ret;
+ }
+
+ /* Fetch the SPSR registers
+ *
+ * KVM SPSRs 0-4 map to QEMU banks 1-5
+ */
+ for (i = 0; i < KVM_NR_SPSR; i++) {
+ ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
+ &env->banked_spsr[i + 1]);
+ if (ret) {
+ return ret;
+ }
+ }
+
+ el = arm_current_el(env);
+ if (el > 0 && !is_a64(env)) {
+ i = bank_number(env->uncached_cpsr & CPSR_M);
+ env->spsr = env->banked_spsr[i];
+ }
+
+ if (cpu_isar_feature(aa64_sve, cpu)) {
+ ret = kvm_arch_get_sve(cs);
+ } else {
+ ret = kvm_arch_get_fpsimd(cs);
+ }
+ if (ret) {
+ return ret;
+ }
+
+ ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpsr(env, fpr);
+
+ ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
+ if (ret) {
+ return ret;
+ }
+ vfp_set_fpcr(env, fpr);
+
+ ret = kvm_get_vcpu_events(cpu);
+ if (ret) {
+ return ret;
+ }
+
+ if (!write_kvmstate_to_list(cpu)) {
+ return -EINVAL;
+ }
+ /* Note that it's OK to have registers which aren't in CPUState,
+ * so we can ignore a failure return here.
+ */
+ write_list_to_cpustate(cpu);
+
+ ret = kvm_arm_sync_mpstate_to_qemu(cpu);
+
+ /* TODO: other registers */
+ return ret;
+}
+
+void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
+{
+ ram_addr_t ram_addr;
+ hwaddr paddr;
+
+ assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
+
+ if (acpi_ghes_present() && addr) {
+ ram_addr = qemu_ram_addr_from_host(addr);
+ if (ram_addr != RAM_ADDR_INVALID &&
+ kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
+ kvm_hwpoison_page_add(ram_addr);
+ /*
+ * If this is a BUS_MCEERR_AR, we know we have been called
+ * synchronously from the vCPU thread, so we can easily
+ * synchronize the state and inject an error.
+ *
+ * TODO: we currently don't tell the guest at all about
+ * BUS_MCEERR_AO. In that case we might either be being
+ * called synchronously from the vCPU thread, or a bit
+ * later from the main thread, so doing the injection of
+ * the error would be more complicated.
+ */
+ if (code == BUS_MCEERR_AR) {
+ kvm_cpu_synchronize_state(c);
+ if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
+ kvm_inject_arm_sea(c);
+ } else {
+ error_report("failed to record the error");
+ abort();
+ }
+ }
+ return;
+ }
+ if (code == BUS_MCEERR_AO) {
+ error_report("Hardware memory error at addr %p for memory used by "
+ "QEMU itself instead of guest system!", addr);
+ }
+ }
+
+ if (code == BUS_MCEERR_AR) {
+ error_report("Hardware memory error!");
+ exit(1);
+ }
+}
+
+/* C6.6.29 BRK instruction */
+static const uint32_t brk_insn = 0xd4200000;
+
+int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
+
+int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
+{
+ static uint32_t brk;
+
+ if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
+ brk != brk_insn ||
+ cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
+ return -EINVAL;
+ }
+ return 0;
+}
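
The breakpoint hooks above plant BRK #0 (0xd4200000) over the saved instruction and verify it on removal. As a standalone sketch (not QEMU code) of the A64 BRK encoding the comment cites (ARM DDI 0487, C6.6.29), where imm16 occupies bits [20:5] of the opcode:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* BRK #imm16: base opcode 0xd4200000 with imm16 packed into bits [20:5]. */
static uint32_t brk_encode(uint16_t imm16)
{
    return UINT32_C(0xd4200000) | ((uint32_t)imm16 << 5);
}

int main(void)
{
    /* BRK #0 is the instruction the insert hook writes over bp->pc. */
    printf("BRK #0   = 0x%08" PRIx32 "\n", brk_encode(0));   /* d4200000 */
    printf("BRK #0xf = 0x%08" PRIx32 "\n", brk_encode(0xf)); /* d42001e0 */
    return 0;
}
```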
+++ /dev/null
-/*
- * ARM implementation of KVM hooks, 64 bit specific code
- *
- * Copyright Mian-M. Hamayun 2013, Virtual Open Systems
- * Copyright Alex Bennée 2014, Linaro
- *
- * This work is licensed under the terms of the GNU GPL, version 2 or later.
- * See the COPYING file in the top-level directory.
- *
- */
-
-#include "qemu/osdep.h"
-#include <sys/ioctl.h>
-#include <sys/ptrace.h>
-
-#include <linux/elf.h>
-#include <linux/kvm.h>
-
-#include "qapi/error.h"
-#include "cpu.h"
-#include "qemu/timer.h"
-#include "qemu/error-report.h"
-#include "qemu/host-utils.h"
-#include "qemu/main-loop.h"
-#include "exec/gdbstub.h"
-#include "sysemu/runstate.h"
-#include "sysemu/kvm.h"
-#include "sysemu/kvm_int.h"
-#include "kvm_arm.h"
-#include "internals.h"
-#include "cpu-features.h"
-#include "hw/acpi/acpi.h"
-#include "hw/acpi/ghes.h"
-
-static bool have_guest_debug;
-
-void kvm_arm_init_debug(KVMState *s)
-{
- have_guest_debug = kvm_check_extension(s,
- KVM_CAP_SET_GUEST_DEBUG);
-
- max_hw_wps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_WPS);
- hw_watchpoints = g_array_sized_new(true, true,
- sizeof(HWWatchpoint), max_hw_wps);
-
- max_hw_bps = kvm_check_extension(s, KVM_CAP_GUEST_DEBUG_HW_BPS);
- hw_breakpoints = g_array_sized_new(true, true,
- sizeof(HWBreakpoint), max_hw_bps);
- return;
-}
-
-int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
-{
- switch (type) {
- case GDB_BREAKPOINT_HW:
- return insert_hw_breakpoint(addr);
- break;
- case GDB_WATCHPOINT_READ:
- case GDB_WATCHPOINT_WRITE:
- case GDB_WATCHPOINT_ACCESS:
- return insert_hw_watchpoint(addr, len, type);
- default:
- return -ENOSYS;
- }
-}
-
-int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
-{
- switch (type) {
- case GDB_BREAKPOINT_HW:
- return delete_hw_breakpoint(addr);
- case GDB_WATCHPOINT_READ:
- case GDB_WATCHPOINT_WRITE:
- case GDB_WATCHPOINT_ACCESS:
- return delete_hw_watchpoint(addr, len, type);
- default:
- return -ENOSYS;
- }
-}
-
-
-void kvm_arch_remove_all_hw_breakpoints(void)
-{
- if (cur_hw_wps > 0) {
- g_array_remove_range(hw_watchpoints, 0, cur_hw_wps);
- }
- if (cur_hw_bps > 0) {
- g_array_remove_range(hw_breakpoints, 0, cur_hw_bps);
- }
-}
-
-void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr)
-{
- int i;
- memset(ptr, 0, sizeof(struct kvm_guest_debug_arch));
-
- for (i = 0; i < max_hw_wps; i++) {
- HWWatchpoint *wp = get_hw_wp(i);
- ptr->dbg_wcr[i] = wp->wcr;
- ptr->dbg_wvr[i] = wp->wvr;
- }
- for (i = 0; i < max_hw_bps; i++) {
- HWBreakpoint *bp = get_hw_bp(i);
- ptr->dbg_bcr[i] = bp->bcr;
- ptr->dbg_bvr[i] = bp->bvr;
- }
-}
-
-bool kvm_arm_hw_debug_active(CPUState *cs)
-{
- return ((cur_hw_wps > 0) || (cur_hw_bps > 0));
-}
-
-static bool kvm_arm_set_device_attr(CPUState *cs, struct kvm_device_attr *attr,
- const char *name)
-{
- int err;
-
- err = kvm_vcpu_ioctl(cs, KVM_HAS_DEVICE_ATTR, attr);
- if (err != 0) {
- error_report("%s: KVM_HAS_DEVICE_ATTR: %s", name, strerror(-err));
- return false;
- }
-
- err = kvm_vcpu_ioctl(cs, KVM_SET_DEVICE_ATTR, attr);
- if (err != 0) {
- error_report("%s: KVM_SET_DEVICE_ATTR: %s", name, strerror(-err));
- return false;
- }
-
- return true;
-}
-
-void kvm_arm_pmu_init(CPUState *cs)
-{
- struct kvm_device_attr attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .attr = KVM_ARM_VCPU_PMU_V3_INIT,
- };
-
- if (!ARM_CPU(cs)->has_pmu) {
- return;
- }
- if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
- error_report("failed to init PMU");
- abort();
- }
-}
-
-void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
-{
- struct kvm_device_attr attr = {
- .group = KVM_ARM_VCPU_PMU_V3_CTRL,
- .addr = (intptr_t)&irq,
- .attr = KVM_ARM_VCPU_PMU_V3_IRQ,
- };
-
- if (!ARM_CPU(cs)->has_pmu) {
- return;
- }
- if (!kvm_arm_set_device_attr(cs, &attr, "PMU")) {
- error_report("failed to set irq for PMU");
- abort();
- }
-}
-
-void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
-{
- struct kvm_device_attr attr = {
- .group = KVM_ARM_VCPU_PVTIME_CTRL,
- .attr = KVM_ARM_VCPU_PVTIME_IPA,
- .addr = (uint64_t)&ipa,
- };
-
- if (ARM_CPU(cs)->kvm_steal_time == ON_OFF_AUTO_OFF) {
- return;
- }
- if (!kvm_arm_set_device_attr(cs, &attr, "PVTIME IPA")) {
- error_report("failed to init PVTIME IPA");
- abort();
- }
-}
-
-static int read_sys_reg32(int fd, uint32_t *pret, uint64_t id)
-{
- uint64_t ret;
- struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)&ret };
- int err;
-
- assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
- err = ioctl(fd, KVM_GET_ONE_REG, &idreg);
- if (err < 0) {
- return -1;
- }
- *pret = ret;
- return 0;
-}
-
-static int read_sys_reg64(int fd, uint64_t *pret, uint64_t id)
-{
- struct kvm_one_reg idreg = { .id = id, .addr = (uintptr_t)pret };
-
- assert((id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64);
- return ioctl(fd, KVM_GET_ONE_REG, &idreg);
-}
-
-static bool kvm_arm_pauth_supported(void)
-{
- return (kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_ADDRESS) &&
- kvm_check_extension(kvm_state, KVM_CAP_ARM_PTRAUTH_GENERIC));
-}
-
-bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
-{
- /* Identify the feature bits corresponding to the host CPU, and
- * fill out the ARMHostCPUClass fields accordingly. To do this
- * we have to create a scratch VM, create a single CPU inside it,
- * and then query that CPU for the relevant ID registers.
- */
- int fdarray[3];
- bool sve_supported;
- bool pmu_supported = false;
- uint64_t features = 0;
- int err;
-
- /* Old kernels may not know about the PREFERRED_TARGET ioctl: however
- * we know these will only support creating one kind of guest CPU,
- * which is its preferred CPU type. Fortunately these old kernels
- * support only a very limited number of CPUs.
- */
- static const uint32_t cpus_to_try[] = {
- KVM_ARM_TARGET_AEM_V8,
- KVM_ARM_TARGET_FOUNDATION_V8,
- KVM_ARM_TARGET_CORTEX_A57,
- QEMU_KVM_ARM_TARGET_NONE
- };
- /*
- * target = -1 informs kvm_arm_create_scratch_host_vcpu()
- * to use the preferred target
- */
- struct kvm_vcpu_init init = { .target = -1, };
-
- /*
- * Ask for SVE if supported, so that we can query ID_AA64ZFR0,
- * which is otherwise RAZ.
- */
- sve_supported = kvm_arm_sve_supported();
- if (sve_supported) {
- init.features[0] |= 1 << KVM_ARM_VCPU_SVE;
- }
-
- /*
- * Ask for Pointer Authentication if supported, so that we get
- * the unsanitized field values for AA64ISAR1_EL1.
- */
- if (kvm_arm_pauth_supported()) {
- init.features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
- 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
- }
-
- if (kvm_arm_pmu_supported()) {
- init.features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
- pmu_supported = true;
- }
-
- if (!kvm_arm_create_scratch_host_vcpu(cpus_to_try, fdarray, &init)) {
- return false;
- }
-
- ahcf->target = init.target;
- ahcf->dtb_compatible = "arm,arm-v8";
-
- err = read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 0));
- if (unlikely(err < 0)) {
- /*
- * Before v4.15, the kernel only exposed a limited number of system
- * registers, not including any of the interesting AArch64 ID regs.
- * For the most part we could leave these fields as zero with minimal
- * effect, since this does not affect the values seen by the guest.
- *
- * However, it could cause problems down the line for QEMU,
- * so provide a minimal v8.0 default.
- *
- * ??? Could read MIDR and use knowledge from cpu64.c.
- * ??? Could map a page of memory into our temp guest and
- * run the tiniest of hand-crafted kernels to extract
- * the values seen by the guest.
-         * ??? Either of these sounds like too much effort just
-         *     to work around running a very old kernel.
- */
- ahcf->isar.id_aa64pfr0 = 0x00000011; /* EL1&0, AArch64 only */
- err = 0;
- } else {
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64pfr1,
- ARM64_SYS_REG(3, 0, 0, 4, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64smfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 5));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr0,
- ARM64_SYS_REG(3, 0, 0, 5, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64dfr1,
- ARM64_SYS_REG(3, 0, 0, 5, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar0,
- ARM64_SYS_REG(3, 0, 0, 6, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar1,
- ARM64_SYS_REG(3, 0, 0, 6, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64isar2,
- ARM64_SYS_REG(3, 0, 0, 6, 2));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr0,
- ARM64_SYS_REG(3, 0, 0, 7, 0));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr1,
- ARM64_SYS_REG(3, 0, 0, 7, 1));
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64mmfr2,
- ARM64_SYS_REG(3, 0, 0, 7, 2));
-
- /*
- * Note that if AArch32 support is not present in the host,
- * the AArch32 sysregs are present to be read, but will
- * return UNKNOWN values. This is neither better nor worse
- * than skipping the reads and leaving 0, as we must avoid
- * considering the values in every case.
- */
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr1,
- ARM64_SYS_REG(3, 0, 0, 1, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr0,
- ARM64_SYS_REG(3, 0, 0, 1, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr1,
- ARM64_SYS_REG(3, 0, 0, 1, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr2,
- ARM64_SYS_REG(3, 0, 0, 1, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr3,
- ARM64_SYS_REG(3, 0, 0, 1, 7));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar0,
- ARM64_SYS_REG(3, 0, 0, 2, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar1,
- ARM64_SYS_REG(3, 0, 0, 2, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar2,
- ARM64_SYS_REG(3, 0, 0, 2, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar3,
- ARM64_SYS_REG(3, 0, 0, 2, 3));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar4,
- ARM64_SYS_REG(3, 0, 0, 2, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar5,
- ARM64_SYS_REG(3, 0, 0, 2, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr4,
- ARM64_SYS_REG(3, 0, 0, 2, 6));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_isar6,
- ARM64_SYS_REG(3, 0, 0, 2, 7));
-
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr0,
- ARM64_SYS_REG(3, 0, 0, 3, 0));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr1,
- ARM64_SYS_REG(3, 0, 0, 3, 1));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.mvfr2,
- ARM64_SYS_REG(3, 0, 0, 3, 2));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_pfr2,
- ARM64_SYS_REG(3, 0, 0, 3, 4));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_dfr1,
- ARM64_SYS_REG(3, 0, 0, 3, 5));
- err |= read_sys_reg32(fdarray[2], &ahcf->isar.id_mmfr5,
- ARM64_SYS_REG(3, 0, 0, 3, 6));
-
- /*
- * DBGDIDR is a bit complicated because the kernel doesn't
- * provide an accessor for it in 64-bit mode, which is what this
- * scratch VM is in, and there's no architected "64-bit sysreg
- * which reads the same as the 32-bit register" the way there is
- * for other ID registers. Instead we synthesize a value from the
- * AArch64 ID_AA64DFR0, the same way the kernel code in
- * arch/arm64/kvm/sys_regs.c:trap_dbgidr() does.
- * We only do this if the CPU supports AArch32 at EL1.
- */
- if (FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL1) >= 2) {
- int wrps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, WRPS);
- int brps = FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, BRPS);
- int ctx_cmps =
- FIELD_EX64(ahcf->isar.id_aa64dfr0, ID_AA64DFR0, CTX_CMPS);
- int version = 6; /* ARMv8 debug architecture */
- bool has_el3 =
- !!FIELD_EX32(ahcf->isar.id_aa64pfr0, ID_AA64PFR0, EL3);
- uint32_t dbgdidr = 0;
-
- dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, WRPS, wrps);
- dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, BRPS, brps);
- dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, CTX_CMPS, ctx_cmps);
- dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, VERSION, version);
- dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, NSUHD_IMP, has_el3);
- dbgdidr = FIELD_DP32(dbgdidr, DBGDIDR, SE_IMP, has_el3);
- dbgdidr |= (1 << 15); /* RES1 bit */
- ahcf->isar.dbgdidr = dbgdidr;
- }
-
- if (pmu_supported) {
- /* PMCR_EL0 is only accessible if the vCPU has feature PMU_V3 */
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.reset_pmcr_el0,
- ARM64_SYS_REG(3, 3, 9, 12, 0));
- }
-
- if (sve_supported) {
- /*
- * There is a range of kernels between kernel commit 73433762fcae
- * and f81cb2c3ad41 which have a bug where the kernel doesn't
- * expose SYS_ID_AA64ZFR0_EL1 via the ONE_REG API unless the VM has
- * enabled SVE support, which resulted in an error rather than RAZ.
- * So only read the register if we set KVM_ARM_VCPU_SVE above.
- */
- err |= read_sys_reg64(fdarray[2], &ahcf->isar.id_aa64zfr0,
- ARM64_SYS_REG(3, 0, 0, 4, 4));
- }
- }
-
- kvm_arm_destroy_scratch_host_vcpu(fdarray);
-
- if (err < 0) {
- return false;
- }
-
- /*
- * We can assume any KVM supporting CPU is at least a v8
- * with VFPv4+Neon; this in turn implies most of the other
- * feature bits.
- */
- features |= 1ULL << ARM_FEATURE_V8;
- features |= 1ULL << ARM_FEATURE_NEON;
- features |= 1ULL << ARM_FEATURE_AARCH64;
- features |= 1ULL << ARM_FEATURE_PMU;
- features |= 1ULL << ARM_FEATURE_GENERIC_TIMER;
-
- ahcf->features = features;
-
- return true;
-}
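
All the ARM64_SYS_REG(op0, op1, CRn, CRm, op2) reads above funnel through the kernel's sysreg register-ID packing. A minimal sketch of the low 16 bits of that ID, assuming Linux's KVM_REG_ARM64_SYSREG_* field layout (op0 in bits [15:14], op1 [13:11], CRn [10:7], CRm [6:3], op2 [2:0]); the real ID additionally ORs in KVM_REG_ARM64, KVM_REG_ARM64_SYSREG and a size flag:

```c
#include <stdint.h>
#include <stdio.h>

/* Low 16 bits of a KVM sysreg ID, per the assumed field layout above. */
static unsigned sysreg_low16(unsigned op0, unsigned op1, unsigned crn,
                             unsigned crm, unsigned op2)
{
    return (op0 << 14) | (op1 << 11) | (crn << 7) | (crm << 3) | op2;
}

int main(void)
{
    /* ARM64_SYS_REG(3, 0, 0, 4, 0) above reads ID_AA64PFR0_EL1. */
    printf("ID_AA64PFR0_EL1 -> 0x%04x\n", sysreg_low16(3, 0, 0, 4, 0));
    /* ARM64_SYS_REG(3, 3, 9, 12, 0) reads PMCR_EL0. */
    printf("PMCR_EL0        -> 0x%04x\n", sysreg_low16(3, 3, 9, 12, 0));
    return 0;
}
```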
-
-void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp)
-{
- bool has_steal_time = kvm_arm_steal_time_supported();
-
- if (cpu->kvm_steal_time == ON_OFF_AUTO_AUTO) {
- if (!has_steal_time || !arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- cpu->kvm_steal_time = ON_OFF_AUTO_OFF;
- } else {
- cpu->kvm_steal_time = ON_OFF_AUTO_ON;
- }
- } else if (cpu->kvm_steal_time == ON_OFF_AUTO_ON) {
- if (!has_steal_time) {
- error_setg(errp, "'kvm-steal-time' cannot be enabled "
- "on this host");
- return;
- } else if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- /*
- * DEN0057A chapter 2 says "This specification only covers
- * systems in which the Execution state of the hypervisor
- * as well as EL1 of virtual machines is AArch64.". And,
- * to ensure that, the smc/hvc calls are only specified as
- * smc64/hvc64.
- */
- error_setg(errp, "'kvm-steal-time' cannot be enabled "
- "for AArch32 guests");
- return;
- }
- }
-}
-
-bool kvm_arm_aarch32_supported(void)
-{
- return kvm_check_extension(kvm_state, KVM_CAP_ARM_EL1_32BIT);
-}
-
-bool kvm_arm_sve_supported(void)
-{
- return kvm_check_extension(kvm_state, KVM_CAP_ARM_SVE);
-}
-
-bool kvm_arm_steal_time_supported(void)
-{
- return kvm_check_extension(kvm_state, KVM_CAP_STEAL_TIME);
-}
-
-QEMU_BUILD_BUG_ON(KVM_ARM64_SVE_VQ_MIN != 1);
-
-uint32_t kvm_arm_sve_get_vls(CPUState *cs)
-{
- /* Only call this function if kvm_arm_sve_supported() returns true. */
- static uint64_t vls[KVM_ARM64_SVE_VLS_WORDS];
- static bool probed;
- uint32_t vq = 0;
- int i;
-
- /*
- * KVM ensures all host CPUs support the same set of vector lengths.
- * So we only need to create the scratch VCPUs once and then cache
- * the results.
- */
- if (!probed) {
- struct kvm_vcpu_init init = {
- .target = -1,
- .features[0] = (1 << KVM_ARM_VCPU_SVE),
- };
- struct kvm_one_reg reg = {
- .id = KVM_REG_ARM64_SVE_VLS,
- .addr = (uint64_t)&vls[0],
- };
- int fdarray[3], ret;
-
- probed = true;
-
- if (!kvm_arm_create_scratch_host_vcpu(NULL, fdarray, &init)) {
- error_report("failed to create scratch VCPU with SVE enabled");
- abort();
- }
-        ret = ioctl(fdarray[2], KVM_GET_ONE_REG, &reg);
- kvm_arm_destroy_scratch_host_vcpu(fdarray);
- if (ret) {
- error_report("failed to get KVM_REG_ARM64_SVE_VLS: %s",
- strerror(errno));
- abort();
- }
-
- for (i = KVM_ARM64_SVE_VLS_WORDS - 1; i >= 0; --i) {
- if (vls[i]) {
- vq = 64 - clz64(vls[i]) + i * 64;
- break;
- }
- }
- if (vq > ARM_MAX_VQ) {
- warn_report("KVM supports vector lengths larger than "
- "QEMU can enable");
- vls[0] &= MAKE_64BIT_MASK(0, ARM_MAX_VQ);
- }
- }
-
- return vls[0];
-}
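
A small illustration of the VLS bitmap convention kvm_arm_sve_get_vls() decodes: bit (vq - 1) of a KVM_REG_ARM64_SVE_VLS word is set when the host supports vq quadwords (vq * 128 bits) per vector, and the `64 - clz64(vls[i]) + i * 64` scan finds the largest such vq. The value below is hypothetical, not probed from a host, and __builtin_clzll (GCC/Clang) stands in for QEMU's clz64():

```c
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    /* Hypothetical first VLS word: vq = 1, 2, 3 supported. */
    uint64_t vls0 = 0x7;
    unsigned max_vq = 64 - __builtin_clzll(vls0); /* stands in for clz64() */

    printf("max vq = %u (%u-bit vectors)\n", max_vq, max_vq * 128);
    for (unsigned vq = 1; vq <= max_vq; vq++) {
        if (vls0 & (UINT64_C(1) << (vq - 1))) {
            printf("  vq %u -> VL %u bits\n", vq, vq * 128);
        }
    }
    return 0;
}
```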
-
-static int kvm_arm_sve_set_vls(CPUState *cs)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- uint64_t vls[KVM_ARM64_SVE_VLS_WORDS] = { cpu->sve_vq.map };
-
- assert(cpu->sve_max_vq <= KVM_ARM64_SVE_VQ_MAX);
-
- return kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_VLS, &vls[0]);
-}
-
-#define ARM_CPU_ID_MPIDR 3, 0, 0, 0, 5
-
-int kvm_arch_init_vcpu(CPUState *cs)
-{
- int ret;
- uint64_t mpidr;
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint64_t psciver;
-
- if (cpu->kvm_target == QEMU_KVM_ARM_TARGET_NONE ||
- !object_dynamic_cast(OBJECT(cpu), TYPE_AARCH64_CPU)) {
- error_report("KVM is not supported for this guest CPU type");
- return -EINVAL;
- }
-
- qemu_add_vm_change_state_handler(kvm_arm_vm_state_change, cs);
-
- /* Determine init features for this CPU */
- memset(cpu->kvm_init_features, 0, sizeof(cpu->kvm_init_features));
- if (cs->start_powered_off) {
- cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_POWER_OFF;
- }
- if (kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PSCI_0_2)) {
- cpu->psci_version = QEMU_PSCI_VERSION_0_2;
- cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PSCI_0_2;
- }
- if (!arm_feature(&cpu->env, ARM_FEATURE_AARCH64)) {
- cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_EL1_32BIT;
- }
- if (!kvm_check_extension(cs->kvm_state, KVM_CAP_ARM_PMU_V3)) {
- cpu->has_pmu = false;
- }
- if (cpu->has_pmu) {
- cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_PMU_V3;
- } else {
- env->features &= ~(1ULL << ARM_FEATURE_PMU);
- }
- if (cpu_isar_feature(aa64_sve, cpu)) {
- assert(kvm_arm_sve_supported());
- cpu->kvm_init_features[0] |= 1 << KVM_ARM_VCPU_SVE;
- }
- if (cpu_isar_feature(aa64_pauth, cpu)) {
- cpu->kvm_init_features[0] |= (1 << KVM_ARM_VCPU_PTRAUTH_ADDRESS |
- 1 << KVM_ARM_VCPU_PTRAUTH_GENERIC);
- }
-
- /* Do KVM_ARM_VCPU_INIT ioctl */
- ret = kvm_arm_vcpu_init(cs);
- if (ret) {
- return ret;
- }
-
- if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = kvm_arm_sve_set_vls(cs);
- if (ret) {
- return ret;
- }
- ret = kvm_arm_vcpu_finalize(cs, KVM_ARM_VCPU_SVE);
- if (ret) {
- return ret;
- }
- }
-
- /*
- * KVM reports the exact PSCI version it is implementing via a
- * special sysreg. If it is present, use its contents to determine
- * what to report to the guest in the dtb (it is the PSCI version,
- * in the same 15-bits major 16-bits minor format that PSCI_VERSION
- * returns).
- */
- if (!kvm_get_one_reg(cs, KVM_REG_ARM_PSCI_VERSION, &psciver)) {
- cpu->psci_version = psciver;
- }
-
- /*
- * When KVM is in use, PSCI is emulated in-kernel and not by qemu.
- * Currently KVM has its own idea about MPIDR assignment, so we
- * override our defaults with what we get from KVM.
- */
- ret = kvm_get_one_reg(cs, ARM64_SYS_REG(ARM_CPU_ID_MPIDR), &mpidr);
- if (ret) {
- return ret;
- }
- cpu->mp_affinity = mpidr & ARM64_AFFINITY_MASK;
-
- /* Check whether user space can specify guest syndrome value */
- kvm_arm_init_serror_injection(cs);
-
- return kvm_arm_init_cpreg_list(cpu);
-}
-
-int kvm_arch_destroy_vcpu(CPUState *cs)
-{
- return 0;
-}
-
-bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx)
-{
- /* Return true if the regidx is a register we should synchronize
- * via the cpreg_tuples array (ie is not a core or sve reg that
- * we sync by hand in kvm_arch_get/put_registers())
- */
- switch (regidx & KVM_REG_ARM_COPROC_MASK) {
- case KVM_REG_ARM_CORE:
- case KVM_REG_ARM64_SVE:
- return false;
- default:
- return true;
- }
-}
-
-typedef struct CPRegStateLevel {
- uint64_t regidx;
- int level;
-} CPRegStateLevel;
-
-/* All system registers not listed in the following table are assumed to be
- * of the level KVM_PUT_RUNTIME_STATE. If a register should be written less
- * often, you must add it to this table with a state of either
- * KVM_PUT_RESET_STATE or KVM_PUT_FULL_STATE.
- */
-static const CPRegStateLevel non_runtime_cpregs[] = {
- { KVM_REG_ARM_TIMER_CNT, KVM_PUT_FULL_STATE },
- { KVM_REG_ARM_PTIMER_CNT, KVM_PUT_FULL_STATE },
-};
-
-int kvm_arm_cpreg_level(uint64_t regidx)
-{
- int i;
-
- for (i = 0; i < ARRAY_SIZE(non_runtime_cpregs); i++) {
- const CPRegStateLevel *l = &non_runtime_cpregs[i];
- if (l->regidx == regidx) {
- return l->level;
- }
- }
-
- return KVM_PUT_RUNTIME_STATE;
-}
-
-/* Callers must hold the iothread mutex lock */
-static void kvm_inject_arm_sea(CPUState *c)
-{
- ARMCPU *cpu = ARM_CPU(c);
- CPUARMState *env = &cpu->env;
- uint32_t esr;
- bool same_el;
-
- c->exception_index = EXCP_DATA_ABORT;
- env->exception.target_el = 1;
-
- /*
- * Set the DFSC to synchronous external abort and set FnV to not valid,
- * this will tell guest the FAR_ELx is UNKNOWN for this abort.
- */
- same_el = arm_current_el(env) == env->exception.target_el;
- esr = syn_data_abort_no_iss(same_el, 1, 0, 0, 0, 0, 0x10);
-
- env->exception.syndrome = esr;
-
- arm_cpu_do_interrupt(c);
-}
-
-#define AARCH64_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U64 | \
- KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
-
-#define AARCH64_SIMD_CORE_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U128 | \
- KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
-
-#define AARCH64_SIMD_CTRL_REG(x) (KVM_REG_ARM64 | KVM_REG_SIZE_U32 | \
- KVM_REG_ARM_CORE | KVM_REG_ARM_CORE_REG(x))
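
The AARCH64_*_REG() macros above index into struct kvm_regs via the kernel's KVM_REG_ARM_CORE_REG(), which is an offsetof() scaled to 32-bit units. A sketch with a mock layout (the field offsets mirror the kernel's user_pt_regs, but the structs here are illustrative, not the uapi definitions):

```c
#include <stddef.h>
#include <stdint.h>
#include <stdio.h>

/* Illustrative stand-ins for the kernel's user_pt_regs/kvm_regs layout. */
struct mock_user_pt_regs {
    uint64_t regs[31];
    uint64_t sp;
    uint64_t pc;
    uint64_t pstate;
};

struct mock_kvm_regs {
    struct mock_user_pt_regs regs;
    /* sp_el1, elr_el1, spsr[] and fp_regs follow in the real struct */
};

#define CORE_REG_INDEX(name) \
    (offsetof(struct mock_kvm_regs, name) / sizeof(uint32_t))

int main(void)
{
    printf("regs.regs[0] -> index %zu\n", CORE_REG_INDEX(regs.regs[0])); /* 0 */
    printf("regs.sp      -> index %zu\n", CORE_REG_INDEX(regs.sp));  /* 62 */
    printf("regs.pc      -> index %zu\n", CORE_REG_INDEX(regs.pc));  /* 64 */
    return 0;
}
```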
-
-static int kvm_arch_put_fpsimd(CPUState *cs)
-{
- CPUARMState *env = &ARM_CPU(cs)->env;
- int i, ret;
-
- for (i = 0; i < 32; i++) {
- uint64_t *q = aa64_vfp_qreg(env, i);
-#if HOST_BIG_ENDIAN
- uint64_t fp_val[2] = { q[1], q[0] };
- ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]),
- fp_val);
-#else
- ret = kvm_set_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
-#endif
- if (ret) {
- return ret;
- }
- }
-
- return 0;
-}
-
-/*
- * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
- * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
- * code the slice index to zero for now as it's unlikely we'll need more than
- * one slice for quite some time.
- */
-static int kvm_arch_put_sve(CPUState *cs)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint64_t tmp[ARM_MAX_VQ * 2];
- uint64_t *r;
- int n, ret;
-
- for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
- r = sve_bswap64(tmp, &env->vfp.zregs[n].d[0], cpu->sve_max_vq * 2);
- ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
- if (ret) {
- return ret;
- }
- }
-
- for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
- r = sve_bswap64(tmp, r = &env->vfp.pregs[n].p[0],
- DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
- ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
- if (ret) {
- return ret;
- }
- }
-
- r = sve_bswap64(tmp, &env->vfp.pregs[FFR_PRED_NUM].p[0],
- DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
- ret = kvm_set_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
- if (ret) {
- return ret;
- }
-
- return 0;
-}
-
-int kvm_arch_put_registers(CPUState *cs, int level)
-{
- uint64_t val;
- uint32_t fpr;
- int i, ret;
- unsigned int el;
-
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- /* If we are in AArch32 mode then we need to copy the AArch32 regs to the
- * AArch64 registers before pushing them out to 64-bit KVM.
- */
- if (!is_a64(env)) {
- aarch64_sync_32_to_64(env);
- }
-
- for (i = 0; i < 31; i++) {
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
- &env->xregs[i]);
- if (ret) {
- return ret;
- }
- }
-
- /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
- * QEMU side we keep the current SP in xregs[31] as well.
- */
- aarch64_save_sp(env, 1);
-
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
- if (ret) {
- return ret;
- }
-
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
- if (ret) {
- return ret;
- }
-
- /* Note that KVM thinks pstate is 64 bit but we use a uint32_t */
- if (is_a64(env)) {
- val = pstate_read(env);
- } else {
- val = cpsr_read(env);
- }
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
- if (ret) {
- return ret;
- }
-
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
- if (ret) {
- return ret;
- }
-
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
- if (ret) {
- return ret;
- }
-
- /* Saved Program State Registers
- *
- * Before we restore from the banked_spsr[] array we need to
- * ensure that any modifications to env->spsr are correctly
- * reflected in the banks.
- */
- el = arm_current_el(env);
- if (el > 0 && !is_a64(env)) {
- i = bank_number(env->uncached_cpsr & CPSR_M);
- env->banked_spsr[i] = env->spsr;
- }
-
- /* KVM 0-4 map to QEMU banks 1-5 */
- for (i = 0; i < KVM_NR_SPSR; i++) {
- ret = kvm_set_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
- &env->banked_spsr[i + 1]);
- if (ret) {
- return ret;
- }
- }
-
- if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = kvm_arch_put_sve(cs);
- } else {
- ret = kvm_arch_put_fpsimd(cs);
- }
- if (ret) {
- return ret;
- }
-
- fpr = vfp_get_fpsr(env);
- ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
- if (ret) {
- return ret;
- }
-
- fpr = vfp_get_fpcr(env);
- ret = kvm_set_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
- if (ret) {
- return ret;
- }
-
- write_cpustate_to_list(cpu, true);
-
- if (!write_list_to_kvmstate(cpu, level)) {
- return -EINVAL;
- }
-
- /*
- * Setting VCPU events should be triggered after syncing the registers
- * to avoid overwriting potential changes made by KVM upon calling
- * KVM_SET_VCPU_EVENTS ioctl
- */
- ret = kvm_put_vcpu_events(cpu);
- if (ret) {
- return ret;
- }
-
- kvm_arm_sync_mpstate_to_kvm(cpu);
-
- return ret;
-}
-
-static int kvm_arch_get_fpsimd(CPUState *cs)
-{
- CPUARMState *env = &ARM_CPU(cs)->env;
- int i, ret;
-
- for (i = 0; i < 32; i++) {
- uint64_t *q = aa64_vfp_qreg(env, i);
- ret = kvm_get_one_reg(cs, AARCH64_SIMD_CORE_REG(fp_regs.vregs[i]), q);
- if (ret) {
- return ret;
- } else {
-#if HOST_BIG_ENDIAN
- uint64_t t;
- t = q[0], q[0] = q[1], q[1] = t;
-#endif
- }
- }
-
- return 0;
-}
-
-/*
- * KVM SVE registers come in slices where ZREGs have a slice size of 2048 bits
- * and PREGS and the FFR have a slice size of 256 bits. However we simply hard
- * code the slice index to zero for now as it's unlikely we'll need more than
- * one slice for quite some time.
- */
-static int kvm_arch_get_sve(CPUState *cs)
-{
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- uint64_t *r;
- int n, ret;
-
- for (n = 0; n < KVM_ARM64_SVE_NUM_ZREGS; ++n) {
- r = &env->vfp.zregs[n].d[0];
- ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_ZREG(n, 0), r);
- if (ret) {
- return ret;
- }
- sve_bswap64(r, r, cpu->sve_max_vq * 2);
- }
-
- for (n = 0; n < KVM_ARM64_SVE_NUM_PREGS; ++n) {
- r = &env->vfp.pregs[n].p[0];
- ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_PREG(n, 0), r);
- if (ret) {
- return ret;
- }
- sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
- }
-
- r = &env->vfp.pregs[FFR_PRED_NUM].p[0];
- ret = kvm_get_one_reg(cs, KVM_REG_ARM64_SVE_FFR(0), r);
- if (ret) {
- return ret;
- }
- sve_bswap64(r, r, DIV_ROUND_UP(cpu->sve_max_vq * 2, 8));
-
- return 0;
-}
-
-int kvm_arch_get_registers(CPUState *cs)
-{
- uint64_t val;
- unsigned int el;
- uint32_t fpr;
- int i, ret;
-
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- for (i = 0; i < 31; i++) {
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.regs[i]),
- &env->xregs[i]);
- if (ret) {
- return ret;
- }
- }
-
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.sp), &env->sp_el[0]);
- if (ret) {
- return ret;
- }
-
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(sp_el1), &env->sp_el[1]);
- if (ret) {
- return ret;
- }
-
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pstate), &val);
- if (ret) {
- return ret;
- }
-
- env->aarch64 = ((val & PSTATE_nRW) == 0);
- if (is_a64(env)) {
- pstate_write(env, val);
- } else {
- cpsr_write(env, val, 0xffffffff, CPSRWriteRaw);
- }
-
- /* KVM puts SP_EL0 in regs.sp and SP_EL1 in regs.sp_el1. On the
- * QEMU side we keep the current SP in xregs[31] as well.
- */
- aarch64_restore_sp(env, 1);
-
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(regs.pc), &env->pc);
- if (ret) {
- return ret;
- }
-
- /* If we are in AArch32 mode then we need to sync the AArch32 regs with the
- * incoming AArch64 regs received from 64-bit KVM.
- * We must perform this after all of the registers have been acquired from
- * the kernel.
- */
- if (!is_a64(env)) {
- aarch64_sync_64_to_32(env);
- }
-
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(elr_el1), &env->elr_el[1]);
- if (ret) {
- return ret;
- }
-
- /* Fetch the SPSR registers
- *
- * KVM SPSRs 0-4 map to QEMU banks 1-5
- */
- for (i = 0; i < KVM_NR_SPSR; i++) {
- ret = kvm_get_one_reg(cs, AARCH64_CORE_REG(spsr[i]),
- &env->banked_spsr[i + 1]);
- if (ret) {
- return ret;
- }
- }
-
- el = arm_current_el(env);
- if (el > 0 && !is_a64(env)) {
- i = bank_number(env->uncached_cpsr & CPSR_M);
- env->spsr = env->banked_spsr[i];
- }
-
- if (cpu_isar_feature(aa64_sve, cpu)) {
- ret = kvm_arch_get_sve(cs);
- } else {
- ret = kvm_arch_get_fpsimd(cs);
- }
- if (ret) {
- return ret;
- }
-
- ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpsr), &fpr);
- if (ret) {
- return ret;
- }
- vfp_set_fpsr(env, fpr);
-
- ret = kvm_get_one_reg(cs, AARCH64_SIMD_CTRL_REG(fp_regs.fpcr), &fpr);
- if (ret) {
- return ret;
- }
- vfp_set_fpcr(env, fpr);
-
- ret = kvm_get_vcpu_events(cpu);
- if (ret) {
- return ret;
- }
-
- if (!write_kvmstate_to_list(cpu)) {
- return -EINVAL;
- }
- /* Note that it's OK to have registers which aren't in CPUState,
- * so we can ignore a failure return here.
- */
- write_list_to_cpustate(cpu);
-
- kvm_arm_sync_mpstate_to_qemu(cpu);
-
- /* TODO: other registers */
- return ret;
-}
-
-void kvm_arch_on_sigbus_vcpu(CPUState *c, int code, void *addr)
-{
- ram_addr_t ram_addr;
- hwaddr paddr;
-
- assert(code == BUS_MCEERR_AR || code == BUS_MCEERR_AO);
-
- if (acpi_ghes_present() && addr) {
- ram_addr = qemu_ram_addr_from_host(addr);
- if (ram_addr != RAM_ADDR_INVALID &&
- kvm_physical_memory_addr_from_host(c->kvm_state, addr, &paddr)) {
- kvm_hwpoison_page_add(ram_addr);
- /*
- * If this is a BUS_MCEERR_AR, we know we have been called
- * synchronously from the vCPU thread, so we can easily
- * synchronize the state and inject an error.
- *
- * TODO: we currently don't tell the guest at all about
- * BUS_MCEERR_AO. In that case we might either be being
- * called synchronously from the vCPU thread, or a bit
- * later from the main thread, so doing the injection of
- * the error would be more complicated.
- */
- if (code == BUS_MCEERR_AR) {
- kvm_cpu_synchronize_state(c);
- if (!acpi_ghes_record_errors(ACPI_HEST_SRC_ID_SEA, paddr)) {
- kvm_inject_arm_sea(c);
- } else {
- error_report("failed to record the error");
- abort();
- }
- }
- return;
- }
- if (code == BUS_MCEERR_AO) {
- error_report("Hardware memory error at addr %p for memory used by "
- "QEMU itself instead of guest system!", addr);
- }
- }
-
- if (code == BUS_MCEERR_AR) {
- error_report("Hardware memory error!");
- exit(1);
- }
-}
-
-/* C6.6.29 BRK instruction */
-static const uint32_t brk_insn = 0xd4200000;
-
-int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
-{
- if (have_guest_debug) {
- if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 0) ||
- cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk_insn, 4, 1)) {
- return -EINVAL;
- }
- return 0;
- } else {
- error_report("guest debug not supported on this kernel");
- return -EINVAL;
- }
-}
-
-int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
-{
- static uint32_t brk;
-
- if (have_guest_debug) {
- if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&brk, 4, 0) ||
- brk != brk_insn ||
- cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, 4, 1)) {
- return -EINVAL;
- }
- return 0;
- } else {
- error_report("guest debug not supported on this kernel");
- return -EINVAL;
- }
-}
-
-/* See v8 ARM ARM D7.2.27 ESR_ELx, Exception Syndrome Register
- *
- * To minimise translating between kernel and user-space the kernel
- * ABI just provides user-space with the full exception syndrome
- * register value to be decoded in QEMU.
- */
-
-bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit)
-{
- int hsr_ec = syn_get_ec(debug_exit->hsr);
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
-
- /* Ensure PC is synchronised */
- kvm_cpu_synchronize_state(cs);
-
- switch (hsr_ec) {
- case EC_SOFTWARESTEP:
- if (cs->singlestep_enabled) {
- return true;
- } else {
- /*
- * The kernel should have suppressed the guest's ability to
- * single step at this point so something has gone wrong.
- */
- error_report("%s: guest single-step while debugging unsupported"
- " (%"PRIx64", %"PRIx32")",
- __func__, env->pc, debug_exit->hsr);
- return false;
- }
- break;
- case EC_AA64_BKPT:
- if (kvm_find_sw_breakpoint(cs, env->pc)) {
- return true;
- }
- break;
- case EC_BREAKPOINT:
- if (find_hw_breakpoint(cs, env->pc)) {
- return true;
- }
- break;
- case EC_WATCHPOINT:
- {
- CPUWatchpoint *wp = find_hw_watchpoint(cs, debug_exit->far);
- if (wp) {
- cs->watchpoint_hit = wp;
- return true;
- }
- break;
- }
- default:
- error_report("%s: unhandled debug exit (%"PRIx32", %"PRIx64")",
- __func__, debug_exit->hsr, env->pc);
- }
-
- /* If we are not handling the debug exception it must belong to
- * the guest. Let's re-use the existing TCG interrupt code to set
- * everything up properly.
- */
- cs->exception_index = EXCP_BKPT;
- env->exception.syndrome = debug_exit->hsr;
- env->exception.vaddress = debug_exit->far;
- env->exception.target_el = 1;
- qemu_mutex_lock_iothread();
- arm_cpu_do_interrupt(cs);
- qemu_mutex_unlock_iothread();
-
- return false;
-}
-
-#define ARM64_REG_ESR_EL1 ARM64_SYS_REG(3, 0, 5, 2, 0)
-#define ARM64_REG_TCR_EL1 ARM64_SYS_REG(3, 0, 2, 0, 2)
-
-/*
- * ESR_EL1
- * ISS encoding
- * AARCH64: DFSC, bits [5:0]
- * AARCH32:
- * TTBCR.EAE == 0
- * FS[4] - DFSR[10]
- * FS[3:0] - DFSR[3:0]
- * TTBCR.EAE == 1
- * FS, bits [5:0]
- */
-#define ESR_DFSC(aarch64, lpae, v) \
- ((aarch64 || (lpae)) ? ((v) & 0x3F) \
- : (((v) >> 6) | ((v) & 0x1F)))
-
-#define ESR_DFSC_EXTABT(aarch64, lpae) \
- ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)
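
A standalone worked example of the DFSC comparison these macros implement, using illustrative ESR_EL1/DFSR values rather than anything read from a guest (macro bodies copied from the code above):

```c
#include <stdint.h>
#include <stdio.h>

#define ESR_DFSC(aarch64, lpae, v) \
    ((aarch64 || (lpae)) ? ((v) & 0x3F) \
                         : (((v) >> 6) | ((v) & 0x1F)))

#define ESR_DFSC_EXTABT(aarch64, lpae) \
    ((aarch64) ? 0x10 : (lpae) ? 0x10 : 0x8)

int main(void)
{
    uint64_t esr64 = 0x92000010;  /* AArch64 data abort, DFSC = 0x10 */
    uint64_t dfsr32 = 0x8;        /* AArch32 short-descriptor, FS = 0b01000 */

    printf("AArch64 ext abort pending: %d\n",
           ESR_DFSC(1, 0, esr64) == ESR_DFSC_EXTABT(1, 0));   /* 1 */
    printf("AArch32 ext abort pending: %d\n",
           ESR_DFSC(0, 0, dfsr32) == ESR_DFSC_EXTABT(0, 0));  /* 1 */
    return 0;
}
```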
-
-bool kvm_arm_verify_ext_dabt_pending(CPUState *cs)
-{
- uint64_t dfsr_val;
-
- if (!kvm_get_one_reg(cs, ARM64_REG_ESR_EL1, &dfsr_val)) {
- ARMCPU *cpu = ARM_CPU(cs);
- CPUARMState *env = &cpu->env;
- int aarch64_mode = arm_feature(env, ARM_FEATURE_AARCH64);
- int lpae = 0;
-
- if (!aarch64_mode) {
- uint64_t ttbcr;
-
- if (!kvm_get_one_reg(cs, ARM64_REG_TCR_EL1, &ttbcr)) {
- lpae = arm_feature(env, ARM_FEATURE_LPAE)
- && (ttbcr & TTBCR_EAE);
- }
- }
- /*
- * The verification here is based on the DFSC bits
- * of the ESR_EL1 reg only
- */
- return (ESR_DFSC(aarch64_mode, lpae, dfsr_val) ==
- ESR_DFSC_EXTABT(aarch64_mode, lpae));
- }
- return false;
-}
#define QEMU_KVM_ARM_H
#include "sysemu/kvm.h"
-#include "exec/memory.h"
-#include "qemu/error-report.h"
#define KVM_ARM_VGIC_V2 (1 << 0)
#define KVM_ARM_VGIC_V3 (1 << 1)
-/**
- * kvm_arm_init_debug() - initialize guest debug capabilities
- * @s: KVMState
- *
- * Should be called only once before using guest debug capabilities.
- */
-void kvm_arm_init_debug(KVMState *s);
-
-/**
- * kvm_arm_vcpu_init:
- * @cs: CPUState
- *
- * Initialize (or reinitialize) the VCPU by invoking the
- * KVM_ARM_VCPU_INIT ioctl with the CPU type and feature
- * bitmask specified in the CPUState.
- *
- * Returns: 0 if success else < 0 error code
- */
-int kvm_arm_vcpu_init(CPUState *cs);
-
-/**
- * kvm_arm_vcpu_finalize:
- * @cs: CPUState
- * @feature: feature to finalize
- *
- * Finalizes the configuration of the specified VCPU feature by
- * invoking the KVM_ARM_VCPU_FINALIZE ioctl. Features requiring
- * this are documented in the "KVM_ARM_VCPU_FINALIZE" section of
- * KVM's API documentation.
- *
- * Returns: 0 if success else < 0 error code
- */
-int kvm_arm_vcpu_finalize(CPUState *cs, int feature);
-
/**
* kvm_arm_register_device:
* @mr: memory region for this device
void kvm_arm_register_device(MemoryRegion *mr, uint64_t devid, uint64_t group,
uint64_t attr, int dev_fd, uint64_t addr_ormask);
-/**
- * kvm_arm_init_cpreg_list:
- * @cpu: ARMCPU
- *
- * Initialize the ARMCPU cpreg list according to the kernel's
- * definition of what CPU registers it knows about (and throw away
- * the previous TCG-created cpreg list).
- *
- * Returns: 0 if success, else < 0 error code
- */
-int kvm_arm_init_cpreg_list(ARMCPU *cpu);
-
-/**
- * kvm_arm_reg_syncs_via_cpreg_list:
- * @regidx: KVM register index
- *
- * Return true if this KVM register should be synchronized via the
- * cpreg list of arbitrary system registers, false if it is synchronized
- * by hand using code in kvm_arch_get/put_registers().
- */
-bool kvm_arm_reg_syncs_via_cpreg_list(uint64_t regidx);
-
-/**
- * kvm_arm_cpreg_level:
- * @regidx: KVM register index
- *
- * Return the level of this coprocessor/system register. Return value is
- * either KVM_PUT_RUNTIME_STATE, KVM_PUT_RESET_STATE, or KVM_PUT_FULL_STATE.
- */
-int kvm_arm_cpreg_level(uint64_t regidx);
-
/**
* write_list_to_kvmstate:
* @cpu: ARMCPU
*/
void kvm_arm_reset_vcpu(ARMCPU *cpu);
-/**
- * kvm_arm_init_serror_injection:
- * @cs: CPUState
- *
- * Check whether KVM can set guest SError syndrome.
- */
-void kvm_arm_init_serror_injection(CPUState *cs);
-
-/**
- * kvm_get_vcpu_events:
- * @cpu: ARMCPU
- *
- * Get VCPU related state from kvm.
- *
- * Returns: 0 if success else < 0 error code
- */
-int kvm_get_vcpu_events(ARMCPU *cpu);
-
-/**
- * kvm_put_vcpu_events:
- * @cpu: ARMCPU
- *
- * Put VCPU related state to kvm.
- *
- * Returns: 0 if success else < 0 error code
- */
-int kvm_put_vcpu_events(ARMCPU *cpu);
-
#ifdef CONFIG_KVM
/**
* kvm_arm_create_scratch_host_vcpu:
*/
void kvm_arm_destroy_scratch_host_vcpu(int *fdarray);
-/**
- * ARMHostCPUFeatures: information about the host CPU (identified
- * by asking the host kernel)
- */
-typedef struct ARMHostCPUFeatures {
- ARMISARegisters isar;
- uint64_t features;
- uint32_t target;
- const char *dtb_compatible;
-} ARMHostCPUFeatures;
-
-/**
- * kvm_arm_get_host_cpu_features:
- * @ahcf: ARMHostCPUClass to fill in
- *
- * Probe the capabilities of the host kernel's preferred CPU and fill
- * in the ARMHostCPUClass struct accordingly.
- *
- * Returns true on success and false otherwise.
- */
-bool kvm_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf);
-
/**
* kvm_arm_sve_get_vls:
- * @cs: CPUState
+ * @cpu: ARMCPU
*
* Get all the SVE vector lengths supported by the KVM host, setting
* the bits corresponding to their length in quadwords minus one
* (vq - 1) up to ARM_MAX_VQ. Return the resulting map.
*/
-uint32_t kvm_arm_sve_get_vls(CPUState *cs);
+uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu);
/**
* kvm_arm_set_cpu_features_from_host:
/**
* kvm_arm_add_vcpu_properties:
- * @obj: The CPU object to add the properties to
+ * @cpu: The CPU object to add the properties to
*
* Add all KVM specific CPU properties to the CPU object. These
* are the CPU properties with "kvm-" prefixed names.
*/
-void kvm_arm_add_vcpu_properties(Object *obj);
+void kvm_arm_add_vcpu_properties(ARMCPU *cpu);
/**
* kvm_arm_steal_time_finalize:
*/
void kvm_arm_steal_time_finalize(ARMCPU *cpu, Error **errp);
-/**
- * kvm_arm_steal_time_supported:
- *
- * Returns: true if KVM can enable steal time reporting
- * and false otherwise.
- */
-bool kvm_arm_steal_time_supported(void);
-
/**
* kvm_arm_aarch32_supported:
*
*/
int kvm_arm_get_max_vm_ipa_size(MachineState *ms, bool *fixed_ipa);
-/**
- * kvm_arm_sync_mpstate_to_kvm:
- * @cpu: ARMCPU
- *
- * If supported set the KVM MP_STATE based on QEMU's model.
- *
- * Returns 0 on success and -1 on failure.
- */
-int kvm_arm_sync_mpstate_to_kvm(ARMCPU *cpu);
-
-/**
- * kvm_arm_sync_mpstate_to_qemu:
- * @cpu: ARMCPU
- *
- * If supported get the MP_STATE from KVM and store in QEMU's model.
- *
- * Returns 0 on success and aborts on failure.
- */
-int kvm_arm_sync_mpstate_to_qemu(ARMCPU *cpu);
-
-/**
- * kvm_arm_get_virtual_time:
- * @cs: CPUState
- *
- * Gets the VCPU's virtual counter and stores it in the KVM CPU state.
- */
-void kvm_arm_get_virtual_time(CPUState *cs);
-
-/**
- * kvm_arm_put_virtual_time:
- * @cs: CPUState
- *
- * Sets the VCPU's virtual counter to the value stored in the KVM CPU state.
- */
-void kvm_arm_put_virtual_time(CPUState *cs);
-
-void kvm_arm_vm_state_change(void *opaque, bool running, RunState state);
-
int kvm_arm_vgic_probe(void);
-void kvm_arm_pmu_set_irq(CPUState *cs, int irq);
-void kvm_arm_pmu_init(CPUState *cs);
+void kvm_arm_pmu_init(ARMCPU *cpu);
+void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq);
/**
* kvm_arm_pvtime_init:
- * @cs: CPUState
+ * @cpu: ARMCPU
* @ipa: Per-vcpu guest physical base address of the pvtime structures
*
* Initializes PVTIME for the VCPU, setting the PVTIME IPA to @ipa.
*/
-void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa);
+void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa);
int kvm_arm_set_irq(int cpu, int irqtype, int irq, int level);
return false;
}
-static inline bool kvm_arm_steal_time_supported(void)
-{
- return false;
-}
-
/*
* These functions should never actually be called without KVM support.
*/
g_assert_not_reached();
}
-static inline void kvm_arm_add_vcpu_properties(Object *obj)
+static inline void kvm_arm_add_vcpu_properties(ARMCPU *cpu)
{
g_assert_not_reached();
}
g_assert_not_reached();
}
-static inline void kvm_arm_pmu_set_irq(CPUState *cs, int irq)
+static inline void kvm_arm_pmu_set_irq(ARMCPU *cpu, int irq)
{
g_assert_not_reached();
}
-static inline void kvm_arm_pmu_init(CPUState *cs)
+static inline void kvm_arm_pmu_init(ARMCPU *cpu)
{
g_assert_not_reached();
}
-static inline void kvm_arm_pvtime_init(CPUState *cs, uint64_t ipa)
+static inline void kvm_arm_pvtime_init(ARMCPU *cpu, uint64_t ipa)
{
g_assert_not_reached();
}
g_assert_not_reached();
}
-static inline uint32_t kvm_arm_sve_get_vls(CPUState *cs)
+static inline uint32_t kvm_arm_sve_get_vls(ARMCPU *cpu)
{
g_assert_not_reached();
}
#endif
-/**
- * kvm_arm_handle_debug:
- * @cs: CPUState
- * @debug_exit: debug part of the KVM exit structure
- *
- * Returns: TRUE if the debug exception was handled.
- */
-bool kvm_arm_handle_debug(CPUState *cs, struct kvm_debug_exit_arch *debug_exit);
-
-/**
- * kvm_arm_hw_debug_active:
- * @cs: CPU State
- *
- * Return: TRUE if any hardware breakpoints in use.
- */
-bool kvm_arm_hw_debug_active(CPUState *cs);
-
-/**
- * kvm_arm_copy_hw_debug_data:
- * @ptr: kvm_guest_debug_arch structure
- *
- * Copy the architecture specific debug registers into the
- * kvm_guest_debug ioctl structure.
- */
-struct kvm_guest_debug_arch;
-void kvm_arm_copy_hw_debug_data(struct kvm_guest_debug_arch *ptr);
-
-/**
- * kvm_arm_verify_ext_dabt_pending:
- * @cs: CPUState
- *
- * Verify the fault status code wrt the Ext DABT injection
- *
- * Returns: true if the fault status code is as expected, false otherwise
- */
-bool kvm_arm_verify_ext_dabt_pending(CPUState *cs);
-
#endif
))
arm_ss.add(zlib)
-arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c', 'kvm64.c'), if_false: files('kvm-stub.c'))
+arm_ss.add(when: 'CONFIG_KVM', if_true: files('hyp_gdbstub.c', 'kvm.c'), if_false: files('kvm-stub.c'))
arm_ss.add(when: 'CONFIG_HVF', if_true: files('hyp_gdbstub.c'))
arm_ss.add(when: 'TARGET_AARCH64', if_true: files(
}
}
+/* Sign/zero extend */
+uint32_t HELPER(sxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(int8_t)x;
+ res |= (uint32_t)(int8_t)(x >> 16) << 16;
+ return res;
+}
+
+static void handle_possible_div0_trap(CPUARMState *env, uintptr_t ra)
+{
+ /*
+ * Take a division-by-zero exception if necessary; otherwise return
+ * to get the usual non-trapping division behaviour (result of 0)
+ */
+ if (arm_feature(env, ARM_FEATURE_M)
+ && (env->v7m.ccr[env->v7m.secure] & R_V7M_CCR_DIV_0_TRP_MASK)) {
+ raise_exception_ra(env, EXCP_DIVBYZERO, 0, 1, ra);
+ }
+}
+
+uint32_t HELPER(uxtb16)(uint32_t x)
+{
+ uint32_t res;
+ res = (uint16_t)(uint8_t)x;
+ res |= (uint32_t)(uint8_t)(x >> 16) << 16;
+ return res;
+}
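
The two helpers above pack sign- or zero-extended bytes 0 and 2 of the input into the 16-bit halves of the result. A quick standalone check of the same arithmetic with example inputs:

```c
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as the helpers above, minus the helper plumbing. */
static uint32_t sxtb16(uint32_t x)
{
    uint32_t res = (uint16_t)(int8_t)x;
    res |= (uint32_t)(int8_t)(x >> 16) << 16;
    return res;
}

static uint32_t uxtb16(uint32_t x)
{
    uint32_t res = (uint16_t)(uint8_t)x;
    res |= (uint32_t)(uint8_t)(x >> 16) << 16;
    return res;
}

int main(void)
{
    printf("sxtb16(0x00800080) = 0x%08" PRIx32 "\n", sxtb16(0x00800080u));
    /* -> 0xff80ff80: both bytes are 0x80, sign-extended to 0xff80 */
    printf("uxtb16(0x12345678) = 0x%08" PRIx32 "\n", uxtb16(0x12345678u));
    /* -> 0x00340078: bytes 2 and 0 zero-extended into each half */
    return 0;
}
```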
+
+int32_t HELPER(sdiv)(CPUARMState *env, int32_t num, int32_t den)
+{
+ if (den == 0) {
+ handle_possible_div0_trap(env, GETPC());
+ return 0;
+ }
+ if (num == INT_MIN && den == -1) {
+ return INT_MIN;
+ }
+ return num / den;
+}
+
+uint32_t HELPER(udiv)(CPUARMState *env, uint32_t num, uint32_t den)
+{
+ if (den == 0) {
+ handle_possible_div0_trap(env, GETPC());
+ return 0;
+ }
+ return num / den;
+}
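
The division helpers encode the architectural corner cases: an untrapped divide-by-zero returns 0, and INT_MIN / -1 is defined to return INT_MIN rather than overflow. A standalone check of the same arithmetic (no CPUARMState or M-profile trap path here):

```c
#include <limits.h>
#include <stdint.h>
#include <stdio.h>

/* Same arithmetic as HELPER(sdiv) above, without the trap plumbing. */
static int32_t aarch32_sdiv(int32_t num, int32_t den)
{
    if (den == 0) {
        return 0;                   /* untrapped divide-by-zero yields 0 */
    }
    if (num == INT_MIN && den == -1) {
        return INT_MIN;             /* the one overflowing quotient */
    }
    return num / den;
}

int main(void)
{
    printf("7 / 0        -> %d\n", aarch32_sdiv(7, 0));         /* 0 */
    printf("INT_MIN / -1 -> %d\n", aarch32_sdiv(INT_MIN, -1));  /* INT_MIN */
    printf("-7 / 2       -> %d\n", aarch32_sdiv(-7, 2));        /* -3 */
    return 0;
}
```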
+
+uint32_t HELPER(rbit)(uint32_t x)
+{
+ return revbit32(x);
+}
+
uint32_t HELPER(add_setq)(CPUARMState *env, uint32_t a, uint32_t b)
{
uint32_t res = a + b;
*/
#include "qemu/osdep.h"
+#include "exec/exec-all.h"
#include "translate.h"
#include "translate-a64.h"
#include "qemu/log.h"
--- /dev/null
+/*
+ * Copyright (c) 2019, Mellanox Technologies. All rights reserved.
+ * Copyright (C) 2023 Intel Corporation.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ *
+ * Authors: Yi Liu <yi.l.liu@intel.com>
+ *
+ * Copied from
+ * https://github.com/linux-rdma/rdma-core/blob/master/util/open_cdev.c
+ *
+ */
+
+#include "qemu/osdep.h"
+#include "qemu/chardev_open.h"
+
+static int open_cdev_internal(const char *path, dev_t cdev)
+{
+ struct stat st;
+ int fd;
+
+ fd = qemu_open_old(path, O_RDWR);
+ if (fd == -1) {
+ return -1;
+ }
+ if (fstat(fd, &st) || !S_ISCHR(st.st_mode) ||
+ (cdev != 0 && st.st_rdev != cdev)) {
+ close(fd);
+ return -1;
+ }
+ return fd;
+}
+
+static int open_cdev_robust(dev_t cdev)
+{
+ g_autofree char *devpath = NULL;
+
+ /*
+ * This assumes that udev is being used and is creating the /dev/char/
+ * symlinks.
+ */
+ devpath = g_strdup_printf("/dev/char/%u:%u", major(cdev), minor(cdev));
+ return open_cdev_internal(devpath, cdev);
+}
+
+int open_cdev(const char *devpath, dev_t cdev)
+{
+ int fd;
+
+ fd = open_cdev_internal(devpath, cdev);
+ if (fd == -1 && cdev != 0) {
+ return open_cdev_robust(cdev);
+ }
+ return fd;
+}
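
A hypothetical caller of the new helper (not part of the patch): open_cdev() tries the named path first and, when a non-zero dev_t is supplied, falls back to the udev-maintained /dev/char/<major>:<minor> symlink. The device path and numbers below are made up for illustration, and in-tree code would pull in qemu/osdep.h before the new header:

```c
#include <sys/stat.h>
#include <sys/sysmacros.h>
#include <stdio.h>
#include <unistd.h>

#include "qemu/chardev_open.h" /* declares open_cdev(); built from
                                  util/chardev_open.c added above */

int main(void)
{
    dev_t want = makedev(511, 0);  /* assumed VFIO cdev major:minor */
    int fd = open_cdev("/dev/vfio/devices/vfio0", want);

    if (fd < 0) {
        perror("open_cdev");
        return 1;
    }
    /* fd is now verified to be the character device we asked for. */
    close(fd);
    return 0;
}
```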
util_ss.add(files('filemonitor-stub.c'))
endif
util_ss.add(when: 'CONFIG_LINUX', if_true: files('vfio-helpers.c'))
+ util_ss.add(when: 'CONFIG_LINUX', if_true: files('chardev_open.c'))
endif
if cpu == 'aarch64'