#include "qemu/osdep.h"
#include "qemu/error-report.h"
+#include "qemu/main-loop.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
+#include "hw/qdev-properties.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
+#include "migration/vmstate.h"
#include "trace.h"
/* context entry operations */
qemu_mutex_unlock(&s->iommu_lock);
}
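+/*
+ * Derive root_scalable from the raw RTADDR_REG value. The guest
+ * selects scalable mode through the Translation Table Mode field of
+ * RTADDR_REG (tested here as VTD_RTADDR_SMT), and the flag is only
+ * honoured when the device was started with x-scalable-mode=on.
+ */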
+static void vtd_update_scalable_state(IntelIOMMUState *s)
+{
+ uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
+
+ if (s->scalable_mode) {
+ s->root_scalable = val & VTD_RTADDR_SMT;
+ }
+}
+
/* Whether the address space needs to notify new mappings */
static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as)
{
static void vtd_root_table_setup(IntelIOMMUState *s)
{
s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
- s->root_extended = s->root & VTD_RTADDR_RTT;
- if (s->scalable_mode) {
- s->root_scalable = s->root & VTD_RTADDR_SMT;
- }
s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits);
- trace_vtd_reg_dmar_root(s->root, s->root_extended);
+ vtd_update_scalable_state(s);
+
+ trace_vtd_reg_dmar_root(s->root, s->root_scalable);
}
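/*
* Note that vtd_root_table_setup() runs when the guest latches a new
* root table pointer via the SRTP bit of the global command register
* (see vtd_handle_gcmd_srtp()), so the scalable state is refreshed on
* every root pointer update as well as after migration (see
* vtd_post_load() below).
*/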
static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
IntelIOMMUState *s = vtd_as->iommu_state;
if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
- error_report("We need to set caching-mode=1 for intel-iommu to enable "
+ error_report("We need to set caching-mode=on for intel-iommu to enable "
"device assignment with IOMMU protection.");
exit(1);
}
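/*
* For illustration (hypothetical host device address), assigned
* devices behind the vIOMMU are typically configured as:
*
*   qemu-system-x86_64 -machine q35 \
*       -device intel-iommu,caching-mode=on \
*       -device vfio-pci,host=01:00.0 ...
*/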
*/
vtd_switch_address_space_all(iommu);
+ /*
+ * We don't need to migrate root_scalable: it can simply be
+ * recalculated once loading is complete. The same could be done
+ * for root, dmar_enabled, etc., but since those fields are
+ * already part of the migration stream, we keep them for
+ * compatibility.
+ */
+ vtd_update_scalable_state(iommu);
+
return 0;
}
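/*
* vtd_post_load() is installed as the .post_load hook of vtd_vmstate,
* whose fields follow, so the recalculation above runs for every
* incoming migration stream.
*/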
VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
- VMSTATE_BOOL(root_extended, IntelIOMMUState),
- VMSTATE_BOOL(root_scalable, IntelIOMMUState),
+ VMSTATE_UNUSED(1), /* bool root_extended is obsolete: ECS was dropped from the VT-d spec */
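+ /*
+ * The one-byte placeholder keeps streams that carried root_extended
+ * parseable; root_scalable is dropped outright and recomputed in
+ * vtd_post_load() instead.
+ */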
VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
return vtd_dev_as;
}
+static uint64_t get_naturally_aligned_size(uint64_t start,
+ uint64_t size, int gaw)
+{
+ uint64_t max_mask = 1ULL << gaw;
+ uint64_t alignment = start ? start & -start : max_mask;
+
+ alignment = MIN(alignment, max_mask);
+ size = MIN(size, max_mask);
+
+ if (alignment <= size) {
+ /* Increase the alignment of start */
+ return alignment;
+ } else {
+ /* Find the largest page mask from size */
+ return 1ULL << (63 - clz64(size));
+ }
+}
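+
+/*
+ * Worked example: for start 0x3000 and size 0x6000 this returns
+ * 0x1000 (start is only 4K-aligned); fed back with start advanced by
+ * each result it then yields 0x4000 at 0x4000 and 0x1000 at 0x8000,
+ * i.e. three naturally aligned power-of-two chunks covering exactly
+ * [0x3000, 0x8fff]. An illustrative sketch of that loop, assuming
+ * <stdio.h> and <inttypes.h> (compiled out on purpose):
+ */
+#if 0
+static void chunk_demo(void)
+{
+    uint64_t start = 0x3000, remain = 0x6000;
+
+    while (remain) {
+        uint64_t step = get_naturally_aligned_size(start, remain, 39);
+
+        /* prints: 0x1000 @ 0x3000, 0x4000 @ 0x4000, 0x1000 @ 0x8000 */
+        printf("chunk: iova=0x%" PRIx64 " size=0x%" PRIx64 "\n",
+               start, step);
+        start += step;
+        remain -= step;
+    }
+}
+#endif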
+
/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
- IOMMUTLBEntry entry;
- hwaddr size;
+ hwaddr size, remain;
hwaddr start = n->start;
hwaddr end = n->end;
IntelIOMMUState *s = as->iommu_state;
* VT-d spec), otherwise we need to consider overflow of 64 bits.
*/
- if (end > VTD_ADDRESS_SIZE(s->aw_bits)) {
+ if (end > VTD_ADDRESS_SIZE(s->aw_bits) - 1) {
/*
* Don't need to unmap regions that are bigger than the whole
* VT-d supported address space size
*/
- end = VTD_ADDRESS_SIZE(s->aw_bits);
+ end = VTD_ADDRESS_SIZE(s->aw_bits) - 1;
}
assert(start <= end);
- size = end - start;
+ size = remain = end - start + 1;
- if (ctpop64(size) != 1) {
- /*
- * This size cannot format a correct mask. Let's enlarge it to
- * suite the minimum available mask.
- */
- int n = 64 - clz64(size);
- if (n > s->aw_bits) {
- /* should not happen, but in case it happens, limit it */
- n = s->aw_bits;
- }
- size = 1ULL << n;
+ while (remain >= VTD_PAGE_SIZE) {
+ IOMMUTLBEntry entry;
+ uint64_t mask = get_naturally_aligned_size(start, remain, s->aw_bits);
+
+ assert(mask);
+
+ entry.iova = start;
+ entry.addr_mask = mask - 1;
+ entry.target_as = &address_space_memory;
+ entry.perm = IOMMU_NONE;
+ /* This field is meaningless for unmap */
+ entry.translated_addr = 0;
+
+ memory_region_notify_one(n, &entry);
+
+ start += mask;
+ remain -= mask;
}
- entry.target_as = &address_space_memory;
- /* Adjust iova for the size */
- entry.iova = n->start & ~(size - 1);
- /* This field is meaningless for unmap */
- entry.translated_addr = 0;
- entry.perm = IOMMU_NONE;
- entry.addr_mask = size - 1;
+ assert(!remain);
trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
VTD_PCI_SLOT(as->devfn),
VTD_PCI_FUNC(as->devfn),
- entry.iova, size);
+ n->start, size);
- map.iova = entry.iova;
- map.size = entry.addr_mask;
+ map.iova = n->start;
+ map.size = size - 1; /* Inclusive: DMAMap covers [iova, iova + size] */
iova_tree_remove(as->iova_tree, &map);
-
- memory_region_notify_one(n, &entry);
}
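/*
* The chunked walk replaces the old approach of widening the range to
* a single power-of-two mask, which could emit an UNMAP covering
* addresses outside [start, end] and tear down mappings that should
* have survived. Each IOMMUTLBEntry now describes a naturally aligned
* power-of-two region, which is also the shape notifier consumers
* such as vfio expect.
*/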
static void vtd_address_space_unmap_all(IntelIOMMUState *s)
memset(s->womask, 0, DMAR_REG_SIZE);
s->root = 0;
- s->root_extended = false;
s->root_scalable = false;
s->dmar_enabled = false;
s->intr_enabled = false;
x86_class->int_remap = vtd_int_remap;
/* Supported by the pc-q35-* machine types */
dc->user_creatable = true;
+ set_bit(DEVICE_CATEGORY_MISC, dc->categories);
+ dc->desc = "Intel IOMMU (VT-d) DMA Remapping device";
}
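/*
* With the category and description set, "-device help" now lists
* intel-iommu under "Misc devices" instead of the uncategorized
* section.
*/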
static const TypeInfo vtd_info = {