qemu_mutex_unlock(&s->iommu_lock);
}
+/*
+ * Refresh s->root_scalable from the raw DMAR_RTADDR_REG value.
+ *
+ * Reads the register with vtd_get_quad_raw() and, when scalable mode
+ * is configured, latches the SMT bit (VTD_RTADDR_SMT) into
+ * s->root_scalable.  When scalable mode is off, s->root_scalable is
+ * deliberately left untouched.
+ *
+ * NOTE(review): called both on guest writes to the root-table address
+ * register and after migration load, so root_scalable need not be
+ * migrated — it is recomputed from the register state.
+ */
+static void vtd_update_scalable_state(IntelIOMMUState *s)
+{
+ uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
+
+ if (s->scalable_mode) {
+ s->root_scalable = val & VTD_RTADDR_SMT;
+ }
+}
+
/* Whether the address space needs to notify new mappings */
static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as)
{
/* Turn off first then on the other */
if (use_iommu) {
- memory_region_set_enabled(&as->sys_alias, false);
+ memory_region_set_enabled(&as->nodmar, false);
memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
} else {
memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
- memory_region_set_enabled(&as->sys_alias, true);
+ memory_region_set_enabled(&as->nodmar, true);
}
if (take_bql) {
{
s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
s->root_extended = s->root & VTD_RTADDR_RTT;
- if (s->scalable_mode) {
- s->root_scalable = s->root & VTD_RTADDR_SMT;
- }
s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits);
+ vtd_update_scalable_state(s);
+
trace_vtd_reg_dmar_root(s->root, s->root_extended);
}
IntelIOMMUState *s = vtd_as->iommu_state;
if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
- error_report("We need to set caching-mode=1 for intel-iommu to enable "
+ error_report("We need to set caching-mode=on for intel-iommu to enable "
"device assignment with IOMMU protection.");
exit(1);
}
*/
vtd_switch_address_space_all(iommu);
+ /*
+ * We don't need to migrate the root_scalable because we can
+ * simply do the calculation after the loading is complete. We
+ * can actually do similar things with root, dmar_enabled, etc.
+ * however, since we already have them, we'd better keep them
+ * for migration compatibility.
+ */
+ vtd_update_scalable_state(iommu);
+
return 0;
}
VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
VMSTATE_BOOL(root_extended, IntelIOMMUState),
- VMSTATE_BOOL(root_scalable, IntelIOMMUState),
VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
vtd_dev_as = vtd_bus->dev_as[devfn];
if (!vtd_dev_as) {
- snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
+ snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn),
+ PCI_FUNC(devfn));
vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
vtd_dev_as->bus = bus;
vtd_dev_as->context_cache_entry.context_cache_gen = 0;
vtd_dev_as->iova_tree = iova_tree_new();
+ memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX);
+ address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root");
+
/*
- * Memory region relationships looks like (Address range shows
- * only lower 32 bits to make it short in length...):
- *
- * |-----------------+-------------------+----------|
- * | Name | Address range | Priority |
- * |-----------------+-------------------+----------+
- * | vtd_root | 00000000-ffffffff | 0 |
- * | intel_iommu | 00000000-ffffffff | 1 |
- * | vtd_sys_alias | 00000000-ffffffff | 1 |
- * | intel_iommu_ir | fee00000-feefffff | 64 |
- * |-----------------+-------------------+----------|
+ * Build the DMAR-disabled container with aliases to the
+ * shared MRs. Note that aliasing to a shared memory region
+ * helps the memory API detect identical FlatViews, so
+ * devices can share the same FlatView when DMAR is
+ * disabled (either by not providing "intel_iommu=on" or with
+ * "iommu=pt"). This greatly reduces the total number of
+ * FlatViews in the system, hence the VM runs faster.
+ */
+ memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s),
+ "vtd-nodmar", &s->mr_nodmar, 0,
+ memory_region_size(&s->mr_nodmar));
+
+ /*
+ * Build the per-device DMAR-enabled container.
*
- * We enable/disable DMAR by switching enablement for
- * vtd_sys_alias and intel_iommu regions. IR region is always
- * enabled.
+ * TODO: currently we have per-device IOMMU memory regions only
+ * because we have per-device IOMMU notifiers. If
+ * one day we can abstract the IOMMU notifiers out of the
+ * memory regions then we can also share the same memory
+ * region here just like what we've done above with the nodmar
+ * region.
*/
+ strcat(name, "-dmar");
memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
- "intel_iommu_dmar",
- UINT64_MAX);
- memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
- "vtd_sys_alias", get_system_memory(),
- 0, memory_region_size(get_system_memory()));
- memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
- &vtd_mem_ir_ops, s, "intel_iommu_ir",
- VTD_INTERRUPT_ADDR_SIZE);
- memory_region_init(&vtd_dev_as->root, OBJECT(s),
- "vtd_root", UINT64_MAX);
- memory_region_add_subregion_overlap(&vtd_dev_as->root,
+ name, UINT64_MAX);
+ memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir",
+ &s->mr_ir, 0, memory_region_size(&s->mr_ir));
+ memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu),
VTD_INTERRUPT_ADDR_FIRST,
- &vtd_dev_as->iommu_ir, 64);
- address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
- memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
- &vtd_dev_as->sys_alias, 1);
+ &vtd_dev_as->iommu_ir, 1);
+
+ /*
+ * Hook both the containers under the root container, we
+ * switch between DMAR & noDMAR by enable/disable
+ * corresponding sub-containers
+ */
memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
MEMORY_REGION(&vtd_dev_as->iommu),
- 1);
+ 0);
+ memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
+ &vtd_dev_as->nodmar, 0);
+
vtd_switch_address_space(vtd_dev_as);
}
return vtd_dev_as;
memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
"intel_iommu", DMAR_REG_SIZE);
+
+ /* Create the shared memory regions by all devices */
+ memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar",
+ UINT64_MAX);
+ memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops,
+ s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE);
+ memory_region_init_alias(&s->mr_sys_alias, OBJECT(s),
+ "vtd-sys-alias", get_system_memory(), 0,
+ memory_region_size(get_system_memory()));
+ memory_region_add_subregion_overlap(&s->mr_nodmar, 0,
+ &s->mr_sys_alias, 0);
+ memory_region_add_subregion_overlap(&s->mr_nodmar,
+ VTD_INTERRUPT_ADDR_FIRST,
+ &s->mr_ir, 1);
+
sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
/* No corresponding destroy */
s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,