#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
-
-/*#define DEBUG_INTEL_IOMMU*/
-#ifdef DEBUG_INTEL_IOMMU
-enum {
- DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
- DEBUG_CACHE, DEBUG_IR,
-};
-#define VTD_DBGBIT(x) (1 << DEBUG_##x)
-static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);
-
-#define VTD_DPRINTF(what, fmt, ...) do { \
- if (vtd_dbgflags & VTD_DBGBIT(what)) { \
- fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
- ## __VA_ARGS__); } \
- } while (0)
-#else
-#define VTD_DPRINTF(what, fmt, ...) do {} while (0)
-#endif
+#include "trace.h"
static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
uint64_t wmask, uint64_t w1cmask)
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
+ assert(level != 0);
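+    /* Level 1 shifts by 12 (4K pages); each level above adds
+     * VTD_SL_LEVEL_BITS (9) more bits: level 2 -> 21, level 3 -> 30.
+     */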
return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}
GHashTableIter bus_it;
uint32_t devfn_it;
+ trace_vtd_context_cache_reset();
+
g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);
- VTD_DPRINTF(CACHE, "global context_cache_gen=1");
while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) {
for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
vtd_as = vtd_bus->dev_as[devfn_it];
uint64_t *key = g_malloc(sizeof(*key));
uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
- VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
- " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
- domain_id);
+ trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
- VTD_DPRINTF(CACHE, "iotlb exceeds size limit, forced to reset");
+ trace_vtd_iotlb_reset("iotlb exceeds size limit");
vtd_reset_iotlb(s);
}
msi.address = vtd_get_long_raw(s, mesg_addr_reg);
msi.data = vtd_get_long_raw(s, mesg_data_reg);
- VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32,
- msi.address, msi.data);
+ trace_vtd_irq_generate(msi.address, msi.data);
+
apic_get_class()->send_msi(&msi);
}
{
if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
pre_fsts & VTD_FSTS_IQE) {
- VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
- "to be serviced by software, fault event is not generated "
- "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
+ trace_vtd_err("There are previous interrupt conditions "
+ "to be serviced by software, fault event "
+ "is not generated.");
return;
}
vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
- VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
+ trace_vtd_err("Interrupt Mask set, irq is not generated.");
} else {
vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
}
}
vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
- VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
+ trace_vtd_fsts_ppf(!!ppf_mask);
}
static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
}
vtd_set_quad_raw(s, frcd_reg_addr, lo);
vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
- VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
- ", lo 0x%"PRIx64, index, hi, lo);
+
+ trace_vtd_frr_new(index, hi, lo);
}
/* Try to collapse multiple pending faults from the same requester */
for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
frcd_reg = vtd_get_quad_raw(s, addr);
- VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
if ((frcd_reg & VTD_FRCD_F) &&
((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
return true;
/* This is not a normal fault reason case. Drop it. */
return;
}
- VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
- ", is_write %d", source_id, fault, addr, is_write);
+
+ trace_vtd_dmar_fault(source_id, fault, addr, is_write);
+
if (fsts_reg & VTD_FSTS_PFO) {
- VTD_DPRINTF(FLOG, "new fault is not recorded due to "
- "Primary Fault Overflow");
+ trace_vtd_err("New fault is not recorded due to "
+ "Primary Fault Overflow.");
return;
}
+
if (vtd_try_collapse_fault(s, source_id)) {
- VTD_DPRINTF(FLOG, "new fault is not recorded due to "
- "compression of faults");
+ trace_vtd_err("New fault is not recorded due to "
+ "compression of faults.");
return;
}
+
if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
- VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
- "new fault is not recorded, set PFO field");
+ trace_vtd_err("Next Fault Recording Reg is used, "
+ "new fault is not recorded, set PFO field.");
vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
return;
}
vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
if (fsts_reg & VTD_FSTS_PPF) {
- VTD_DPRINTF(FLOG, "there are pending faults already, "
- "fault event is not generated");
+ trace_vtd_err("There are pending faults already, "
+ "fault event is not generated.");
vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
s->next_frcd_reg++;
if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
- VTD_DPRINTF(INV, "completes an invalidation wait command with "
- "Interrupt Flag");
if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
- VTD_DPRINTF(INV, "there is a previous interrupt condition to be "
- "serviced by software, "
- "new invalidation event is not generated");
+ trace_vtd_inv_desc_wait_irq("One pending, skip current");
return;
}
vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
- VTD_DPRINTF(INV, "IM filed in IECTL_REG is set, new invalidation "
- "event is not generated");
+ trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
+ "new event not generated");
return;
} else {
/* Generate the interrupt event */
+ trace_vtd_inv_desc_wait_irq("Generating complete event");
vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
}
addr = s->root + index * sizeof(*re);
if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
- VTD_DPRINTF(GENERAL, "error: fail to access root-entry at 0x%"PRIx64
- " + %"PRIu8, s->root, index);
+ trace_vtd_re_invalid(re->rsvd, re->val);
re->val = 0;
return -VTD_FR_ROOT_TABLE_INV;
}
return 0;
}
-static inline bool vtd_context_entry_present(VTDContextEntry *context)
+static inline bool vtd_ce_present(VTDContextEntry *context)
{
return context->lo & VTD_CONTEXT_ENTRY_P;
}
{
dma_addr_t addr;
- if (!vtd_root_entry_present(root)) {
- VTD_DPRINTF(GENERAL, "error: root-entry is not present");
- return -VTD_FR_ROOT_ENTRY_P;
- }
+    /* We have already checked that the root entry is present. */
addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
- VTD_DPRINTF(GENERAL, "error: fail to access context-entry at 0x%"PRIx64
- " + %"PRIu8,
- (uint64_t)(root->val & VTD_ROOT_ENTRY_CTP), index);
+ trace_vtd_re_invalid(root->rsvd, root->val);
return -VTD_FR_CONTEXT_TABLE_INV;
}
ce->lo = le64_to_cpu(ce->lo);
return 0;
}
-static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
+static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}
return slpte;
}
-/* Given a gpa and the level of paging structure, return the offset of current
- * level.
+/* Given an iova and the level of paging structure, return the offset
+ * of the current level.
*/
-static inline uint32_t vtd_gpa_level_offset(uint64_t gpa, uint32_t level)
+static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
- return (gpa >> vtd_slpt_level_shift(level)) &
+ return (iova >> vtd_slpt_level_shift(level)) &
((1ULL << VTD_SL_LEVEL_BITS) - 1);
}
/* Get the page-table level that hardware should use for the second-level
* page-table walk from the Address Width field of context-entry.
*/
-static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
+static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}
-static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
+static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
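+    /* AW=1 yields a 39-bit AGAW (3-level table), AW=2 yields 48 bits
+     * (4-level), matching vtd_ce_get_level() above.
+     */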
return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}
+static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
+{
+ return ce->lo & VTD_CONTEXT_ENTRY_TT;
+}
+
+/* Return true if check passed, otherwise false */
+static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
+ VTDContextEntry *ce)
+{
+ switch (vtd_ce_get_type(ce)) {
+ case VTD_CONTEXT_TT_MULTI_LEVEL:
+ /* Always supported */
+ break;
+ case VTD_CONTEXT_TT_DEV_IOTLB:
+ if (!x86_iommu->dt_supported) {
+ return false;
+ }
+ break;
+ case VTD_CONTEXT_TT_PASS_THROUGH:
+ if (!x86_iommu->pt_supported) {
+ return false;
+ }
+ break;
+ default:
+        /* Unknown type */
+ return false;
+ }
+ return true;
+}
+
+static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
+{
+ uint32_t ce_agaw = vtd_ce_get_agaw(ce);
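+    /* Cap the context entry's AGAW at the MGAW advertised in CAP_REG. */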
+ return 1ULL << MIN(ce_agaw, VTD_MGAW);
+}
+
+/* Return true if IOVA passes range check, otherwise false. */
+static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
+{
+ /*
+ * Check if @iova is above 2^X-1, where X is the minimum of MGAW
+ * in CAP_REG and AW in context-entry.
+ */
+ return !(iova & ~(vtd_iova_limit(ce) - 1));
+}
+
static const uint64_t vtd_paging_entry_rsvd_field[] = {
[0] = ~0ULL,
/* For not large page */
}
}
-/* Given the @gpa, get relevant @slptep. @slpte_level will be the last level
+/* Find the VTD address space associated with a given bus number */
+static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
+{
+ VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
+ if (!vtd_bus) {
+ /*
+ * Iterate over the registered buses to find the one which
+         * currently holds this bus number, and update the bus_num
+         * lookup table.
+ */
+ GHashTableIter iter;
+
+ g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
+ if (pci_bus_num(vtd_bus->bus) == bus_num) {
+ s->vtd_as_by_bus_num[bus_num] = vtd_bus;
+ return vtd_bus;
+ }
+ }
+ }
+ return vtd_bus;
+}
+
+/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
* of the translation, can be used for deciding the size of large page.
*/
-static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
- uint64_t *slptep, uint32_t *slpte_level,
- bool *reads, bool *writes)
+static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
+ uint64_t *slptep, uint32_t *slpte_level,
+ bool *reads, bool *writes)
{
- dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
- uint32_t level = vtd_get_level_from_context_entry(ce);
+ dma_addr_t addr = vtd_ce_get_slpt_base(ce);
+ uint32_t level = vtd_ce_get_level(ce);
uint32_t offset;
uint64_t slpte;
- uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
uint64_t access_right_check;
- /* Check if @gpa is above 2^X-1, where X is the minimum of MGAW in CAP_REG
- * and AW in context-entry.
- */
- if (gpa & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
- VTD_DPRINTF(GENERAL, "error: gpa 0x%"PRIx64 " exceeds limits", gpa);
+ if (!vtd_iova_range_check(iova, ce)) {
+ trace_vtd_err_dmar_iova_overflow(iova);
return -VTD_FR_ADDR_BEYOND_MGAW;
}
access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
while (true) {
- offset = vtd_gpa_level_offset(gpa, level);
+ offset = vtd_iova_level_offset(iova, level);
slpte = vtd_get_slpte(addr, offset);
if (slpte == (uint64_t)-1) {
- VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
- "entry at level %"PRIu32 " for gpa 0x%"PRIx64,
- level, gpa);
- if (level == vtd_get_level_from_context_entry(ce)) {
+ trace_vtd_err_dmar_slpte_read_error(iova, level);
+ if (level == vtd_ce_get_level(ce)) {
/* Invalid programming of context-entry */
return -VTD_FR_CONTEXT_ENTRY_INV;
} else {
*reads = (*reads) && (slpte & VTD_SL_R);
*writes = (*writes) && (slpte & VTD_SL_W);
if (!(slpte & access_right_check)) {
- VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
- "gpa 0x%"PRIx64 " slpte 0x%"PRIx64,
- (is_write ? "write" : "read"), gpa, slpte);
+ trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
}
if (vtd_slpte_nonzero_rsvd(slpte, level)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
- "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
- level, slpte);
+ trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
return -VTD_FR_PAGING_ENTRY_RSVD;
}
}
}
+typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
+
+/**
+ * vtd_page_walk_level - walk over specific level for IOVA range
+ *
+ * @addr: base GPA addr to start the walk
+ * @start: IOVA range start address
+ * @end: IOVA range end address (start <= addr < end)
+ * @hook_fn: hook func to be called when a page is detected
+ * @private: private data to be passed into hook func
+ * @level: the paging-structure level at which to start the walk
+ * @read: whether parent level has read permission
+ * @write: whether parent level has write permission
+ * @notify_unmap: whether we should notify invalid entries
+ */
+static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
+ uint64_t end, vtd_page_walk_hook hook_fn,
+ void *private, uint32_t level,
+ bool read, bool write, bool notify_unmap)
+{
+ bool read_cur, write_cur, entry_valid;
+ uint32_t offset;
+ uint64_t slpte;
+ uint64_t subpage_size, subpage_mask;
+ IOMMUTLBEntry entry;
+ uint64_t iova = start;
+ uint64_t iova_next;
+ int ret = 0;
+
+ trace_vtd_page_walk_level(addr, level, start, end);
+
+ subpage_size = 1ULL << vtd_slpt_level_shift(level);
+ subpage_mask = vtd_slpt_level_page_mask(level);
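+    /* The walk advances in subpage_size steps: 4K at level 1, 2M at
+     * level 2, 1G at level 3.
+     */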
+
+ while (iova < end) {
+ iova_next = (iova & subpage_mask) + subpage_size;
+
+ offset = vtd_iova_level_offset(iova, level);
+ slpte = vtd_get_slpte(addr, offset);
+
+ if (slpte == (uint64_t)-1) {
+ trace_vtd_page_walk_skip_read(iova, iova_next);
+ goto next;
+ }
+
+ if (vtd_slpte_nonzero_rsvd(slpte, level)) {
+ trace_vtd_page_walk_skip_reserve(iova, iova_next);
+ goto next;
+ }
+
+ /* Permissions are stacked with parents' */
+ read_cur = read && (slpte & VTD_SL_R);
+ write_cur = write && (slpte & VTD_SL_W);
+
+ /*
+ * As long as we have either read/write permission, this is a
+ * valid entry. The rule works for both page entries and page
+ * table entries.
+ */
+ entry_valid = read_cur | write_cur;
+
+ if (vtd_is_last_slpte(slpte, level)) {
+ entry.target_as = &address_space_memory;
+ entry.iova = iova & subpage_mask;
+ /* NOTE: this is only meaningful if entry_valid == true */
+ entry.translated_addr = vtd_get_slpte_addr(slpte);
+ entry.addr_mask = ~subpage_mask;
+ entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
+ if (!entry_valid && !notify_unmap) {
+ trace_vtd_page_walk_skip_perm(iova, iova_next);
+ goto next;
+ }
+ trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
+ entry.addr_mask, entry.perm);
+ if (hook_fn) {
+ ret = hook_fn(&entry, private);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+ } else {
+ if (!entry_valid) {
+ trace_vtd_page_walk_skip_perm(iova, iova_next);
+ goto next;
+ }
+ ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
+ MIN(iova_next, end), hook_fn, private,
+ level - 1, read_cur, write_cur,
+ notify_unmap);
+ if (ret < 0) {
+ return ret;
+ }
+ }
+
+next:
+ iova = iova_next;
+ }
+
+ return 0;
+}
+
+/**
+ * vtd_page_walk - walk specific IOVA range, and call the hook
+ *
+ * @ce: context entry to walk upon
+ * @start: IOVA address to start the walk
+ * @end: IOVA range end address (start <= addr < end)
+ * @hook_fn: the hook that to be called for each detected area
+ * @private: private data for the hook function
+ */
+static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
+ vtd_page_walk_hook hook_fn, void *private,
+ bool notify_unmap)
+{
+ dma_addr_t addr = vtd_ce_get_slpt_base(ce);
+ uint32_t level = vtd_ce_get_level(ce);
+
+ if (!vtd_iova_range_check(start, ce)) {
+ return -VTD_FR_ADDR_BEYOND_MGAW;
+ }
+
+ if (!vtd_iova_range_check(end, ce)) {
+ /* Fix end so that it reaches the maximum */
+ end = vtd_iova_limit(ce);
+ }
+
+ return vtd_page_walk_level(addr, start, end, hook_fn, private,
+ level, true, true, notify_unmap);
+}
+
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
uint8_t devfn, VTDContextEntry *ce)
{
VTDRootEntry re;
int ret_fr;
+ X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
ret_fr = vtd_get_root_entry(s, bus_num, &re);
if (ret_fr) {
}
if (!vtd_root_entry_present(&re)) {
- VTD_DPRINTF(GENERAL, "error: root-entry #%"PRIu8 " is not present",
- bus_num);
+        /* Not an error - it's OK for the root entry to be non-present. */
+ trace_vtd_re_not_present(bus_num);
return -VTD_FR_ROOT_ENTRY_P;
- } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in root-entry "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64, re.rsvd, re.val);
+ }
+
+ if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
+ trace_vtd_re_invalid(re.rsvd, re.val);
return -VTD_FR_ROOT_ENTRY_RSVD;
}
return ret_fr;
}
- if (!vtd_context_entry_present(ce)) {
- VTD_DPRINTF(GENERAL,
- "error: context-entry #%"PRIu8 "(bus #%"PRIu8 ") "
- "is not present", devfn, bus_num);
+ if (!vtd_ce_present(ce)) {
+        /* Not an error - it's OK for the context entry to be non-present. */
+ trace_vtd_ce_not_present(bus_num, devfn);
return -VTD_FR_CONTEXT_ENTRY_P;
- } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
- (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
- VTD_DPRINTF(GENERAL,
- "error: non-zero reserved field in context-entry "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64, ce->hi, ce->lo);
+ }
+
+ if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
+ (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
+ trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_RSVD;
}
+
/* Check if the programming of context-entry is valid */
- if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
- VTD_DPRINTF(GENERAL, "error: unsupported Address Width value in "
- "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
- ce->hi, ce->lo);
+ if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
+ trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
- } else if (ce->lo & VTD_CONTEXT_ENTRY_TT) {
- VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
- "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
- ce->hi, ce->lo);
+ }
+
+ /* Do translation type check */
+ if (!vtd_ce_type_check(x86_iommu, ce)) {
+ trace_vtd_ce_invalid(ce->hi, ce->lo);
return -VTD_FR_CONTEXT_ENTRY_INV;
}
+
return 0;
}
+/*
+ * Fetch the translation type for a specific device. Returns <0 if an
+ * error occurs, otherwise returns the shifted type to check against
+ * VTD_CONTEXT_TT_*.
+ */
+static int vtd_dev_get_trans_type(VTDAddressSpace *as)
+{
+ IntelIOMMUState *s;
+ VTDContextEntry ce;
+ int ret;
+
+ s = as->iommu_state;
+
+ ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
+ as->devfn, &ce);
+ if (ret) {
+ return ret;
+ }
+
+ return vtd_ce_get_type(&ce);
+}
+
+static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
+{
+ int ret;
+
+ assert(as);
+
+ ret = vtd_dev_get_trans_type(as);
+ if (ret < 0) {
+ /*
+ * Possibly failed to parse the context entry for some reason
+ * (e.g., during init, or any guest configuration errors on
+ * context entries). We should assume PT not enabled for
+ * safety.
+ */
+ return false;
+ }
+
+ return ret == VTD_CONTEXT_TT_PASS_THROUGH;
+}
+
+/* Return whether the device is using IOMMU translation. */
+static bool vtd_switch_address_space(VTDAddressSpace *as)
+{
+ bool use_iommu;
+
+ assert(as);
+
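+    /* The IOMMU region is bypassed both when DMAR is disabled and when
+     * the device is in passthrough (PT) mode.
+     */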
+    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);
+
+ trace_vtd_switch_address_space(pci_bus_num(as->bus),
+ VTD_PCI_SLOT(as->devfn),
+ VTD_PCI_FUNC(as->devfn),
+ use_iommu);
+
+ /* Turn off first then on the other */
+ if (use_iommu) {
+ memory_region_set_enabled(&as->sys_alias, false);
+ memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
+ } else {
+ memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
+ memory_region_set_enabled(&as->sys_alias, true);
+ }
+
+ return use_iommu;
+}
+
+static void vtd_switch_address_space_all(IntelIOMMUState *s)
+{
+ GHashTableIter iter;
+ VTDBus *vtd_bus;
+ int i;
+
+ g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
+ while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
+ for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
+ if (!vtd_bus->dev_as[i]) {
+ continue;
+ }
+ vtd_switch_address_space(vtd_bus->dev_as[i]);
+ }
+ }
+}
+
static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
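+    /* SID layout: bus number in bits 15:8, devfn in bits 7:0,
+     * e.g. PCI 02:01.3 -> devfn 0x0b -> SID 0x020b.
+     */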
return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
+static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
+{
+ VTDBus *vtd_bus;
+ VTDAddressSpace *vtd_as;
+ bool success = false;
+
+ vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
+ if (!vtd_bus) {
+ goto out;
+ }
+
+ vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
+ if (!vtd_as) {
+ goto out;
+ }
+
+ if (vtd_switch_address_space(vtd_as) == false) {
+ /* We switched off IOMMU region successfully. */
+ success = true;
+ }
+
+out:
+ trace_vtd_pt_enable_fast_path(source_id, success);
+}
+
/* Map dev to context-entry then do a paging-structures walk to do a iommu
* translation.
*
* @devfn: The devfn, which is the combined of device and function number
* @is_write: The access is a write operation
* @entry: IOMMUTLBEntry that contain the addr to be translated and result
+ *
+ * Returns true if translation is successful, otherwise false.
*/
-static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
+static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
uint8_t devfn, hwaddr addr, bool is_write,
IOMMUTLBEntry *entry)
{
bool writes = true;
VTDIOTLBEntry *iotlb_entry;
- /* Check if the request is in interrupt address range */
- if (vtd_is_interrupt_addr(addr)) {
- if (is_write) {
- /* FIXME: since we don't know the length of the access here, we
- * treat Non-DWORD length write requests without PASID as
- * interrupt requests, too. Withoud interrupt remapping support,
- * we just use 1:1 mapping.
- */
- VTD_DPRINTF(MMU, "write request to interrupt address "
- "gpa 0x%"PRIx64, addr);
- entry->iova = addr & VTD_PAGE_MASK_4K;
- entry->translated_addr = addr & VTD_PAGE_MASK_4K;
- entry->addr_mask = ~VTD_PAGE_MASK_4K;
- entry->perm = IOMMU_WO;
- return;
- } else {
- VTD_DPRINTF(GENERAL, "error: read request from interrupt address "
- "gpa 0x%"PRIx64, addr);
- vtd_report_dmar_fault(s, source_id, addr, VTD_FR_READ, is_write);
- return;
- }
- }
+ /*
+     * We have a standalone memory region for interrupt addresses; we
+     * should never receive translation requests in this region.
+ */
+ assert(!vtd_is_interrupt_addr(addr));
+
/* Try to fetch slpte form IOTLB */
iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
if (iotlb_entry) {
- VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
- " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
- iotlb_entry->slpte, iotlb_entry->domain_id);
+ trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
+ iotlb_entry->domain_id);
slpte = iotlb_entry->slpte;
reads = iotlb_entry->read_flags;
writes = iotlb_entry->write_flags;
page_mask = iotlb_entry->mask;
goto out;
}
+
/* Try to fetch context-entry from cache first */
if (cc_entry->context_cache_gen == s->context_cache_gen) {
- VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
- "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
- bus_num, devfn, cc_entry->context_entry.hi,
- cc_entry->context_entry.lo, cc_entry->context_cache_gen);
+ trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
+ cc_entry->context_entry.lo,
+ cc_entry->context_cache_gen);
ce = cc_entry->context_entry;
is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
} else {
if (ret_fr) {
ret_fr = -ret_fr;
if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
- VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
- "requests through this context-entry "
- "(with FPD Set)");
+ trace_vtd_fault_disabled();
} else {
vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
}
- return;
+ goto error;
}
/* Update context-cache */
- VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
- "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
- bus_num, devfn, ce.hi, ce.lo,
- cc_entry->context_cache_gen, s->context_cache_gen);
+ trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
+ cc_entry->context_cache_gen,
+ s->context_cache_gen);
cc_entry->context_entry = ce;
cc_entry->context_cache_gen = s->context_cache_gen;
}
- ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
- &reads, &writes);
+ /*
+ * We don't need to translate for pass-through context entries.
+ * Also, let's ignore IOTLB caching as well for PT devices.
+ */
+ if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
+ entry->iova = addr & VTD_PAGE_MASK;
+ entry->translated_addr = entry->iova;
+        entry->addr_mask = ~VTD_PAGE_MASK;
+ entry->perm = IOMMU_RW;
+ trace_vtd_translate_pt(source_id, entry->iova);
+
+ /*
+ * When this happens, it means firstly caching-mode is not
+ * enabled, and this is the first passthrough translation for
+ * the device. Let's enable the fast path for passthrough.
+ *
+ * When passthrough is disabled again for the device, we can
+ * capture it via the context entry invalidation, then the
+ * IOMMU region can be swapped back.
+ */
+ vtd_pt_enable_fast_path(s, source_id);
+
+ return true;
+ }
+
+ ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
+ &reads, &writes);
if (ret_fr) {
ret_fr = -ret_fr;
if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
- VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
- "through this context-entry (with FPD Set)");
+ trace_vtd_fault_disabled();
} else {
vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
}
- return;
+ goto error;
}
page_mask = vtd_slpt_level_page_mask(level);
entry->iova = addr & page_mask;
entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
entry->addr_mask = ~page_mask;
- entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
+ entry->perm = IOMMU_ACCESS_FLAG(reads, writes);
+ return true;
+
+error:
+ entry->iova = 0;
+ entry->translated_addr = 0;
+ entry->addr_mask = 0;
+ entry->perm = IOMMU_NONE;
+ return false;
}
static void vtd_root_table_setup(IntelIOMMUState *s)
s->root_extended = s->root & VTD_RTADDR_RTT;
s->root &= VTD_RTADDR_ADDR_MASK;
- VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
- (s->root_extended ? "(extended)" : ""));
+ trace_vtd_reg_dmar_root(s->root, s->root_extended);
}
static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
/* Notify global invalidation */
vtd_iec_notify_all(s, true, 0, 0);
- VTD_DPRINTF(CSR, "int remap table addr 0x%"PRIx64 " size %"PRIu32,
- s->intr_root, s->intr_size);
+ trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}
-static void vtd_context_global_invalidate(IntelIOMMUState *s)
+static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
- s->context_cache_gen++;
- if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
- vtd_reset_context_cache(s);
+ IntelIOMMUNotifierNode *node;
+
+ QLIST_FOREACH(node, &s->notifiers_list, next) {
+ memory_region_iommu_replay_all(&node->vtd_as->iommu);
}
}
-
-/* Find the VTD address space currently associated with a given bus number,
- */
-static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
+static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
- VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
- if (!vtd_bus) {
- /* Iterate over the registered buses to find the one
- * which currently hold this bus number, and update the bus_num lookup table:
- */
- GHashTableIter iter;
-
- g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
- while (g_hash_table_iter_next (&iter, NULL, (void**)&vtd_bus)) {
- if (pci_bus_num(vtd_bus->bus) == bus_num) {
- s->vtd_as_by_bus_num[bus_num] = vtd_bus;
- return vtd_bus;
- }
- }
+ trace_vtd_inv_desc_cc_global();
+ s->context_cache_gen++;
+ if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
+ vtd_reset_context_cache(s);
}
- return vtd_bus;
+ vtd_switch_address_space_all(s);
+ /*
+ * From VT-d spec 6.5.2.1, a global context entry invalidation
+ * should be followed by a IOTLB global invalidation, so we should
+ * be safe even without this. Hoewever, let's replay the region as
+ * well to be safer, and go back here when we need finer tunes for
+ * VT-d emulation codes.
+ */
+ vtd_iommu_replay_all(s);
}
/* Do a context-cache device-selective invalidation.
uint16_t mask;
VTDBus *vtd_bus;
VTDAddressSpace *vtd_as;
- uint16_t devfn;
+ uint8_t bus_n, devfn;
uint16_t devfn_it;
+ trace_vtd_inv_desc_cc_devices(source_id, func_mask);
+
switch (func_mask & 3) {
case 0:
mask = 0; /* No bits in the SID field masked */
mask = 7; /* Mask bit 2:0 in the SID field */
break;
}
- VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
- " mask %"PRIu16, source_id, mask);
- vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
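+    /* Invert: the masked SID bits become don't-cares in the devfn
+     * comparison below.
+     */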
+ mask = ~mask;
+
+ bus_n = VTD_SID_TO_BUS(source_id);
+ vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
if (vtd_bus) {
devfn = VTD_SID_TO_DEVFN(source_id);
for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
vtd_as = vtd_bus->dev_as[devfn_it];
if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
- VTD_DPRINTF(INV, "invalidate context-cahce of devfn 0x%"PRIx16,
- devfn_it);
+ trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
+ VTD_PCI_FUNC(devfn_it));
vtd_as->context_cache_entry.context_cache_gen = 0;
+            /*
+             * Switch the address space when needed, in case the
+             * device passthrough bit was toggled.
+             */
+            vtd_switch_address_space(vtd_as);
+            /*
+             * The device is moving out of (or into) a domain, so a
+             * replay() here notifies all registered
+             * IOMMU_NOTIFIER_MAP notifiers about the change. This is
+             * harmless even if no such notifier is registered - the
+             * IOMMU notification framework will simply skip MAP
+             * notifications in that case.
+             */
+ memory_region_iommu_replay_all(&vtd_as->iommu);
}
}
}
switch (type) {
case VTD_CCMD_DOMAIN_INVL:
- VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
- (uint16_t)VTD_CCMD_DID(val));
/* Fall through */
case VTD_CCMD_GLOBAL_INVL:
- VTD_DPRINTF(INV, "global invalidation");
caig = VTD_CCMD_GLOBAL_INVL_A;
vtd_context_global_invalidate(s);
break;
break;
default:
- VTD_DPRINTF(GENERAL, "error: invalid granularity");
+ trace_vtd_err("Context cache invalidate type error.");
caig = 0;
}
return caig;
static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
+ trace_vtd_inv_desc_iotlb_global();
vtd_reset_iotlb(s);
+ vtd_iommu_replay_all(s);
}
static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
+ IntelIOMMUNotifierNode *node;
+ VTDContextEntry ce;
+ VTDAddressSpace *vtd_as;
+
+ trace_vtd_inv_desc_iotlb_domain(domain_id);
+
g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
&domain_id);
+
+ QLIST_FOREACH(node, &s->notifiers_list, next) {
+ vtd_as = node->vtd_as;
+ if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
+ vtd_as->devfn, &ce) &&
+ domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
+ memory_region_iommu_replay_all(&vtd_as->iommu);
+ }
+ }
+}
+
+static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
+ void *private)
+{
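+    /* Forward each mapping found by the page walk to the notifiers
+     * registered on this IOMMU memory region.
+     */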
+ memory_region_notify_iommu((IOMMUMemoryRegion *)private, *entry);
+ return 0;
+}
+
+static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
+ uint16_t domain_id, hwaddr addr,
+ uint8_t am)
+{
+ IntelIOMMUNotifierNode *node;
+ VTDContextEntry ce;
+ int ret;
+
+ QLIST_FOREACH(node, &(s->notifiers_list), next) {
+ VTDAddressSpace *vtd_as = node->vtd_as;
+ ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
+ vtd_as->devfn, &ce);
+ if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
+ vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
+ vtd_page_invalidate_notify_hook,
+ (void *)&vtd_as->iommu, true);
+ }
+ }
}
static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
{
VTDIOTLBPageInvInfo info;
+ trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
+
assert(am <= VTD_MAMV);
info.domain_id = domain_id;
info.addr = addr;
info.mask = ~((1 << am) - 1);
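+    /* am is the page order: entries whose gfn matches in all but the
+     * low am bits (i.e. within a 1 << am page range) are flushed.
+     */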
g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
+ vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
/* Flush IOTLB
switch (type) {
case VTD_TLB_GLOBAL_FLUSH:
- VTD_DPRINTF(INV, "global invalidation");
iaig = VTD_TLB_GLOBAL_FLUSH_A;
vtd_iotlb_global_invalidate(s);
break;
case VTD_TLB_DSI_FLUSH:
domain_id = VTD_TLB_DID(val);
- VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
- domain_id);
iaig = VTD_TLB_DSI_FLUSH_A;
vtd_iotlb_domain_invalidate(s, domain_id);
break;
addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
am = VTD_IVA_AM(addr);
addr = VTD_IVA_ADDR(addr);
- VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
- " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
if (am > VTD_MAMV) {
- VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
- "%"PRIu8, (uint8_t)VTD_MAMV);
+ trace_vtd_err("IOTLB PSI flush: address mask overflow.");
iaig = 0;
break;
}
break;
default:
- VTD_DPRINTF(GENERAL, "error: invalid granularity");
+ trace_vtd_err("IOTLB flush: invalid granularity.");
iaig = 0;
}
return iaig;
}
-static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
-{
- return s->iq_tail == 0;
-}
+static void vtd_fetch_inv_desc(IntelIOMMUState *s);
static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
{
uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
- VTD_DPRINTF(INV, "Queued Invalidation Enable %s", (en ? "on" : "off"));
+ trace_vtd_inv_qi_enable(en);
+
if (en) {
- if (vtd_queued_inv_enable_check(s)) {
- s->iq = iqa_val & VTD_IQA_IQA_MASK;
- /* 2^(x+8) entries */
- s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
- s->qi_enabled = true;
- VTD_DPRINTF(INV, "DMAR_IQA_REG 0x%"PRIx64, iqa_val);
- VTD_DPRINTF(INV, "Invalidation Queue addr 0x%"PRIx64 " size %d",
- s->iq, s->iq_size);
- /* Ok - report back to driver */
- vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
- } else {
- VTD_DPRINTF(GENERAL, "error: can't enable Queued Invalidation: "
- "tail %"PRIu16, s->iq_tail);
+ s->iq = iqa_val & VTD_IQA_IQA_MASK;
+ /* 2^(x+8) entries */
+ s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
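+        /* e.g. QS=0 gives 256 descriptors, QS=7 gives 32768 */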
+ s->qi_enabled = true;
+ trace_vtd_inv_qi_setup(s->iq, s->iq_size);
+ /* Ok - report back to driver */
+ vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
+
+ if (s->iq_tail != 0) {
+ /*
+ * This is a spec violation but Windows guests are known to set up
+ * Queued Invalidation this way so we allow the write and process
+ * Invalidation Descriptors right away.
+ */
+ trace_vtd_warn_invalid_qi_tail(s->iq_tail);
+ if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
+ vtd_fetch_inv_desc(s);
+ }
}
} else {
if (vtd_queued_inv_disable_check(s)) {
/* Ok - report back to driver */
vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
} else {
- VTD_DPRINTF(GENERAL, "error: can't disable Queued Invalidation: "
- "head %"PRIu16 ", tail %"PRIu16
- ", last_descriptor %"PRIu8,
- s->iq_head, s->iq_tail, s->iq_last_desc_type);
+ trace_vtd_err_qi_disable(s->iq_head, s->iq_tail, s->iq_last_desc_type);
}
}
}
/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
- VTD_DPRINTF(CSR, "set Root Table Pointer");
-
vtd_root_table_setup(s);
/* Ok - report back to driver */
vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
- VTD_DPRINTF(CSR, "set Interrupt Remap Table Pointer");
-
vtd_interrupt_remap_table_setup(s);
/* Ok - report back to driver */
vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
- VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));
+ if (s->dmar_enabled == en) {
+ return;
+ }
+
+ trace_vtd_dmar_enable(en);
if (en) {
s->dmar_enabled = true;
/* Ok - report back to driver */
vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
}
+
+ vtd_switch_address_space_all(s);
}
/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
- VTD_DPRINTF(CSR, "Interrupt Remap Enable %s", (en ? "on" : "off"));
+ trace_vtd_ir_enable(en);
if (en) {
s->intr_enabled = true;
uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
uint32_t changed = status ^ val;
- VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
+ trace_vtd_reg_write_gcmd(status, val);
if (changed & VTD_GCMD_TE) {
/* Translation enable/disable */
vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
/* Context-cache invalidation request */
if (val & VTD_CCMD_ICC) {
if (s->qi_enabled) {
- VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
- "should not use register-based invalidation");
+ trace_vtd_err("Queued Invalidation enabled, "
+ "should not use register-based invalidation");
return;
}
ret = vtd_context_cache_invalidate(s, val);
vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
ret);
- VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
}
}
/* IOTLB invalidation request */
if (val & VTD_TLB_IVT) {
if (s->qi_enabled) {
- VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
- "should not use register-based invalidation");
+ trace_vtd_err("Queued Invalidation enabled, "
+ "should not use register-based invalidation.");
return;
}
ret = vtd_iotlb_flush(s, val);
vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
VTD_TLB_FLUSH_GRANU_MASK_A, ret);
- VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
}
}
dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
if (dma_memory_read(&address_space_memory, addr, inv_desc,
sizeof(*inv_desc))) {
- VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
- "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
+ trace_vtd_err("Read INV DESC failed.");
inv_desc->lo = 0;
inv_desc->hi = 0;
-
return false;
}
inv_desc->lo = le64_to_cpu(inv_desc->lo);
{
if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
(inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Invalidation "
- "Wait Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
/* FIXME: need to be masked with HAW? */
dma_addr_t status_addr = inv_desc->hi;
- VTD_DPRINTF(INV, "status data 0x%x, status addr 0x%"PRIx64,
- status_data, status_addr);
+ trace_vtd_inv_desc_wait_sw(status_addr, status_data);
status_data = cpu_to_le32(status_data);
if (dma_memory_write(&address_space_memory, status_addr, &status_data,
sizeof(status_data))) {
- VTD_DPRINTF(GENERAL, "error: fail to perform a coherent write");
+ trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
return false;
}
} else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
/* Interrupt flag */
- VTD_DPRINTF(INV, "Invalidation Wait Descriptor interrupt completion");
vtd_generate_completion_event(s);
} else {
- VTD_DPRINTF(GENERAL, "error: invalid Invalidation Wait Descriptor: "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64, inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
return true;
static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
+ uint16_t sid, fmask;
+
if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Context-cache "
- "Invalidate Descriptor");
+ trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
case VTD_INV_DESC_CC_DOMAIN:
- VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
- (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
+ trace_vtd_inv_desc_cc_domain(
+ (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
/* Fall through */
case VTD_INV_DESC_CC_GLOBAL:
- VTD_DPRINTF(INV, "global invalidation");
vtd_context_global_invalidate(s);
break;
case VTD_INV_DESC_CC_DEVICE:
- vtd_context_device_invalidate(s, VTD_INV_DESC_CC_SID(inv_desc->lo),
- VTD_INV_DESC_CC_FM(inv_desc->lo));
+ sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
+ fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
+ vtd_context_device_invalidate(s, sid, fmask);
break;
default:
- VTD_DPRINTF(GENERAL, "error: invalid granularity in Context-cache "
- "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
return true;
if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
(inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
- VTD_DPRINTF(GENERAL, "error: non-zero reserved field in IOTLB "
- "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
case VTD_INV_DESC_IOTLB_GLOBAL:
- VTD_DPRINTF(INV, "global invalidation");
vtd_iotlb_global_invalidate(s);
break;
case VTD_INV_DESC_IOTLB_DOMAIN:
domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
- VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
- domain_id);
vtd_iotlb_domain_invalidate(s, domain_id);
break;
domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
- VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
- " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
if (am > VTD_MAMV) {
- VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
- "%"PRIu8, (uint8_t)VTD_MAMV);
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
vtd_iotlb_page_invalidate(s, domain_id, addr, am);
break;
default:
- VTD_DPRINTF(GENERAL, "error: invalid granularity in IOTLB Invalidate "
- "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc->hi, inv_desc->lo);
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
return false;
}
return true;
static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
VTDInvDesc *inv_desc)
{
- VTD_DPRINTF(INV, "inv ir glob %d index %d mask %d",
- inv_desc->iec.granularity,
- inv_desc->iec.index,
- inv_desc->iec.index_mask);
+ trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
+ inv_desc->iec.index,
+ inv_desc->iec.index_mask);
vtd_iec_notify_all(s, !inv_desc->iec.granularity,
inv_desc->iec.index,
inv_desc->iec.index_mask);
+ return true;
+}
+static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
+ VTDInvDesc *inv_desc)
+{
+ VTDAddressSpace *vtd_dev_as;
+ IOMMUTLBEntry entry;
+ struct VTDBus *vtd_bus;
+ hwaddr addr;
+ uint64_t sz;
+ uint16_t sid;
+ uint8_t devfn;
+ bool size;
+ uint8_t bus_num;
+
+ addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
+ sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
+ devfn = sid & 0xff;
+ bus_num = sid >> 8;
+ size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);
+
+ if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
+ (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
+ trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
+ return false;
+ }
+
+ vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
+ if (!vtd_bus) {
+ goto done;
+ }
+
+ vtd_dev_as = vtd_bus->dev_as[devfn];
+ if (!vtd_dev_as) {
+ goto done;
+ }
+
+ /* According to ATS spec table 2.4:
+ * S = 0, bits 15:12 = xxxx range size: 4K
+ * S = 1, bits 15:12 = xxx0 range size: 8K
+ * S = 1, bits 15:12 = xx01 range size: 16K
+ * S = 1, bits 15:12 = x011 range size: 32K
+ * S = 1, bits 15:12 = 0111 range size: 64K
+ * ...
+ */
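+    /* For example, S=1 with addr bits 15:12 = 0111 has three trailing
+     * ones, so cto64() gives 3 and sz = 8K << 3 = 64K.
+     */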
+ if (size) {
+ sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
+ addr &= ~(sz - 1);
+ } else {
+ sz = VTD_PAGE_SIZE;
+ }
+
+ entry.target_as = &vtd_dev_as->as;
+ entry.addr_mask = sz - 1;
+ entry.iova = addr;
+ entry.perm = IOMMU_NONE;
+ entry.translated_addr = 0;
+ memory_region_notify_iommu(&vtd_dev_as->iommu, entry);
+
+done:
return true;
}
VTDInvDesc inv_desc;
uint8_t desc_type;
- VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
+ trace_vtd_inv_qi_head(s->iq_head);
if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
s->iq_last_desc_type = VTD_INV_DESC_NONE;
return false;
switch (desc_type) {
case VTD_INV_DESC_CC:
- VTD_DPRINTF(INV, "Context-cache Invalidate Descriptor hi 0x%"PRIx64
- " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
if (!vtd_process_context_cache_desc(s, &inv_desc)) {
return false;
}
break;
case VTD_INV_DESC_IOTLB:
- VTD_DPRINTF(INV, "IOTLB Invalidate Descriptor hi 0x%"PRIx64
- " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
if (!vtd_process_iotlb_desc(s, &inv_desc)) {
return false;
}
break;
case VTD_INV_DESC_WAIT:
- VTD_DPRINTF(INV, "Invalidation Wait Descriptor hi 0x%"PRIx64
- " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
if (!vtd_process_wait_desc(s, &inv_desc)) {
return false;
}
break;
case VTD_INV_DESC_IEC:
- VTD_DPRINTF(INV, "Invalidation Interrupt Entry Cache "
- "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
- inv_desc.hi, inv_desc.lo);
+ trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
return false;
}
break;
+ case VTD_INV_DESC_DEVICE:
+ trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
+ if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
+ return false;
+ }
+ break;
+
default:
- VTD_DPRINTF(GENERAL, "error: unkonw Invalidation Descriptor type "
- "hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
- inv_desc.hi, inv_desc.lo, desc_type);
+ trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
return false;
}
s->iq_head++;
/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
- VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
+ trace_vtd_inv_qi_fetch();
+
if (s->iq_tail >= s->iq_size) {
/* Detects an invalid Tail pointer */
- VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
- " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
+ trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
vtd_handle_inv_queue_error(s);
return;
}
uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);
s->iq_tail = VTD_IQT_QT(val);
- VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
+ trace_vtd_inv_qi_tail(s->iq_tail);
+
if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
/* Process Invalidation Queue here */
vtd_fetch_inv_desc(s);
if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
- VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
- "IP field of FECTL_REG");
+ trace_vtd_fsts_clear_ip();
}
/* FIXME: when IQE is Clear, should we try to fetch some Invalidation
* Descriptors if there are any when Queued Invalidation is enabled?
* software clears the IM field? Or just check if the IM field is zero?
*/
fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
+
+ trace_vtd_reg_write_fectl(fectl_reg);
+
if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
- VTD_DPRINTF(FLOG, "IM field is cleared, generate "
- "fault event interrupt");
}
}
uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
+ trace_vtd_reg_ics_clear_ip();
vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
- VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
- "clear IP field of IECTL_REG");
}
}
* software clears the IM field? Or just check if the IM field is zero?
*/
iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
+
+ trace_vtd_reg_write_iectl(iectl_reg);
+
if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
- VTD_DPRINTF(INV, "IM field is cleared, generate "
- "invalidation event interrupt");
}
}
IntelIOMMUState *s = opaque;
uint64_t val;
+ trace_vtd_reg_read(addr, size);
+
if (addr + size > DMAR_REG_SIZE) {
- VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
- ", got 0x%"PRIx64 " %d",
- (uint64_t)DMAR_REG_SIZE, addr, size);
+ trace_vtd_err("Read MMIO over range.");
return (uint64_t)-1;
}
val = vtd_get_quad(s, addr);
}
}
- VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
- addr, size, val);
+
return val;
}
{
IntelIOMMUState *s = opaque;
+ trace_vtd_reg_write(addr, size, val);
+
if (addr + size > DMAR_REG_SIZE) {
- VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
- ", got 0x%"PRIx64 " %d",
- (uint64_t)DMAR_REG_SIZE, addr, size);
+ trace_vtd_err("Write MMIO over range.");
return;
}
switch (addr) {
/* Global Command Register, 32-bit */
case DMAR_GCMD_REG:
- VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
vtd_set_long(s, addr, val);
vtd_handle_gcmd_write(s);
break;
/* Context Command Register, 64-bit */
case DMAR_CCMD_REG:
- VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_CCMD_REG_HI:
- VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
vtd_handle_ccmd_write(s);
/* IOTLB Invalidation Register, 64-bit */
case DMAR_IOTLB_REG:
- VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_IOTLB_REG_HI:
- VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
vtd_handle_iotlb_write(s);
/* Invalidate Address Register, 64-bit */
case DMAR_IVA_REG:
- VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_IVA_REG_HI:
- VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Fault Status Register, 32-bit */
case DMAR_FSTS_REG:
- VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
vtd_handle_fsts_write(s);
/* Fault Event Control Register, 32-bit */
case DMAR_FECTL_REG:
- VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
vtd_handle_fectl_write(s);
/* Fault Event Data Register, 32-bit */
case DMAR_FEDATA_REG:
- VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Fault Event Address Register, 32-bit */
case DMAR_FEADDR_REG:
- VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Fault Event Upper Address Register, 32-bit */
case DMAR_FEUADDR_REG:
- VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Protected Memory Enable Register, 32-bit */
case DMAR_PMEN_REG:
- VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Root Table Address Register, 64-bit */
case DMAR_RTADDR_REG:
- VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_RTADDR_REG_HI:
- VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Invalidation Queue Tail Register, 64-bit */
case DMAR_IQT_REG:
- VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_IQT_REG_HI:
- VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
/* 19:63 of IQT_REG is RsvdZ, do nothing here */
/* Invalidation Queue Address Register, 64-bit */
case DMAR_IQA_REG:
- VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_IQA_REG_HI:
- VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Invalidation Completion Status Register, 32-bit */
case DMAR_ICS_REG:
- VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
vtd_handle_ics_write(s);
/* Invalidation Event Control Register, 32-bit */
case DMAR_IECTL_REG:
- VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
vtd_handle_iectl_write(s);
/* Invalidation Event Data Register, 32-bit */
case DMAR_IEDATA_REG:
- VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Invalidation Event Address Register, 32-bit */
case DMAR_IEADDR_REG:
- VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Invalidation Event Upper Address Register, 32-bit */
case DMAR_IEUADDR_REG:
- VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
/* Fault Recording Registers, 128-bit */
case DMAR_FRCD_REG_0_0:
- VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_FRCD_REG_0_1:
- VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
case DMAR_FRCD_REG_0_2:
- VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_FRCD_REG_0_3:
- VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
/* May clear bit 127 (Fault), update PPF */
break;
case DMAR_IRTA_REG:
- VTD_DPRINTF(IR, "DMAR_IRTA_REG write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
break;
case DMAR_IRTA_REG_HI:
- VTD_DPRINTF(IR, "DMAR_IRTA_REG_HI write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
assert(size == 4);
vtd_set_long(s, addr, val);
break;
default:
- VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
- ", size %d, val 0x%"PRIx64, addr, size, val);
if (size == 4) {
vtd_set_long(s, addr, val);
} else {
}
}
-static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
- bool is_write)
+static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
+ IOMMUAccessFlags flag)
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
IntelIOMMUState *s = vtd_as->iommu_state;
- IOMMUTLBEntry ret = {
+ IOMMUTLBEntry iotlb = {
+ /* We'll fill in the rest later. */
.target_as = &address_space_memory,
- .iova = addr,
- .translated_addr = 0,
- .addr_mask = ~(hwaddr)0,
- .perm = IOMMU_NONE,
};
+ bool success;
- if (!s->dmar_enabled) {
+ if (likely(s->dmar_enabled)) {
+ success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
+ addr, flag & IOMMU_WO, &iotlb);
+ } else {
/* DMAR disabled, passthrough, use 4k-page*/
- ret.iova = addr & VTD_PAGE_MASK_4K;
- ret.translated_addr = addr & VTD_PAGE_MASK_4K;
- ret.addr_mask = ~VTD_PAGE_MASK_4K;
- ret.perm = IOMMU_RW;
- return ret;
+ iotlb.iova = addr & VTD_PAGE_MASK_4K;
+ iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
+ iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
+ iotlb.perm = IOMMU_RW;
+ success = true;
+ }
+
+ if (likely(success)) {
+ trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
+ VTD_PCI_SLOT(vtd_as->devfn),
+ VTD_PCI_FUNC(vtd_as->devfn),
+ iotlb.iova, iotlb.translated_addr,
+ iotlb.addr_mask);
+ } else {
+ trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
+ VTD_PCI_SLOT(vtd_as->devfn),
+ VTD_PCI_FUNC(vtd_as->devfn),
+ iotlb.iova);
}
- vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
- is_write, &ret);
- VTD_DPRINTF(MMU,
- "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
- " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
- VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
- vtd_as->devfn, addr, ret.translated_addr);
- return ret;
+ return iotlb;
}
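
For readers following the translate path, here is a standalone sketch of the 4K passthrough arithmetic in the DMAR-disabled branch above; the macro values are assumptions chosen to match VTD_PAGE_SHIFT_4K == 12, not code from this patch:

#include <inttypes.h>
#include <stdio.h>

#define PAGE_SHIFT_4K 12
#define PAGE_MASK_4K  (~((1ULL << PAGE_SHIFT_4K) - 1))

int main(void)
{
    uint64_t addr = 0x12345678ULL;

    /* Both iova and translated_addr get the page base; addr_mask
     * keeps only the offset bits inside the 4K page. */
    printf("page base 0x%" PRIx64 ", addr_mask 0x%" PRIx64 "\n",
           (uint64_t)(addr & PAGE_MASK_4K), (uint64_t)~PAGE_MASK_4K);
    return 0;
}

This prints page base 0x12345000 and addr_mask 0xfff: any access within that page resolves to the same IOTLB entry.
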
-static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
+static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
IOMMUNotifierFlag old,
IOMMUNotifierFlag new)
{
VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
+ IntelIOMMUState *s = vtd_as->iommu_state;
+ IntelIOMMUNotifierNode *node = NULL;
+ IntelIOMMUNotifierNode *next_node = NULL;
- if (new & IOMMU_NOTIFIER_MAP) {
- error_report("Device at bus %s addr %02x.%d requires iommu "
- "notifier which is currently not supported by "
- "intel-iommu emulation",
- vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn),
- PCI_FUNC(vtd_as->devfn));
+ if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
+ error_report("We need to set cache_mode=1 for intel-iommu to enable "
+ "device assignment with IOMMU protection.");
exit(1);
}
+
+ if (old == IOMMU_NOTIFIER_NONE) {
+ node = g_malloc0(sizeof(*node));
+ node->vtd_as = vtd_as;
+ QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
+ return;
+ }
+
+ /* update notifier node with new flags */
+ QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
+ if (node->vtd_as == vtd_as) {
+ if (new == IOMMU_NOTIFIER_NONE) {
+ QLIST_REMOVE(node, next);
+ g_free(node);
+ }
+ return;
+ }
+ }
+}
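
For context on when this hook fires: a client such as vfio attaches an IOMMUNotifier to the region, and the resulting flag transition lands here. A minimal sketch against the memory API of this period; the callback body and function names are hypothetical:

static void example_notify(IOMMUNotifier *n, IOMMUTLBEntry *entry)
{
    /* React to the mapping change described by entry->iova,
     * entry->addr_mask and entry->perm. */
}

static void example_attach(IOMMUMemoryRegion *iommu_mr)
{
    static IOMMUNotifier notifier;

    /* Request both MAP and UNMAP events over the whole address range.
     * Registering the first notifier transitions old from
     * IOMMU_NOTIFIER_NONE, which makes the hook above allocate a
     * notifier node. */
    iommu_notifier_init(&notifier, example_notify, IOMMU_NOTIFIER_ALL,
                        0, UINT64_MAX);
    memory_region_register_iommu_notifier(MEMORY_REGION(iommu_mr), &notifier);
}
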
+
+static int vtd_post_load(void *opaque, int version_id)
+{
+ IntelIOMMUState *iommu = opaque;
+
+ /*
+ * Memory regions are dynamically turned on/off depending on
+ * context entry configurations from the guest. After migration,
+ * we need to make sure the memory regions are still correct.
+ */
+ vtd_switch_address_space_all(iommu);
+
+ return 0;
}
static const VMStateDescription vtd_vmstate = {
.name = "iommu-intel",
- .unmigratable = 1,
+ .version_id = 1,
+ .minimum_version_id = 1,
+ .priority = MIG_PRI_IOMMU,
+ .post_load = vtd_post_load,
+ .fields = (VMStateField[]) {
+ VMSTATE_UINT64(root, IntelIOMMUState),
+ VMSTATE_UINT64(intr_root, IntelIOMMUState),
+ VMSTATE_UINT64(iq, IntelIOMMUState),
+ VMSTATE_UINT32(intr_size, IntelIOMMUState),
+ VMSTATE_UINT16(iq_head, IntelIOMMUState),
+ VMSTATE_UINT16(iq_tail, IntelIOMMUState),
+ VMSTATE_UINT16(iq_size, IntelIOMMUState),
+ VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
+ VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
+ VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
+ VMSTATE_BOOL(root_extended, IntelIOMMUState),
+ VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
+ VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
+ VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
+ VMSTATE_BOOL(intr_eime, IntelIOMMUState),
+ VMSTATE_END_OF_LIST()
+ }
};
static const MemoryRegionOps vtd_mem_ops = {
DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
ON_OFF_AUTO_AUTO),
DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
+ DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
DEFINE_PROP_END_OF_LIST(),
};
addr = iommu->intr_root + index * sizeof(*entry);
if (dma_memory_read(&address_space_memory, addr, entry,
sizeof(*entry))) {
- VTD_DPRINTF(GENERAL, "error: fail to access IR root at 0x%"PRIx64
- " + %"PRIu16, iommu->intr_root, index);
+ trace_vtd_err("Memory read failed for IRTE.");
return -VTD_FR_IR_ROOT_INVAL;
}
+ trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
+ le64_to_cpu(entry->data[0]));
+
if (!entry->irte.present) {
- VTD_DPRINTF(GENERAL, "error: present flag not set in IRTE"
- " entry index %u value 0x%"PRIx64 " 0x%"PRIx64,
- index, le64_to_cpu(entry->data[1]),
- le64_to_cpu(entry->data[0]));
+ trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
+ le64_to_cpu(entry->data[0]));
return -VTD_FR_IR_ENTRY_P;
}
if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
entry->irte.__reserved_2) {
- VTD_DPRINTF(GENERAL, "error: IRTE entry index %"PRIu16
- " reserved fields non-zero: 0x%"PRIx64 " 0x%"PRIx64,
- index, le64_to_cpu(entry->data[1]),
- le64_to_cpu(entry->data[0]));
+ trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
+ le64_to_cpu(entry->data[0]));
return -VTD_FR_IR_IRTE_RSVD;
}
source_id = le32_to_cpu(entry->irte.source_id);
switch (entry->irte.sid_vtype) {
case VTD_SVT_NONE:
- VTD_DPRINTF(IR, "No SID validation for IRTE index %d", index);
break;
case VTD_SVT_ALL:
mask = vtd_svt_mask[entry->irte.sid_q];
if ((source_id & mask) != (sid & mask)) {
- VTD_DPRINTF(GENERAL, "SID validation for IRTE index "
- "%d failed (reqid 0x%04x sid 0x%04x)", index,
- sid, source_id);
+ trace_vtd_err_irte_sid(index, sid, source_id);
return -VTD_FR_IR_SID_ERR;
}
break;
bus_min = source_id & 0xff;
bus = sid >> 8;
if (bus > bus_max || bus < bus_min) {
- VTD_DPRINTF(GENERAL, "SID validation for IRTE index %d "
- "failed (bus %d outside %d-%d)", index, bus,
- bus_min, bus_max);
+ trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
return -VTD_FR_IR_SID_ERR;
}
break;
default:
- VTD_DPRINTF(GENERAL, "Invalid SVT bits (0x%x) in IRTE index "
- "%d", entry->irte.sid_vtype, index);
+ trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
/* Take this as verification failure. */
return -VTD_FR_IR_SID_ERR;
break;
irq->dest_mode = irte.irte.dest_mode;
irq->redir_hint = irte.irte.redir_hint;
- VTD_DPRINTF(IR, "remapping interrupt index %d: trig:%u,vec:%u,"
- "deliver:%u,dest:%u,dest_mode:%u", index,
- irq->trigger_mode, irq->vector, irq->delivery_mode,
- irq->dest, irq->dest_mode);
+ trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
+ irq->delivery_mode, irq->dest, irq->dest_mode);
return 0;
}
assert(origin && translated);
+ trace_vtd_ir_remap_msi_req(origin->address, origin->data);
+
if (!iommu || !iommu->intr_enabled) {
- goto do_not_translate;
+ memcpy(translated, origin, sizeof(*origin));
+ goto out;
}
if (origin->address & VTD_MSI_ADDR_HI_MASK) {
- VTD_DPRINTF(GENERAL, "error: MSI addr high 32 bits nonzero"
- " during interrupt remapping: 0x%"PRIx32,
- (uint32_t)((origin->address & VTD_MSI_ADDR_HI_MASK) >> \
- VTD_MSI_ADDR_HI_SHIFT));
+ trace_vtd_err("MSI address high 32 bits non-zero when "
+ "Interrupt Remapping enabled.");
return -VTD_FR_IR_REQ_RSVD;
}
addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
if (addr.addr.__head != 0xfee) {
- VTD_DPRINTF(GENERAL, "error: MSI addr low 32 bits invalid: "
- "0x%"PRIx32, addr.data);
+ trace_vtd_err("MSI addr low 32 bit invalid.");
return -VTD_FR_IR_REQ_RSVD;
}
/* This is compatible mode. */
if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
- goto do_not_translate;
+ memcpy(translated, origin, sizeof(*origin));
+ goto out;
}
index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
}
if (addr.addr.sub_valid) {
- VTD_DPRINTF(IR, "received MSI interrupt");
+ trace_vtd_ir_remap_type("MSI");
if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
- VTD_DPRINTF(GENERAL, "error: MSI data bits non-zero for "
- "interrupt remappable entry: 0x%"PRIx32,
- origin->data);
+ trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
return -VTD_FR_IR_REQ_RSVD;
}
} else {
uint8_t vector = origin->data & 0xff;
uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;
- VTD_DPRINTF(IR, "received IOAPIC interrupt");
+ trace_vtd_ir_remap_type("IOAPIC");
/* IOAPIC entry vector should be aligned with IRTE vector
* (see vt-d spec 5.1.5.1). */
if (vector != irq.vector) {
- VTD_DPRINTF(GENERAL, "IOAPIC vector inconsistent: "
- "entry: %d, IRTE: %d, index: %d",
- vector, irq.vector, index);
+ trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
}
/* The Trigger Mode field must match the Trigger Mode in the IRTE.
* (see vt-d spec 5.1.5.1). */
if (trigger_mode != irq.trigger_mode) {
- VTD_DPRINTF(GENERAL, "IOAPIC trigger mode inconsistent: "
- "entry: %u, IRTE: %u, index: %d",
- trigger_mode, irq.trigger_mode, index);
+ trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
+ irq.trigger_mode);
}
-
}
/* Translate VTDIrq to MSI message */
vtd_generate_msi_message(&irq, translated);
- VTD_DPRINTF(IR, "mapping MSI 0x%"PRIx64":0x%"PRIx32 " -> "
- "0x%"PRIx64":0x%"PRIx32, origin->address, origin->data,
- translated->address, translated->data);
- return 0;
-
-do_not_translate:
- memcpy(translated, origin, sizeof(*origin));
+out:
+ trace_vtd_ir_remap_msi(origin->address, origin->data,
+ translated->address, translated->data);
return 0;
}
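
As a worked example of the index extraction above, the remappable-format MSI address decode can be written with explicit shifts instead of the driver's bitfield union; field positions follow VT-d spec 5.1.5.2, and this helper is illustrative only:

#include <stdint.h>

static uint32_t irte_index_from_msi_addr(uint32_t addr)
{
    uint32_t index_l = (addr >> 5) & 0x7fff; /* handle[14:0] in bits 19:5 */
    uint32_t index_h = (addr >> 2) & 0x1;    /* handle[15] in bit 2 */

    /* Bits 31:20 must read 0xfee, bit 4 selects the remappable
     * format (int_mode above), bit 3 is SHV (sub_valid above). */
    return (index_h << 15) | index_l;
}
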
ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
if (ret) {
/* TODO: report error */
- VTD_DPRINTF(GENERAL, "int remap fail for addr 0x%"PRIx64
- " data 0x%"PRIx32, from.address, from.data);
/* Drop this interrupt */
return MEMTX_ERROR;
}
- VTD_DPRINTF(IR, "delivering MSI 0x%"PRIx64":0x%"PRIx32
- " for device sid 0x%04x",
- to.address, to.data, sid);
-
apic_get_class()->send_msi(&to);
return MEMTX_OK;
uintptr_t key = (uintptr_t)bus;
VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
VTDAddressSpace *vtd_dev_as;
+ char name[128];
if (!vtd_bus) {
+ uintptr_t *new_key = g_malloc(sizeof(*new_key));
+ *new_key = (uintptr_t)bus;
/* No corresponding free() */
vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
X86_IOMMU_PCI_DEVFN_MAX);
vtd_bus->bus = bus;
- key = (uintptr_t)bus;
- g_hash_table_insert(s->vtd_as_by_busptr, &key, vtd_bus);
+ g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
}
vtd_dev_as = vtd_bus->dev_as[devfn];
if (!vtd_dev_as) {
+ snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
vtd_dev_as->bus = bus;
vtd_dev_as->devfn = (uint8_t)devfn;
vtd_dev_as->iommu_state = s;
vtd_dev_as->context_cache_entry.context_cache_gen = 0;
- memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
- &s->iommu_ops, "intel_iommu", UINT64_MAX);
+
+ /*
+ * Memory region relationships look like this (address ranges
+ * show only the lower 32 bits for brevity):
+ *
+ * |-----------------+-------------------+----------|
+ * | Name | Address range | Priority |
+ * |-----------------+-------------------+----------+
+ * | vtd_root | 00000000-ffffffff | 0 |
+ * | intel_iommu | 00000000-ffffffff | 1 |
+ * | vtd_sys_alias | 00000000-ffffffff | 1 |
+ * | intel_iommu_ir | fee00000-feefffff | 64 |
+ * |-----------------+-------------------+----------|
+ *
+ * We enable/disable DMAR by switching enablement for
+ * vtd_sys_alias and intel_iommu regions. IR region is always
+ * enabled.
+ */
+ memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
+ TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
+ "intel_iommu_dmar",
+ UINT64_MAX);
+ memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
+ "vtd_sys_alias", get_system_memory(),
+ 0, memory_region_size(get_system_memory()));
memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
&vtd_mem_ir_ops, s, "intel_iommu_ir",
VTD_INTERRUPT_ADDR_SIZE);
- memory_region_add_subregion(&vtd_dev_as->iommu, VTD_INTERRUPT_ADDR_FIRST,
- &vtd_dev_as->iommu_ir);
- address_space_init(&vtd_dev_as->as,
- &vtd_dev_as->iommu, "intel_iommu");
+ memory_region_init(&vtd_dev_as->root, OBJECT(s),
+ "vtd_root", UINT64_MAX);
+ memory_region_add_subregion_overlap(&vtd_dev_as->root,
+ VTD_INTERRUPT_ADDR_FIRST,
+ &vtd_dev_as->iommu_ir, 64);
+ address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
+ memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
+ &vtd_dev_as->sys_alias, 1);
+ memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
+ MEMORY_REGION(&vtd_dev_as->iommu),
+ 1);
+ vtd_switch_address_space(vtd_dev_as);
}
return vtd_dev_as;
}
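
Given the layering table above, enabling or disabling DMAR reduces to toggling region enablement. A sketch of what vtd_switch_address_space is expected to do with these regions (the real helper lives elsewhere in the file; this outline is an assumption, not its body):

static void vtd_switch_address_space_sketch(VTDAddressSpace *as, bool dmar_on)
{
    /* The IOMMU region and the plain RAM alias share priority 1, so
     * exactly one of them should be enabled at a time; the IR region
     * at priority 64 stays enabled regardless. */
    memory_region_set_enabled(MEMORY_REGION(&as->iommu), dmar_on);
    memory_region_set_enabled(&as->sys_alias, !dmar_on);
}
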
+/* Unmap the whole range in the notifier's scope. */
+static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
+{
+ IOMMUTLBEntry entry;
+ hwaddr size;
+ hwaddr start = n->start;
+ hwaddr end = n->end;
+
+ /*
+ * Note: all the code in this function assumes that the IOVA
+ * width is no more than VTD_MGAW bits (which the VT-d spec
+ * guarantees); otherwise we would need to consider 64-bit overflow.
+ */
+
+ if (end > VTD_ADDRESS_SIZE) {
+ /*
+ * No need to unmap regions beyond the whole VT-d
+ * supported address space size.
+ */
+ end = VTD_ADDRESS_SIZE;
+ }
+
+ assert(start <= end);
+ size = end - start;
+
+ if (ctpop64(size) != 1) {
+ /*
+ * This size cannot form a correct mask. Enlarge it to the
+ * smallest power of two that covers the range.
+ */
+ int bits = 64 - clz64(size);
+ if (bits > VTD_MGAW) {
+ /* should not happen, but in case it happens, limit it */
+ bits = VTD_MGAW;
+ }
+ size = 1ULL << bits;
+ }
+
+ entry.target_as = &address_space_memory;
+ /* Adjust iova for the size */
+ entry.iova = n->start & ~(size - 1);
+ /* This field is meaningless for unmap */
+ entry.translated_addr = 0;
+ entry.perm = IOMMU_NONE;
+ entry.addr_mask = size - 1;
+
+ trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
+ VTD_PCI_SLOT(as->devfn),
+ VTD_PCI_FUNC(as->devfn),
+ entry.iova, size);
+
+ memory_region_notify_one(n, &entry);
+}
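
The power-of-two rounding above is easiest to see with numbers; a standalone version using compiler builtins in place of QEMU's ctpop64/clz64 helpers (the VTD_MGAW cap is left out):

#include <stdint.h>

static uint64_t unmap_size_to_pow2(uint64_t size)
{
    /* A mask of (size - 1) is only valid when size is a power of two,
     * so round up: e.g. 0x3000 -> 0x4000, giving addr_mask 0x3fff. */
    if (__builtin_popcountll(size) != 1) {
        size = 1ULL << (64 - __builtin_clzll(size));
    }
    return size;
}

Rounding up can cover more than the notifier's exact scope; that is harmless for UNMAP semantics, since notifying an unmap of an already-unmapped range is a no-op for the client.
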
+
+static void vtd_address_space_unmap_all(IntelIOMMUState *s)
+{
+ IntelIOMMUNotifierNode *node;
+ VTDAddressSpace *vtd_as;
+ IOMMUNotifier *n;
+
+ QLIST_FOREACH(node, &s->notifiers_list, next) {
+ vtd_as = node->vtd_as;
+ IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
+ vtd_address_space_unmap(vtd_as, n);
+ }
+ }
+}
+
+static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
+{
+ memory_region_notify_one((IOMMUNotifier *)private, entry);
+ return 0;
+}
+
+static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
+{
+ VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
+ IntelIOMMUState *s = vtd_as->iommu_state;
+ uint8_t bus_n = pci_bus_num(vtd_as->bus);
+ VTDContextEntry ce;
+
+ /*
+ * The replay can be triggered by either an invalidation or a
+ * newly created entry. Either way, we release the existing
+ * mappings (which means flushing caches for UNMAP-only notifiers).
+ */
+ vtd_address_space_unmap(vtd_as, n);
+
+ if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
+ trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
+ PCI_FUNC(vtd_as->devfn),
+ VTD_CONTEXT_ENTRY_DID(ce.hi),
+ ce.hi, ce.lo);
+ vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
+ } else {
+ trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
+ PCI_FUNC(vtd_as->devfn));
+ }
+
+ return;
+}
+
/* Do the initialization. It will also be called when reset, so pay
* attention when adding new initialization stuff.
*/
memset(s->w1cmask, 0, DMAR_REG_SIZE);
memset(s->womask, 0, DMAR_REG_SIZE);
- s->iommu_ops.translate = vtd_iommu_translate;
- s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
s->root = 0;
s->root_extended = false;
s->dmar_enabled = false;
assert(s->intr_eim != ON_OFF_AUTO_AUTO);
}
+ if (x86_iommu->dt_supported) {
+ s->ecap |= VTD_ECAP_DT;
+ }
+
+ if (x86_iommu->pt_supported) {
+ s->ecap |= VTD_ECAP_PT;
+ }
+
+ if (s->caching_mode) {
+ s->cap |= VTD_CAP_CM;
+ }
+
vtd_reset_context_cache(s);
vtd_reset_iotlb(s);
{
IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
- VTD_DPRINTF(GENERAL, "");
vtd_init(s);
+
+ /*
+ * When the device is reset, throw away all mappings and external caches.
+ */
+ vtd_address_space_unmap_all(s);
}
static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
static void vtd_realize(DeviceState *dev, Error **errp)
{
- PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
- PCIBus *bus = pcms->bus;
+ MachineState *ms = MACHINE(qdev_get_machine());
+ MachineClass *mc = MACHINE_GET_CLASS(ms);
+ PCMachineState *pcms =
+ PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
+ PCIBus *bus;
IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
- VTD_DPRINTF(GENERAL, "");
+ if (!pcms) {
+ error_setg(errp, "Machine-type '%s' not supported by intel-iommu",
+ mc->name);
+ return;
+ }
+
+ bus = pcms->bus;
x86_iommu->type = TYPE_INTEL;
if (!vtd_decide_config(s, errp)) {
return;
}
+ QLIST_INIT(&s->notifiers_list);
memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
"intel_iommu", DMAR_REG_SIZE);
dc->hotpluggable = false;
x86_class->realize = vtd_realize;
x86_class->int_remap = vtd_int_remap;
+ /* Supported by the pc-q35-* machine types */
+ dc->user_creatable = true;
}
static const TypeInfo vtd_info = {
.class_init = vtd_class_init,
};
+static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
+ void *data)
+{
+ IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);
+
+ imrc->translate = vtd_iommu_translate;
+ imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
+ imrc->replay = vtd_iommu_replay;
+}
+
+static const TypeInfo vtd_iommu_memory_region_info = {
+ .parent = TYPE_IOMMU_MEMORY_REGION,
+ .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
+ .class_init = vtd_iommu_memory_region_class_init,
+};
+
static void vtd_register_types(void)
{
- VTD_DPRINTF(GENERAL, "");
type_register_static(&vtd_info);
+ type_register_static(&vtd_iommu_memory_region_info);
}
type_init(vtd_register_types)