2 * QEMU emulation of an Intel IOMMU (VT-d)
3 * (DMA Remapping device)
5 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
6 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
22 #include "qemu/osdep.h"
23 #include "qemu/error-report.h"
24 #include "qapi/error.h"
25 #include "hw/sysbus.h"
26 #include "exec/address-spaces.h"
27 #include "intel_iommu_internal.h"
28 #include "hw/pci/pci.h"
29 #include "hw/pci/pci_bus.h"
30 #include "hw/i386/pc.h"
31 #include "hw/i386/apic-msidef.h"
32 #include "hw/boards.h"
33 #include "hw/i386/x86-iommu.h"
34 #include "hw/pci-host/q35.h"
35 #include "sysemu/kvm.h"
36 #include "hw/i386/apic_internal.h"
40 static void vtd_address_space_refresh_all(IntelIOMMUState
*s
);
41 static void vtd_address_space_unmap(VTDAddressSpace
*as
, IOMMUNotifier
*n
);
43 static void vtd_define_quad(IntelIOMMUState
*s
, hwaddr addr
, uint64_t val
,
44 uint64_t wmask
, uint64_t w1cmask
)
46 stq_le_p(&s
->csr
[addr
], val
);
47 stq_le_p(&s
->wmask
[addr
], wmask
);
48 stq_le_p(&s
->w1cmask
[addr
], w1cmask
);
51 static void vtd_define_quad_wo(IntelIOMMUState
*s
, hwaddr addr
, uint64_t mask
)
53 stq_le_p(&s
->womask
[addr
], mask
);
56 static void vtd_define_long(IntelIOMMUState
*s
, hwaddr addr
, uint32_t val
,
57 uint32_t wmask
, uint32_t w1cmask
)
59 stl_le_p(&s
->csr
[addr
], val
);
60 stl_le_p(&s
->wmask
[addr
], wmask
);
61 stl_le_p(&s
->w1cmask
[addr
], w1cmask
);
64 static void vtd_define_long_wo(IntelIOMMUState
*s
, hwaddr addr
, uint32_t mask
)
66 stl_le_p(&s
->womask
[addr
], mask
);
69 /* "External" get/set operations */
70 static void vtd_set_quad(IntelIOMMUState
*s
, hwaddr addr
, uint64_t val
)
72 uint64_t oldval
= ldq_le_p(&s
->csr
[addr
]);
73 uint64_t wmask
= ldq_le_p(&s
->wmask
[addr
]);
74 uint64_t w1cmask
= ldq_le_p(&s
->w1cmask
[addr
]);
75 stq_le_p(&s
->csr
[addr
],
76 ((oldval
& ~wmask
) | (val
& wmask
)) & ~(w1cmask
& val
));
79 static void vtd_set_long(IntelIOMMUState
*s
, hwaddr addr
, uint32_t val
)
81 uint32_t oldval
= ldl_le_p(&s
->csr
[addr
]);
82 uint32_t wmask
= ldl_le_p(&s
->wmask
[addr
]);
83 uint32_t w1cmask
= ldl_le_p(&s
->w1cmask
[addr
]);
84 stl_le_p(&s
->csr
[addr
],
85 ((oldval
& ~wmask
) | (val
& wmask
)) & ~(w1cmask
& val
));
88 static uint64_t vtd_get_quad(IntelIOMMUState
*s
, hwaddr addr
)
90 uint64_t val
= ldq_le_p(&s
->csr
[addr
]);
91 uint64_t womask
= ldq_le_p(&s
->womask
[addr
]);
95 static uint32_t vtd_get_long(IntelIOMMUState
*s
, hwaddr addr
)
97 uint32_t val
= ldl_le_p(&s
->csr
[addr
]);
98 uint32_t womask
= ldl_le_p(&s
->womask
[addr
]);
102 /* "Internal" get/set operations */
103 static uint64_t vtd_get_quad_raw(IntelIOMMUState
*s
, hwaddr addr
)
105 return ldq_le_p(&s
->csr
[addr
]);
108 static uint32_t vtd_get_long_raw(IntelIOMMUState
*s
, hwaddr addr
)
110 return ldl_le_p(&s
->csr
[addr
]);
113 static void vtd_set_quad_raw(IntelIOMMUState
*s
, hwaddr addr
, uint64_t val
)
115 stq_le_p(&s
->csr
[addr
], val
);
118 static uint32_t vtd_set_clear_mask_long(IntelIOMMUState
*s
, hwaddr addr
,
119 uint32_t clear
, uint32_t mask
)
121 uint32_t new_val
= (ldl_le_p(&s
->csr
[addr
]) & ~clear
) | mask
;
122 stl_le_p(&s
->csr
[addr
], new_val
);
126 static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState
*s
, hwaddr addr
,
127 uint64_t clear
, uint64_t mask
)
129 uint64_t new_val
= (ldq_le_p(&s
->csr
[addr
]) & ~clear
) | mask
;
130 stq_le_p(&s
->csr
[addr
], new_val
);
134 static inline void vtd_iommu_lock(IntelIOMMUState
*s
)
136 qemu_mutex_lock(&s
->iommu_lock
);
139 static inline void vtd_iommu_unlock(IntelIOMMUState
*s
)
141 qemu_mutex_unlock(&s
->iommu_lock
);
144 /* Whether the address space needs to notify new mappings */
145 static inline gboolean
vtd_as_has_map_notifier(VTDAddressSpace
*as
)
147 return as
->notifier_flags
& IOMMU_NOTIFIER_MAP
;
150 /* GHashTable functions */
151 static gboolean
vtd_uint64_equal(gconstpointer v1
, gconstpointer v2
)
153 return *((const uint64_t *)v1
) == *((const uint64_t *)v2
);
156 static guint
vtd_uint64_hash(gconstpointer v
)
158 return (guint
)*(const uint64_t *)v
;
161 static gboolean
vtd_hash_remove_by_domain(gpointer key
, gpointer value
,
164 VTDIOTLBEntry
*entry
= (VTDIOTLBEntry
*)value
;
165 uint16_t domain_id
= *(uint16_t *)user_data
;
166 return entry
->domain_id
== domain_id
;
169 /* The shift of an addr for a certain level of paging structure */
170 static inline uint32_t vtd_slpt_level_shift(uint32_t level
)
173 return VTD_PAGE_SHIFT_4K
+ (level
- 1) * VTD_SL_LEVEL_BITS
;
176 static inline uint64_t vtd_slpt_level_page_mask(uint32_t level
)
178 return ~((1ULL << vtd_slpt_level_shift(level
)) - 1);
181 static gboolean
vtd_hash_remove_by_page(gpointer key
, gpointer value
,
184 VTDIOTLBEntry
*entry
= (VTDIOTLBEntry
*)value
;
185 VTDIOTLBPageInvInfo
*info
= (VTDIOTLBPageInvInfo
*)user_data
;
186 uint64_t gfn
= (info
->addr
>> VTD_PAGE_SHIFT_4K
) & info
->mask
;
187 uint64_t gfn_tlb
= (info
->addr
& entry
->mask
) >> VTD_PAGE_SHIFT_4K
;
188 return (entry
->domain_id
== info
->domain_id
) &&
189 (((entry
->gfn
& info
->mask
) == gfn
) ||
190 (entry
->gfn
== gfn_tlb
));
193 /* Reset all the gen of VTDAddressSpace to zero and set the gen of
194 * IntelIOMMUState to 1. Must be called with IOMMU lock held.
196 static void vtd_reset_context_cache_locked(IntelIOMMUState
*s
)
198 VTDAddressSpace
*vtd_as
;
200 GHashTableIter bus_it
;
203 trace_vtd_context_cache_reset();
205 g_hash_table_iter_init(&bus_it
, s
->vtd_as_by_busptr
);
207 while (g_hash_table_iter_next (&bus_it
, NULL
, (void**)&vtd_bus
)) {
208 for (devfn_it
= 0; devfn_it
< PCI_DEVFN_MAX
; ++devfn_it
) {
209 vtd_as
= vtd_bus
->dev_as
[devfn_it
];
213 vtd_as
->context_cache_entry
.context_cache_gen
= 0;
216 s
->context_cache_gen
= 1;
219 /* Must be called with IOMMU lock held. */
220 static void vtd_reset_iotlb_locked(IntelIOMMUState
*s
)
223 g_hash_table_remove_all(s
->iotlb
);
226 static void vtd_reset_iotlb(IntelIOMMUState
*s
)
229 vtd_reset_iotlb_locked(s
);
233 static void vtd_reset_caches(IntelIOMMUState
*s
)
236 vtd_reset_iotlb_locked(s
);
237 vtd_reset_context_cache_locked(s
);
241 static uint64_t vtd_get_iotlb_key(uint64_t gfn
, uint16_t source_id
,
244 return gfn
| ((uint64_t)(source_id
) << VTD_IOTLB_SID_SHIFT
) |
245 ((uint64_t)(level
) << VTD_IOTLB_LVL_SHIFT
);
248 static uint64_t vtd_get_iotlb_gfn(hwaddr addr
, uint32_t level
)
250 return (addr
& vtd_slpt_level_page_mask(level
)) >> VTD_PAGE_SHIFT_4K
;
253 /* Must be called with IOMMU lock held */
254 static VTDIOTLBEntry
*vtd_lookup_iotlb(IntelIOMMUState
*s
, uint16_t source_id
,
257 VTDIOTLBEntry
*entry
;
261 for (level
= VTD_SL_PT_LEVEL
; level
< VTD_SL_PML4_LEVEL
; level
++) {
262 key
= vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr
, level
),
264 entry
= g_hash_table_lookup(s
->iotlb
, &key
);
274 /* Must be with IOMMU lock held */
275 static void vtd_update_iotlb(IntelIOMMUState
*s
, uint16_t source_id
,
276 uint16_t domain_id
, hwaddr addr
, uint64_t slpte
,
277 uint8_t access_flags
, uint32_t level
)
279 VTDIOTLBEntry
*entry
= g_malloc(sizeof(*entry
));
280 uint64_t *key
= g_malloc(sizeof(*key
));
281 uint64_t gfn
= vtd_get_iotlb_gfn(addr
, level
);
283 trace_vtd_iotlb_page_update(source_id
, addr
, slpte
, domain_id
);
284 if (g_hash_table_size(s
->iotlb
) >= VTD_IOTLB_MAX_SIZE
) {
285 trace_vtd_iotlb_reset("iotlb exceeds size limit");
286 vtd_reset_iotlb_locked(s
);
290 entry
->domain_id
= domain_id
;
291 entry
->slpte
= slpte
;
292 entry
->access_flags
= access_flags
;
293 entry
->mask
= vtd_slpt_level_page_mask(level
);
294 *key
= vtd_get_iotlb_key(gfn
, source_id
, level
);
295 g_hash_table_replace(s
->iotlb
, key
, entry
);
298 /* Given the reg addr of both the message data and address, generate an
301 static void vtd_generate_interrupt(IntelIOMMUState
*s
, hwaddr mesg_addr_reg
,
302 hwaddr mesg_data_reg
)
306 assert(mesg_data_reg
< DMAR_REG_SIZE
);
307 assert(mesg_addr_reg
< DMAR_REG_SIZE
);
309 msi
.address
= vtd_get_long_raw(s
, mesg_addr_reg
);
310 msi
.data
= vtd_get_long_raw(s
, mesg_data_reg
);
312 trace_vtd_irq_generate(msi
.address
, msi
.data
);
314 apic_get_class()->send_msi(&msi
);
317 /* Generate a fault event to software via MSI if conditions are met.
318 * Notice that the value of FSTS_REG being passed to it should be the one
321 static void vtd_generate_fault_event(IntelIOMMUState
*s
, uint32_t pre_fsts
)
323 if (pre_fsts
& VTD_FSTS_PPF
|| pre_fsts
& VTD_FSTS_PFO
||
324 pre_fsts
& VTD_FSTS_IQE
) {
325 error_report_once("There are previous interrupt conditions "
326 "to be serviced by software, fault event "
330 vtd_set_clear_mask_long(s
, DMAR_FECTL_REG
, 0, VTD_FECTL_IP
);
331 if (vtd_get_long_raw(s
, DMAR_FECTL_REG
) & VTD_FECTL_IM
) {
332 error_report_once("Interrupt Mask set, irq is not generated");
334 vtd_generate_interrupt(s
, DMAR_FEADDR_REG
, DMAR_FEDATA_REG
);
335 vtd_set_clear_mask_long(s
, DMAR_FECTL_REG
, VTD_FECTL_IP
, 0);
339 /* Check if the Fault (F) field of the Fault Recording Register referenced by
342 static bool vtd_is_frcd_set(IntelIOMMUState
*s
, uint16_t index
)
344 /* Each reg is 128-bit */
345 hwaddr addr
= DMAR_FRCD_REG_OFFSET
+ (((uint64_t)index
) << 4);
346 addr
+= 8; /* Access the high 64-bit half */
348 assert(index
< DMAR_FRCD_REG_NR
);
350 return vtd_get_quad_raw(s
, addr
) & VTD_FRCD_F
;
353 /* Update the PPF field of Fault Status Register.
354 * Should be called whenever change the F field of any fault recording
357 static void vtd_update_fsts_ppf(IntelIOMMUState
*s
)
360 uint32_t ppf_mask
= 0;
362 for (i
= 0; i
< DMAR_FRCD_REG_NR
; i
++) {
363 if (vtd_is_frcd_set(s
, i
)) {
364 ppf_mask
= VTD_FSTS_PPF
;
368 vtd_set_clear_mask_long(s
, DMAR_FSTS_REG
, VTD_FSTS_PPF
, ppf_mask
);
369 trace_vtd_fsts_ppf(!!ppf_mask
);
372 static void vtd_set_frcd_and_update_ppf(IntelIOMMUState
*s
, uint16_t index
)
374 /* Each reg is 128-bit */
375 hwaddr addr
= DMAR_FRCD_REG_OFFSET
+ (((uint64_t)index
) << 4);
376 addr
+= 8; /* Access the high 64-bit half */
378 assert(index
< DMAR_FRCD_REG_NR
);
380 vtd_set_clear_mask_quad(s
, addr
, 0, VTD_FRCD_F
);
381 vtd_update_fsts_ppf(s
);
384 /* Must not update F field now, should be done later */
385 static void vtd_record_frcd(IntelIOMMUState
*s
, uint16_t index
,
386 uint16_t source_id
, hwaddr addr
,
387 VTDFaultReason fault
, bool is_write
)
390 hwaddr frcd_reg_addr
= DMAR_FRCD_REG_OFFSET
+ (((uint64_t)index
) << 4);
392 assert(index
< DMAR_FRCD_REG_NR
);
394 lo
= VTD_FRCD_FI(addr
);
395 hi
= VTD_FRCD_SID(source_id
) | VTD_FRCD_FR(fault
);
399 vtd_set_quad_raw(s
, frcd_reg_addr
, lo
);
400 vtd_set_quad_raw(s
, frcd_reg_addr
+ 8, hi
);
402 trace_vtd_frr_new(index
, hi
, lo
);
405 /* Try to collapse multiple pending faults from the same requester */
406 static bool vtd_try_collapse_fault(IntelIOMMUState
*s
, uint16_t source_id
)
410 hwaddr addr
= DMAR_FRCD_REG_OFFSET
+ 8; /* The high 64-bit half */
412 for (i
= 0; i
< DMAR_FRCD_REG_NR
; i
++) {
413 frcd_reg
= vtd_get_quad_raw(s
, addr
);
414 if ((frcd_reg
& VTD_FRCD_F
) &&
415 ((frcd_reg
& VTD_FRCD_SID_MASK
) == source_id
)) {
418 addr
+= 16; /* 128-bit for each */
423 /* Log and report an DMAR (address translation) fault to software */
424 static void vtd_report_dmar_fault(IntelIOMMUState
*s
, uint16_t source_id
,
425 hwaddr addr
, VTDFaultReason fault
,
428 uint32_t fsts_reg
= vtd_get_long_raw(s
, DMAR_FSTS_REG
);
430 assert(fault
< VTD_FR_MAX
);
432 if (fault
== VTD_FR_RESERVED_ERR
) {
433 /* This is not a normal fault reason case. Drop it. */
437 trace_vtd_dmar_fault(source_id
, fault
, addr
, is_write
);
439 if (fsts_reg
& VTD_FSTS_PFO
) {
440 error_report_once("New fault is not recorded due to "
441 "Primary Fault Overflow");
445 if (vtd_try_collapse_fault(s
, source_id
)) {
446 error_report_once("New fault is not recorded due to "
447 "compression of faults");
451 if (vtd_is_frcd_set(s
, s
->next_frcd_reg
)) {
452 error_report_once("Next Fault Recording Reg is used, "
453 "new fault is not recorded, set PFO field");
454 vtd_set_clear_mask_long(s
, DMAR_FSTS_REG
, 0, VTD_FSTS_PFO
);
458 vtd_record_frcd(s
, s
->next_frcd_reg
, source_id
, addr
, fault
, is_write
);
460 if (fsts_reg
& VTD_FSTS_PPF
) {
461 error_report_once("There are pending faults already, "
462 "fault event is not generated");
463 vtd_set_frcd_and_update_ppf(s
, s
->next_frcd_reg
);
465 if (s
->next_frcd_reg
== DMAR_FRCD_REG_NR
) {
466 s
->next_frcd_reg
= 0;
469 vtd_set_clear_mask_long(s
, DMAR_FSTS_REG
, VTD_FSTS_FRI_MASK
,
470 VTD_FSTS_FRI(s
->next_frcd_reg
));
471 vtd_set_frcd_and_update_ppf(s
, s
->next_frcd_reg
); /* Will set PPF */
473 if (s
->next_frcd_reg
== DMAR_FRCD_REG_NR
) {
474 s
->next_frcd_reg
= 0;
476 /* This case actually cause the PPF to be Set.
477 * So generate fault event (interrupt).
479 vtd_generate_fault_event(s
, fsts_reg
);
483 /* Handle Invalidation Queue Errors of queued invalidation interface error
486 static void vtd_handle_inv_queue_error(IntelIOMMUState
*s
)
488 uint32_t fsts_reg
= vtd_get_long_raw(s
, DMAR_FSTS_REG
);
490 vtd_set_clear_mask_long(s
, DMAR_FSTS_REG
, 0, VTD_FSTS_IQE
);
491 vtd_generate_fault_event(s
, fsts_reg
);
494 /* Set the IWC field and try to generate an invalidation completion interrupt */
495 static void vtd_generate_completion_event(IntelIOMMUState
*s
)
497 if (vtd_get_long_raw(s
, DMAR_ICS_REG
) & VTD_ICS_IWC
) {
498 trace_vtd_inv_desc_wait_irq("One pending, skip current");
501 vtd_set_clear_mask_long(s
, DMAR_ICS_REG
, 0, VTD_ICS_IWC
);
502 vtd_set_clear_mask_long(s
, DMAR_IECTL_REG
, 0, VTD_IECTL_IP
);
503 if (vtd_get_long_raw(s
, DMAR_IECTL_REG
) & VTD_IECTL_IM
) {
504 trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
505 "new event not generated");
508 /* Generate the interrupt event */
509 trace_vtd_inv_desc_wait_irq("Generating complete event");
510 vtd_generate_interrupt(s
, DMAR_IEADDR_REG
, DMAR_IEDATA_REG
);
511 vtd_set_clear_mask_long(s
, DMAR_IECTL_REG
, VTD_IECTL_IP
, 0);
515 static inline bool vtd_root_entry_present(VTDRootEntry
*root
)
517 return root
->val
& VTD_ROOT_ENTRY_P
;
520 static int vtd_get_root_entry(IntelIOMMUState
*s
, uint8_t index
,
525 addr
= s
->root
+ index
* sizeof(*re
);
526 if (dma_memory_read(&address_space_memory
, addr
, re
, sizeof(*re
))) {
528 return -VTD_FR_ROOT_TABLE_INV
;
530 re
->val
= le64_to_cpu(re
->val
);
534 static inline bool vtd_ce_present(VTDContextEntry
*context
)
536 return context
->lo
& VTD_CONTEXT_ENTRY_P
;
539 static int vtd_get_context_entry_from_root(VTDRootEntry
*root
, uint8_t index
,
544 /* we have checked that root entry is present */
545 addr
= (root
->val
& VTD_ROOT_ENTRY_CTP
) + index
* sizeof(*ce
);
546 if (dma_memory_read(&address_space_memory
, addr
, ce
, sizeof(*ce
))) {
547 return -VTD_FR_CONTEXT_TABLE_INV
;
549 ce
->lo
= le64_to_cpu(ce
->lo
);
550 ce
->hi
= le64_to_cpu(ce
->hi
);
554 static inline dma_addr_t
vtd_ce_get_slpt_base(VTDContextEntry
*ce
)
556 return ce
->lo
& VTD_CONTEXT_ENTRY_SLPTPTR
;
559 static inline uint64_t vtd_get_slpte_addr(uint64_t slpte
, uint8_t aw
)
561 return slpte
& VTD_SL_PT_BASE_ADDR_MASK(aw
);
564 /* Whether the pte indicates the address of the page frame */
565 static inline bool vtd_is_last_slpte(uint64_t slpte
, uint32_t level
)
567 return level
== VTD_SL_PT_LEVEL
|| (slpte
& VTD_SL_PT_PAGE_SIZE_MASK
);
570 /* Get the content of a spte located in @base_addr[@index] */
571 static uint64_t vtd_get_slpte(dma_addr_t base_addr
, uint32_t index
)
575 assert(index
< VTD_SL_PT_ENTRY_NR
);
577 if (dma_memory_read(&address_space_memory
,
578 base_addr
+ index
* sizeof(slpte
), &slpte
,
580 slpte
= (uint64_t)-1;
583 slpte
= le64_to_cpu(slpte
);
587 /* Given an iova and the level of paging structure, return the offset
590 static inline uint32_t vtd_iova_level_offset(uint64_t iova
, uint32_t level
)
592 return (iova
>> vtd_slpt_level_shift(level
)) &
593 ((1ULL << VTD_SL_LEVEL_BITS
) - 1);
596 /* Check Capability Register to see if the @level of page-table is supported */
597 static inline bool vtd_is_level_supported(IntelIOMMUState
*s
, uint32_t level
)
599 return VTD_CAP_SAGAW_MASK
& s
->cap
&
600 (1ULL << (level
- 2 + VTD_CAP_SAGAW_SHIFT
));
603 /* Get the page-table level that hardware should use for the second-level
604 * page-table walk from the Address Width field of context-entry.
606 static inline uint32_t vtd_ce_get_level(VTDContextEntry
*ce
)
608 return 2 + (ce
->hi
& VTD_CONTEXT_ENTRY_AW
);
611 static inline uint32_t vtd_ce_get_agaw(VTDContextEntry
*ce
)
613 return 30 + (ce
->hi
& VTD_CONTEXT_ENTRY_AW
) * 9;
616 static inline uint32_t vtd_ce_get_type(VTDContextEntry
*ce
)
618 return ce
->lo
& VTD_CONTEXT_ENTRY_TT
;
621 /* Return true if check passed, otherwise false */
622 static inline bool vtd_ce_type_check(X86IOMMUState
*x86_iommu
,
625 switch (vtd_ce_get_type(ce
)) {
626 case VTD_CONTEXT_TT_MULTI_LEVEL
:
627 /* Always supported */
629 case VTD_CONTEXT_TT_DEV_IOTLB
:
630 if (!x86_iommu
->dt_supported
) {
631 error_report_once("%s: DT specified but not supported", __func__
);
635 case VTD_CONTEXT_TT_PASS_THROUGH
:
636 if (!x86_iommu
->pt_supported
) {
637 error_report_once("%s: PT specified but not supported", __func__
);
643 error_report_once("%s: unknown ce type: %"PRIu32
, __func__
,
644 vtd_ce_get_type(ce
));
650 static inline uint64_t vtd_iova_limit(VTDContextEntry
*ce
, uint8_t aw
)
652 uint32_t ce_agaw
= vtd_ce_get_agaw(ce
);
653 return 1ULL << MIN(ce_agaw
, aw
);
656 /* Return true if IOVA passes range check, otherwise false. */
657 static inline bool vtd_iova_range_check(uint64_t iova
, VTDContextEntry
*ce
,
661 * Check if @iova is above 2^X-1, where X is the minimum of MGAW
662 * in CAP_REG and AW in context-entry.
664 return !(iova
& ~(vtd_iova_limit(ce
, aw
) - 1));
668 * Rsvd field masks for spte:
669 * Index [1] to [4] 4k pages
670 * Index [5] to [8] large pages
672 static uint64_t vtd_paging_entry_rsvd_field
[9];
674 static bool vtd_slpte_nonzero_rsvd(uint64_t slpte
, uint32_t level
)
676 if (slpte
& VTD_SL_PT_PAGE_SIZE_MASK
) {
677 /* Maybe large page */
678 return slpte
& vtd_paging_entry_rsvd_field
[level
+ 4];
680 return slpte
& vtd_paging_entry_rsvd_field
[level
];
684 /* Find the VTD address space associated with a given bus number */
685 static VTDBus
*vtd_find_as_from_bus_num(IntelIOMMUState
*s
, uint8_t bus_num
)
687 VTDBus
*vtd_bus
= s
->vtd_as_by_bus_num
[bus_num
];
690 * Iterate over the registered buses to find the one which
691 * currently hold this bus number, and update the bus_num
696 g_hash_table_iter_init(&iter
, s
->vtd_as_by_busptr
);
697 while (g_hash_table_iter_next(&iter
, NULL
, (void **)&vtd_bus
)) {
698 if (pci_bus_num(vtd_bus
->bus
) == bus_num
) {
699 s
->vtd_as_by_bus_num
[bus_num
] = vtd_bus
;
707 /* Given the @iova, get relevant @slptep. @slpte_level will be the last level
708 * of the translation, can be used for deciding the size of large page.
710 static int vtd_iova_to_slpte(VTDContextEntry
*ce
, uint64_t iova
, bool is_write
,
711 uint64_t *slptep
, uint32_t *slpte_level
,
712 bool *reads
, bool *writes
, uint8_t aw_bits
)
714 dma_addr_t addr
= vtd_ce_get_slpt_base(ce
);
715 uint32_t level
= vtd_ce_get_level(ce
);
718 uint64_t access_right_check
;
720 if (!vtd_iova_range_check(iova
, ce
, aw_bits
)) {
721 error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64
")",
723 return -VTD_FR_ADDR_BEYOND_MGAW
;
726 /* FIXME: what is the Atomics request here? */
727 access_right_check
= is_write
? VTD_SL_W
: VTD_SL_R
;
730 offset
= vtd_iova_level_offset(iova
, level
);
731 slpte
= vtd_get_slpte(addr
, offset
);
733 if (slpte
== (uint64_t)-1) {
734 error_report_once("%s: detected read error on DMAR slpte "
735 "(iova=0x%" PRIx64
")", __func__
, iova
);
736 if (level
== vtd_ce_get_level(ce
)) {
737 /* Invalid programming of context-entry */
738 return -VTD_FR_CONTEXT_ENTRY_INV
;
740 return -VTD_FR_PAGING_ENTRY_INV
;
743 *reads
= (*reads
) && (slpte
& VTD_SL_R
);
744 *writes
= (*writes
) && (slpte
& VTD_SL_W
);
745 if (!(slpte
& access_right_check
)) {
746 error_report_once("%s: detected slpte permission error "
747 "(iova=0x%" PRIx64
", level=0x%" PRIx32
", "
748 "slpte=0x%" PRIx64
", write=%d)", __func__
,
749 iova
, level
, slpte
, is_write
);
750 return is_write
? -VTD_FR_WRITE
: -VTD_FR_READ
;
752 if (vtd_slpte_nonzero_rsvd(slpte
, level
)) {
753 error_report_once("%s: detected splte reserve non-zero "
754 "iova=0x%" PRIx64
", level=0x%" PRIx32
755 "slpte=0x%" PRIx64
")", __func__
, iova
,
757 return -VTD_FR_PAGING_ENTRY_RSVD
;
760 if (vtd_is_last_slpte(slpte
, level
)) {
762 *slpte_level
= level
;
765 addr
= vtd_get_slpte_addr(slpte
, aw_bits
);
770 typedef int (*vtd_page_walk_hook
)(IOMMUTLBEntry
*entry
, void *private);
773 * Constant information used during page walking
775 * @hook_fn: hook func to be called when detected page
776 * @private: private data to be passed into hook func
777 * @notify_unmap: whether we should notify invalid entries
778 * @as: VT-d address space of the device
779 * @aw: maximum address width
780 * @domain: domain ID of the page walk
784 vtd_page_walk_hook hook_fn
;
789 } vtd_page_walk_info
;
791 static int vtd_page_walk_one(IOMMUTLBEntry
*entry
, vtd_page_walk_info
*info
)
793 VTDAddressSpace
*as
= info
->as
;
794 vtd_page_walk_hook hook_fn
= info
->hook_fn
;
795 void *private = info
->private;
798 .size
= entry
->addr_mask
,
799 .translated_addr
= entry
->translated_addr
,
802 DMAMap
*mapped
= iova_tree_find(as
->iova_tree
, &target
);
804 if (entry
->perm
== IOMMU_NONE
&& !info
->notify_unmap
) {
805 trace_vtd_page_walk_one_skip_unmap(entry
->iova
, entry
->addr_mask
);
811 /* Update local IOVA mapped ranges */
814 /* If it's exactly the same translation, skip */
815 if (!memcmp(mapped
, &target
, sizeof(target
))) {
816 trace_vtd_page_walk_one_skip_map(entry
->iova
, entry
->addr_mask
,
817 entry
->translated_addr
);
821 * Translation changed. Normally this should not
822 * happen, but it can happen when with buggy guest
823 * OSes. Note that there will be a small window that
824 * we don't have map at all. But that's the best
825 * effort we can do. The ideal way to emulate this is
826 * atomically modify the PTE to follow what has
827 * changed, but we can't. One example is that vfio
828 * driver only has VFIO_IOMMU_[UN]MAP_DMA but no
829 * interface to modify a mapping (meanwhile it seems
830 * meaningless to even provide one). Anyway, let's
831 * mark this as a TODO in case one day we'll have
834 IOMMUAccessFlags cache_perm
= entry
->perm
;
837 /* Emulate an UNMAP */
838 entry
->perm
= IOMMU_NONE
;
839 trace_vtd_page_walk_one(info
->domain_id
,
841 entry
->translated_addr
,
844 ret
= hook_fn(entry
, private);
848 /* Drop any existing mapping */
849 iova_tree_remove(as
->iova_tree
, &target
);
850 /* Recover the correct permission */
851 entry
->perm
= cache_perm
;
854 iova_tree_insert(as
->iova_tree
, &target
);
857 /* Skip since we didn't map this range at all */
858 trace_vtd_page_walk_one_skip_unmap(entry
->iova
, entry
->addr_mask
);
861 iova_tree_remove(as
->iova_tree
, &target
);
864 trace_vtd_page_walk_one(info
->domain_id
, entry
->iova
,
865 entry
->translated_addr
, entry
->addr_mask
,
867 return hook_fn(entry
, private);
871 * vtd_page_walk_level - walk over specific level for IOVA range
873 * @addr: base GPA addr to start the walk
874 * @start: IOVA range start address
875 * @end: IOVA range end address (start <= addr < end)
876 * @read: whether parent level has read permission
877 * @write: whether parent level has write permission
878 * @info: constant information for the page walk
880 static int vtd_page_walk_level(dma_addr_t addr
, uint64_t start
,
881 uint64_t end
, uint32_t level
, bool read
,
882 bool write
, vtd_page_walk_info
*info
)
884 bool read_cur
, write_cur
, entry_valid
;
887 uint64_t subpage_size
, subpage_mask
;
889 uint64_t iova
= start
;
893 trace_vtd_page_walk_level(addr
, level
, start
, end
);
895 subpage_size
= 1ULL << vtd_slpt_level_shift(level
);
896 subpage_mask
= vtd_slpt_level_page_mask(level
);
899 iova_next
= (iova
& subpage_mask
) + subpage_size
;
901 offset
= vtd_iova_level_offset(iova
, level
);
902 slpte
= vtd_get_slpte(addr
, offset
);
904 if (slpte
== (uint64_t)-1) {
905 trace_vtd_page_walk_skip_read(iova
, iova_next
);
909 if (vtd_slpte_nonzero_rsvd(slpte
, level
)) {
910 trace_vtd_page_walk_skip_reserve(iova
, iova_next
);
914 /* Permissions are stacked with parents' */
915 read_cur
= read
&& (slpte
& VTD_SL_R
);
916 write_cur
= write
&& (slpte
& VTD_SL_W
);
919 * As long as we have either read/write permission, this is a
920 * valid entry. The rule works for both page entries and page
923 entry_valid
= read_cur
| write_cur
;
925 if (!vtd_is_last_slpte(slpte
, level
) && entry_valid
) {
927 * This is a valid PDE (or even bigger than PDE). We need
928 * to walk one further level.
930 ret
= vtd_page_walk_level(vtd_get_slpte_addr(slpte
, info
->aw
),
931 iova
, MIN(iova_next
, end
), level
- 1,
932 read_cur
, write_cur
, info
);
935 * This means we are either:
937 * (1) the real page entry (either 4K page, or huge page)
938 * (2) the whole range is invalid
940 * In either case, we send an IOTLB notification down.
942 entry
.target_as
= &address_space_memory
;
943 entry
.iova
= iova
& subpage_mask
;
944 entry
.perm
= IOMMU_ACCESS_FLAG(read_cur
, write_cur
);
945 entry
.addr_mask
= ~subpage_mask
;
946 /* NOTE: this is only meaningful if entry_valid == true */
947 entry
.translated_addr
= vtd_get_slpte_addr(slpte
, info
->aw
);
948 ret
= vtd_page_walk_one(&entry
, info
);
963 * vtd_page_walk - walk specific IOVA range, and call the hook
965 * @ce: context entry to walk upon
966 * @start: IOVA address to start the walk
967 * @end: IOVA range end address (start <= addr < end)
968 * @info: page walking information struct
970 static int vtd_page_walk(VTDContextEntry
*ce
, uint64_t start
, uint64_t end
,
971 vtd_page_walk_info
*info
)
973 dma_addr_t addr
= vtd_ce_get_slpt_base(ce
);
974 uint32_t level
= vtd_ce_get_level(ce
);
976 if (!vtd_iova_range_check(start
, ce
, info
->aw
)) {
977 return -VTD_FR_ADDR_BEYOND_MGAW
;
980 if (!vtd_iova_range_check(end
, ce
, info
->aw
)) {
981 /* Fix end so that it reaches the maximum */
982 end
= vtd_iova_limit(ce
, info
->aw
);
985 return vtd_page_walk_level(addr
, start
, end
, level
, true, true, info
);
988 /* Map a device to its corresponding domain (context-entry) */
989 static int vtd_dev_to_context_entry(IntelIOMMUState
*s
, uint8_t bus_num
,
990 uint8_t devfn
, VTDContextEntry
*ce
)
994 X86IOMMUState
*x86_iommu
= X86_IOMMU_DEVICE(s
);
996 ret_fr
= vtd_get_root_entry(s
, bus_num
, &re
);
1001 if (!vtd_root_entry_present(&re
)) {
1002 /* Not error - it's okay we don't have root entry. */
1003 trace_vtd_re_not_present(bus_num
);
1004 return -VTD_FR_ROOT_ENTRY_P
;
1007 if (re
.rsvd
|| (re
.val
& VTD_ROOT_ENTRY_RSVD(s
->aw_bits
))) {
1008 error_report_once("%s: invalid root entry: rsvd=0x%"PRIx64
1009 ", val=0x%"PRIx64
" (reserved nonzero)",
1010 __func__
, re
.rsvd
, re
.val
);
1011 return -VTD_FR_ROOT_ENTRY_RSVD
;
1014 ret_fr
= vtd_get_context_entry_from_root(&re
, devfn
, ce
);
1019 if (!vtd_ce_present(ce
)) {
1020 /* Not error - it's okay we don't have context entry. */
1021 trace_vtd_ce_not_present(bus_num
, devfn
);
1022 return -VTD_FR_CONTEXT_ENTRY_P
;
1025 if ((ce
->hi
& VTD_CONTEXT_ENTRY_RSVD_HI
) ||
1026 (ce
->lo
& VTD_CONTEXT_ENTRY_RSVD_LO(s
->aw_bits
))) {
1027 error_report_once("%s: invalid context entry: hi=%"PRIx64
1028 ", lo=%"PRIx64
" (reserved nonzero)",
1029 __func__
, ce
->hi
, ce
->lo
);
1030 return -VTD_FR_CONTEXT_ENTRY_RSVD
;
1033 /* Check if the programming of context-entry is valid */
1034 if (!vtd_is_level_supported(s
, vtd_ce_get_level(ce
))) {
1035 error_report_once("%s: invalid context entry: hi=%"PRIx64
1036 ", lo=%"PRIx64
" (level %d not supported)",
1037 __func__
, ce
->hi
, ce
->lo
, vtd_ce_get_level(ce
));
1038 return -VTD_FR_CONTEXT_ENTRY_INV
;
1041 /* Do translation type check */
1042 if (!vtd_ce_type_check(x86_iommu
, ce
)) {
1043 /* Errors dumped in vtd_ce_type_check() */
1044 return -VTD_FR_CONTEXT_ENTRY_INV
;
1050 static int vtd_sync_shadow_page_hook(IOMMUTLBEntry
*entry
,
1053 memory_region_notify_iommu((IOMMUMemoryRegion
*)private, 0, *entry
);
1057 static int vtd_sync_shadow_page_table_range(VTDAddressSpace
*vtd_as
,
1058 VTDContextEntry
*ce
,
1059 hwaddr addr
, hwaddr size
)
1061 IntelIOMMUState
*s
= vtd_as
->iommu_state
;
1062 vtd_page_walk_info info
= {
1063 .hook_fn
= vtd_sync_shadow_page_hook
,
1064 .private = (void *)&vtd_as
->iommu
,
1065 .notify_unmap
= true,
1068 .domain_id
= VTD_CONTEXT_ENTRY_DID(ce
->hi
),
1071 return vtd_page_walk(ce
, addr
, addr
+ size
, &info
);
1074 static int vtd_sync_shadow_page_table(VTDAddressSpace
*vtd_as
)
1080 ret
= vtd_dev_to_context_entry(vtd_as
->iommu_state
,
1081 pci_bus_num(vtd_as
->bus
),
1082 vtd_as
->devfn
, &ce
);
1084 if (ret
== -VTD_FR_CONTEXT_ENTRY_P
) {
1086 * It's a valid scenario to have a context entry that is
1087 * not present. For example, when a device is removed
1088 * from an existing domain then the context entry will be
1089 * zeroed by the guest before it was put into another
1090 * domain. When this happens, instead of synchronizing
1091 * the shadow pages we should invalidate all existing
1092 * mappings and notify the backends.
1094 IOMMU_NOTIFIER_FOREACH(n
, &vtd_as
->iommu
) {
1095 vtd_address_space_unmap(vtd_as
, n
);
1102 return vtd_sync_shadow_page_table_range(vtd_as
, &ce
, 0, UINT64_MAX
);
1106 * Fetch translation type for specific device. Returns <0 if error
1107 * happens, otherwise return the shifted type to check against
1110 static int vtd_dev_get_trans_type(VTDAddressSpace
*as
)
1116 s
= as
->iommu_state
;
1118 ret
= vtd_dev_to_context_entry(s
, pci_bus_num(as
->bus
),
1124 return vtd_ce_get_type(&ce
);
1127 static bool vtd_dev_pt_enabled(VTDAddressSpace
*as
)
1133 ret
= vtd_dev_get_trans_type(as
);
1136 * Possibly failed to parse the context entry for some reason
1137 * (e.g., during init, or any guest configuration errors on
1138 * context entries). We should assume PT not enabled for
1144 return ret
== VTD_CONTEXT_TT_PASS_THROUGH
;
1147 /* Return whether the device is using IOMMU translation. */
1148 static bool vtd_switch_address_space(VTDAddressSpace
*as
)
1151 /* Whether we need to take the BQL on our own */
1152 bool take_bql
= !qemu_mutex_iothread_locked();
1156 use_iommu
= as
->iommu_state
->dmar_enabled
& !vtd_dev_pt_enabled(as
);
1158 trace_vtd_switch_address_space(pci_bus_num(as
->bus
),
1159 VTD_PCI_SLOT(as
->devfn
),
1160 VTD_PCI_FUNC(as
->devfn
),
1164 * It's possible that we reach here without BQL, e.g., when called
1165 * from vtd_pt_enable_fast_path(). However the memory APIs need
1166 * it. We'd better make sure we have had it already, or, take it.
1169 qemu_mutex_lock_iothread();
1172 /* Turn off first then on the other */
1174 memory_region_set_enabled(&as
->sys_alias
, false);
1175 memory_region_set_enabled(MEMORY_REGION(&as
->iommu
), true);
1177 memory_region_set_enabled(MEMORY_REGION(&as
->iommu
), false);
1178 memory_region_set_enabled(&as
->sys_alias
, true);
1182 qemu_mutex_unlock_iothread();
1188 static void vtd_switch_address_space_all(IntelIOMMUState
*s
)
1190 GHashTableIter iter
;
1194 g_hash_table_iter_init(&iter
, s
->vtd_as_by_busptr
);
1195 while (g_hash_table_iter_next(&iter
, NULL
, (void **)&vtd_bus
)) {
1196 for (i
= 0; i
< PCI_DEVFN_MAX
; i
++) {
1197 if (!vtd_bus
->dev_as
[i
]) {
1200 vtd_switch_address_space(vtd_bus
->dev_as
[i
]);
1205 static inline uint16_t vtd_make_source_id(uint8_t bus_num
, uint8_t devfn
)
1207 return ((bus_num
& 0xffUL
) << 8) | (devfn
& 0xffUL
);
1210 static const bool vtd_qualified_faults
[] = {
1211 [VTD_FR_RESERVED
] = false,
1212 [VTD_FR_ROOT_ENTRY_P
] = false,
1213 [VTD_FR_CONTEXT_ENTRY_P
] = true,
1214 [VTD_FR_CONTEXT_ENTRY_INV
] = true,
1215 [VTD_FR_ADDR_BEYOND_MGAW
] = true,
1216 [VTD_FR_WRITE
] = true,
1217 [VTD_FR_READ
] = true,
1218 [VTD_FR_PAGING_ENTRY_INV
] = true,
1219 [VTD_FR_ROOT_TABLE_INV
] = false,
1220 [VTD_FR_CONTEXT_TABLE_INV
] = false,
1221 [VTD_FR_ROOT_ENTRY_RSVD
] = false,
1222 [VTD_FR_PAGING_ENTRY_RSVD
] = true,
1223 [VTD_FR_CONTEXT_ENTRY_TT
] = true,
1224 [VTD_FR_RESERVED_ERR
] = false,
1225 [VTD_FR_MAX
] = false,
1228 /* To see if a fault condition is "qualified", which is reported to software
1229 * only if the FPD field in the context-entry used to process the faulting
1232 static inline bool vtd_is_qualified_fault(VTDFaultReason fault
)
1234 return vtd_qualified_faults
[fault
];
1237 static inline bool vtd_is_interrupt_addr(hwaddr addr
)
1239 return VTD_INTERRUPT_ADDR_FIRST
<= addr
&& addr
<= VTD_INTERRUPT_ADDR_LAST
;
1242 static void vtd_pt_enable_fast_path(IntelIOMMUState
*s
, uint16_t source_id
)
1245 VTDAddressSpace
*vtd_as
;
1246 bool success
= false;
1248 vtd_bus
= vtd_find_as_from_bus_num(s
, VTD_SID_TO_BUS(source_id
));
1253 vtd_as
= vtd_bus
->dev_as
[VTD_SID_TO_DEVFN(source_id
)];
1258 if (vtd_switch_address_space(vtd_as
) == false) {
1259 /* We switched off IOMMU region successfully. */
1264 trace_vtd_pt_enable_fast_path(source_id
, success
);
1267 /* Map dev to context-entry then do a paging-structures walk to do a iommu
1270 * Called from RCU critical section.
1272 * @bus_num: The bus number
1273 * @devfn: The devfn, which is the combined of device and function number
1274 * @is_write: The access is a write operation
1275 * @entry: IOMMUTLBEntry that contain the addr to be translated and result
1277 * Returns true if translation is successful, otherwise false.
1279 static bool vtd_do_iommu_translate(VTDAddressSpace
*vtd_as
, PCIBus
*bus
,
1280 uint8_t devfn
, hwaddr addr
, bool is_write
,
1281 IOMMUTLBEntry
*entry
)
1283 IntelIOMMUState
*s
= vtd_as
->iommu_state
;
1285 uint8_t bus_num
= pci_bus_num(bus
);
1286 VTDContextCacheEntry
*cc_entry
;
1287 uint64_t slpte
, page_mask
;
1289 uint16_t source_id
= vtd_make_source_id(bus_num
, devfn
);
1291 bool is_fpd_set
= false;
1294 uint8_t access_flags
;
1295 VTDIOTLBEntry
*iotlb_entry
;
1298 * We have standalone memory region for interrupt addresses, we
1299 * should never receive translation requests in this region.
1301 assert(!vtd_is_interrupt_addr(addr
));
1305 cc_entry
= &vtd_as
->context_cache_entry
;
1307 /* Try to fetch slpte form IOTLB */
1308 iotlb_entry
= vtd_lookup_iotlb(s
, source_id
, addr
);
1310 trace_vtd_iotlb_page_hit(source_id
, addr
, iotlb_entry
->slpte
,
1311 iotlb_entry
->domain_id
);
1312 slpte
= iotlb_entry
->slpte
;
1313 access_flags
= iotlb_entry
->access_flags
;
1314 page_mask
= iotlb_entry
->mask
;
1318 /* Try to fetch context-entry from cache first */
1319 if (cc_entry
->context_cache_gen
== s
->context_cache_gen
) {
1320 trace_vtd_iotlb_cc_hit(bus_num
, devfn
, cc_entry
->context_entry
.hi
,
1321 cc_entry
->context_entry
.lo
,
1322 cc_entry
->context_cache_gen
);
1323 ce
= cc_entry
->context_entry
;
1324 is_fpd_set
= ce
.lo
& VTD_CONTEXT_ENTRY_FPD
;
1326 ret_fr
= vtd_dev_to_context_entry(s
, bus_num
, devfn
, &ce
);
1327 is_fpd_set
= ce
.lo
& VTD_CONTEXT_ENTRY_FPD
;
1330 if (is_fpd_set
&& vtd_is_qualified_fault(ret_fr
)) {
1331 trace_vtd_fault_disabled();
1333 vtd_report_dmar_fault(s
, source_id
, addr
, ret_fr
, is_write
);
1337 /* Update context-cache */
1338 trace_vtd_iotlb_cc_update(bus_num
, devfn
, ce
.hi
, ce
.lo
,
1339 cc_entry
->context_cache_gen
,
1340 s
->context_cache_gen
);
1341 cc_entry
->context_entry
= ce
;
1342 cc_entry
->context_cache_gen
= s
->context_cache_gen
;
1346 * We don't need to translate for pass-through context entries.
1347 * Also, let's ignore IOTLB caching as well for PT devices.
1349 if (vtd_ce_get_type(&ce
) == VTD_CONTEXT_TT_PASS_THROUGH
) {
1350 entry
->iova
= addr
& VTD_PAGE_MASK_4K
;
1351 entry
->translated_addr
= entry
->iova
;
1352 entry
->addr_mask
= ~VTD_PAGE_MASK_4K
;
1353 entry
->perm
= IOMMU_RW
;
1354 trace_vtd_translate_pt(source_id
, entry
->iova
);
1357 * When this happens, it means firstly caching-mode is not
1358 * enabled, and this is the first passthrough translation for
1359 * the device. Let's enable the fast path for passthrough.
1361 * When passthrough is disabled again for the device, we can
1362 * capture it via the context entry invalidation, then the
1363 * IOMMU region can be swapped back.
1365 vtd_pt_enable_fast_path(s
, source_id
);
1366 vtd_iommu_unlock(s
);
1370 ret_fr
= vtd_iova_to_slpte(&ce
, addr
, is_write
, &slpte
, &level
,
1371 &reads
, &writes
, s
->aw_bits
);
1374 if (is_fpd_set
&& vtd_is_qualified_fault(ret_fr
)) {
1375 trace_vtd_fault_disabled();
1377 vtd_report_dmar_fault(s
, source_id
, addr
, ret_fr
, is_write
);
1382 page_mask
= vtd_slpt_level_page_mask(level
);
1383 access_flags
= IOMMU_ACCESS_FLAG(reads
, writes
);
1384 vtd_update_iotlb(s
, source_id
, VTD_CONTEXT_ENTRY_DID(ce
.hi
), addr
, slpte
,
1385 access_flags
, level
);
1387 vtd_iommu_unlock(s
);
1388 entry
->iova
= addr
& page_mask
;
1389 entry
->translated_addr
= vtd_get_slpte_addr(slpte
, s
->aw_bits
) & page_mask
;
1390 entry
->addr_mask
= ~page_mask
;
1391 entry
->perm
= access_flags
;
1395 vtd_iommu_unlock(s
);
1397 entry
->translated_addr
= 0;
1398 entry
->addr_mask
= 0;
1399 entry
->perm
= IOMMU_NONE
;
1403 static void vtd_root_table_setup(IntelIOMMUState
*s
)
1405 s
->root
= vtd_get_quad_raw(s
, DMAR_RTADDR_REG
);
1406 s
->root_extended
= s
->root
& VTD_RTADDR_RTT
;
1407 s
->root
&= VTD_RTADDR_ADDR_MASK(s
->aw_bits
);
1409 trace_vtd_reg_dmar_root(s
->root
, s
->root_extended
);
1412 static void vtd_iec_notify_all(IntelIOMMUState
*s
, bool global
,
1413 uint32_t index
, uint32_t mask
)
1415 x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s
), global
, index
, mask
);
1418 static void vtd_interrupt_remap_table_setup(IntelIOMMUState
*s
)
1421 value
= vtd_get_quad_raw(s
, DMAR_IRTA_REG
);
1422 s
->intr_size
= 1UL << ((value
& VTD_IRTA_SIZE_MASK
) + 1);
1423 s
->intr_root
= value
& VTD_IRTA_ADDR_MASK(s
->aw_bits
);
1424 s
->intr_eime
= value
& VTD_IRTA_EIME
;
1426 /* Notify global invalidation */
1427 vtd_iec_notify_all(s
, true, 0, 0);
1429 trace_vtd_reg_ir_root(s
->intr_root
, s
->intr_size
);
1432 static void vtd_iommu_replay_all(IntelIOMMUState
*s
)
1434 VTDAddressSpace
*vtd_as
;
1436 QLIST_FOREACH(vtd_as
, &s
->vtd_as_with_notifiers
, next
) {
1437 vtd_sync_shadow_page_table(vtd_as
);
1441 static void vtd_context_global_invalidate(IntelIOMMUState
*s
)
1443 trace_vtd_inv_desc_cc_global();
1444 /* Protects context cache */
1446 s
->context_cache_gen
++;
1447 if (s
->context_cache_gen
== VTD_CONTEXT_CACHE_GEN_MAX
) {
1448 vtd_reset_context_cache_locked(s
);
1450 vtd_iommu_unlock(s
);
1451 vtd_address_space_refresh_all(s
);
1453 * From VT-d spec 6.5.2.1, a global context entry invalidation
1454 * should be followed by a IOTLB global invalidation, so we should
1455 * be safe even without this. Hoewever, let's replay the region as
1456 * well to be safer, and go back here when we need finer tunes for
1457 * VT-d emulation codes.
1459 vtd_iommu_replay_all(s
);
1462 /* Do a context-cache device-selective invalidation.
1463 * @func_mask: FM field after shifting
1465 static void vtd_context_device_invalidate(IntelIOMMUState
*s
,
1471 VTDAddressSpace
*vtd_as
;
1472 uint8_t bus_n
, devfn
;
1475 trace_vtd_inv_desc_cc_devices(source_id
, func_mask
);
1477 switch (func_mask
& 3) {
1479 mask
= 0; /* No bits in the SID field masked */
1482 mask
= 4; /* Mask bit 2 in the SID field */
1485 mask
= 6; /* Mask bit 2:1 in the SID field */
1488 mask
= 7; /* Mask bit 2:0 in the SID field */
1493 bus_n
= VTD_SID_TO_BUS(source_id
);
1494 vtd_bus
= vtd_find_as_from_bus_num(s
, bus_n
);
1496 devfn
= VTD_SID_TO_DEVFN(source_id
);
1497 for (devfn_it
= 0; devfn_it
< PCI_DEVFN_MAX
; ++devfn_it
) {
1498 vtd_as
= vtd_bus
->dev_as
[devfn_it
];
1499 if (vtd_as
&& ((devfn_it
& mask
) == (devfn
& mask
))) {
1500 trace_vtd_inv_desc_cc_device(bus_n
, VTD_PCI_SLOT(devfn_it
),
1501 VTD_PCI_FUNC(devfn_it
));
1503 vtd_as
->context_cache_entry
.context_cache_gen
= 0;
1504 vtd_iommu_unlock(s
);
1506 * Do switch address space when needed, in case if the
1507 * device passthrough bit is switched.
1509 vtd_switch_address_space(vtd_as
);
1511 * So a device is moving out of (or moving into) a
1512 * domain, resync the shadow page table.
1513 * This won't bring bad even if we have no such
1514 * notifier registered - the IOMMU notification
1515 * framework will skip MAP notifications if that
1518 vtd_sync_shadow_page_table(vtd_as
);
1524 /* Context-cache invalidation
1525 * Returns the Context Actual Invalidation Granularity.
1526 * @val: the content of the CCMD_REG
1528 static uint64_t vtd_context_cache_invalidate(IntelIOMMUState
*s
, uint64_t val
)
1531 uint64_t type
= val
& VTD_CCMD_CIRG_MASK
;
1534 case VTD_CCMD_DOMAIN_INVL
:
1536 case VTD_CCMD_GLOBAL_INVL
:
1537 caig
= VTD_CCMD_GLOBAL_INVL_A
;
1538 vtd_context_global_invalidate(s
);
1541 case VTD_CCMD_DEVICE_INVL
:
1542 caig
= VTD_CCMD_DEVICE_INVL_A
;
1543 vtd_context_device_invalidate(s
, VTD_CCMD_SID(val
), VTD_CCMD_FM(val
));
1547 error_report_once("%s: invalid context: 0x%" PRIx64
,
1554 static void vtd_iotlb_global_invalidate(IntelIOMMUState
*s
)
1556 trace_vtd_inv_desc_iotlb_global();
1558 vtd_iommu_replay_all(s
);
1561 static void vtd_iotlb_domain_invalidate(IntelIOMMUState
*s
, uint16_t domain_id
)
1564 VTDAddressSpace
*vtd_as
;
1566 trace_vtd_inv_desc_iotlb_domain(domain_id
);
1569 g_hash_table_foreach_remove(s
->iotlb
, vtd_hash_remove_by_domain
,
1571 vtd_iommu_unlock(s
);
1573 QLIST_FOREACH(vtd_as
, &s
->vtd_as_with_notifiers
, next
) {
1574 if (!vtd_dev_to_context_entry(s
, pci_bus_num(vtd_as
->bus
),
1575 vtd_as
->devfn
, &ce
) &&
1576 domain_id
== VTD_CONTEXT_ENTRY_DID(ce
.hi
)) {
1577 vtd_sync_shadow_page_table(vtd_as
);
1582 static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState
*s
,
1583 uint16_t domain_id
, hwaddr addr
,
1586 VTDAddressSpace
*vtd_as
;
1589 hwaddr size
= (1 << am
) * VTD_PAGE_SIZE
;
1591 QLIST_FOREACH(vtd_as
, &(s
->vtd_as_with_notifiers
), next
) {
1592 ret
= vtd_dev_to_context_entry(s
, pci_bus_num(vtd_as
->bus
),
1593 vtd_as
->devfn
, &ce
);
1594 if (!ret
&& domain_id
== VTD_CONTEXT_ENTRY_DID(ce
.hi
)) {
1595 if (vtd_as_has_map_notifier(vtd_as
)) {
1597 * As long as we have MAP notifications registered in
1598 * any of our IOMMU notifiers, we need to sync the
1599 * shadow page table.
1601 vtd_sync_shadow_page_table_range(vtd_as
, &ce
, addr
, size
);
1604 * For UNMAP-only notifiers, we don't need to walk the
1605 * page tables. We just deliver the PSI down to
1606 * invalidate caches.
1608 IOMMUTLBEntry entry
= {
1609 .target_as
= &address_space_memory
,
1611 .translated_addr
= 0,
1612 .addr_mask
= size
- 1,
1615 memory_region_notify_iommu(&vtd_as
->iommu
, 0, entry
);
1621 static void vtd_iotlb_page_invalidate(IntelIOMMUState
*s
, uint16_t domain_id
,
1622 hwaddr addr
, uint8_t am
)
1624 VTDIOTLBPageInvInfo info
;
1626 trace_vtd_inv_desc_iotlb_pages(domain_id
, addr
, am
);
1628 assert(am
<= VTD_MAMV
);
1629 info
.domain_id
= domain_id
;
1631 info
.mask
= ~((1 << am
) - 1);
1633 g_hash_table_foreach_remove(s
->iotlb
, vtd_hash_remove_by_page
, &info
);
1634 vtd_iommu_unlock(s
);
1635 vtd_iotlb_page_invalidate_notify(s
, domain_id
, addr
, am
);
1639 * Returns the IOTLB Actual Invalidation Granularity.
1640 * @val: the content of the IOTLB_REG
1642 static uint64_t vtd_iotlb_flush(IntelIOMMUState
*s
, uint64_t val
)
1645 uint64_t type
= val
& VTD_TLB_FLUSH_GRANU_MASK
;
1651 case VTD_TLB_GLOBAL_FLUSH
:
1652 iaig
= VTD_TLB_GLOBAL_FLUSH_A
;
1653 vtd_iotlb_global_invalidate(s
);
1656 case VTD_TLB_DSI_FLUSH
:
1657 domain_id
= VTD_TLB_DID(val
);
1658 iaig
= VTD_TLB_DSI_FLUSH_A
;
1659 vtd_iotlb_domain_invalidate(s
, domain_id
);
1662 case VTD_TLB_PSI_FLUSH
:
1663 domain_id
= VTD_TLB_DID(val
);
1664 addr
= vtd_get_quad_raw(s
, DMAR_IVA_REG
);
1665 am
= VTD_IVA_AM(addr
);
1666 addr
= VTD_IVA_ADDR(addr
);
1667 if (am
> VTD_MAMV
) {
1668 error_report_once("%s: address mask overflow: 0x%" PRIx64
,
1669 __func__
, vtd_get_quad_raw(s
, DMAR_IVA_REG
));
1673 iaig
= VTD_TLB_PSI_FLUSH_A
;
1674 vtd_iotlb_page_invalidate(s
, domain_id
, addr
, am
);
1678 error_report_once("%s: invalid granularity: 0x%" PRIx64
,
1685 static void vtd_fetch_inv_desc(IntelIOMMUState
*s
);
1687 static inline bool vtd_queued_inv_disable_check(IntelIOMMUState
*s
)
1689 return s
->qi_enabled
&& (s
->iq_tail
== s
->iq_head
) &&
1690 (s
->iq_last_desc_type
== VTD_INV_DESC_WAIT
);
1693 static void vtd_handle_gcmd_qie(IntelIOMMUState
*s
, bool en
)
1695 uint64_t iqa_val
= vtd_get_quad_raw(s
, DMAR_IQA_REG
);
1697 trace_vtd_inv_qi_enable(en
);
1700 s
->iq
= iqa_val
& VTD_IQA_IQA_MASK(s
->aw_bits
);
1701 /* 2^(x+8) entries */
1702 s
->iq_size
= 1UL << ((iqa_val
& VTD_IQA_QS
) + 8);
1703 s
->qi_enabled
= true;
1704 trace_vtd_inv_qi_setup(s
->iq
, s
->iq_size
);
1705 /* Ok - report back to driver */
1706 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, 0, VTD_GSTS_QIES
);
1708 if (s
->iq_tail
!= 0) {
1710 * This is a spec violation but Windows guests are known to set up
1711 * Queued Invalidation this way so we allow the write and process
1712 * Invalidation Descriptors right away.
1714 trace_vtd_warn_invalid_qi_tail(s
->iq_tail
);
1715 if (!(vtd_get_long_raw(s
, DMAR_FSTS_REG
) & VTD_FSTS_IQE
)) {
1716 vtd_fetch_inv_desc(s
);
1720 if (vtd_queued_inv_disable_check(s
)) {
1721 /* disable Queued Invalidation */
1722 vtd_set_quad_raw(s
, DMAR_IQH_REG
, 0);
1724 s
->qi_enabled
= false;
1725 /* Ok - report back to driver */
1726 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, VTD_GSTS_QIES
, 0);
1728 error_report_once("%s: detected improper state when disable QI "
1729 "(head=0x%x, tail=0x%x, last_type=%d)",
1731 s
->iq_head
, s
->iq_tail
, s
->iq_last_desc_type
);
1736 /* Set Root Table Pointer */
1737 static void vtd_handle_gcmd_srtp(IntelIOMMUState
*s
)
1739 vtd_root_table_setup(s
);
1740 /* Ok - report back to driver */
1741 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, 0, VTD_GSTS_RTPS
);
1742 vtd_reset_caches(s
);
1743 vtd_address_space_refresh_all(s
);
1746 /* Set Interrupt Remap Table Pointer */
1747 static void vtd_handle_gcmd_sirtp(IntelIOMMUState
*s
)
1749 vtd_interrupt_remap_table_setup(s
);
1750 /* Ok - report back to driver */
1751 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, 0, VTD_GSTS_IRTPS
);
1754 /* Handle Translation Enable/Disable */
1755 static void vtd_handle_gcmd_te(IntelIOMMUState
*s
, bool en
)
1757 if (s
->dmar_enabled
== en
) {
1761 trace_vtd_dmar_enable(en
);
1764 s
->dmar_enabled
= true;
1765 /* Ok - report back to driver */
1766 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, 0, VTD_GSTS_TES
);
1768 s
->dmar_enabled
= false;
1770 /* Clear the index of Fault Recording Register */
1771 s
->next_frcd_reg
= 0;
1772 /* Ok - report back to driver */
1773 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, VTD_GSTS_TES
, 0);
1776 vtd_reset_caches(s
);
1777 vtd_address_space_refresh_all(s
);
1780 /* Handle Interrupt Remap Enable/Disable */
1781 static void vtd_handle_gcmd_ire(IntelIOMMUState
*s
, bool en
)
1783 trace_vtd_ir_enable(en
);
1786 s
->intr_enabled
= true;
1787 /* Ok - report back to driver */
1788 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, 0, VTD_GSTS_IRES
);
1790 s
->intr_enabled
= false;
1791 /* Ok - report back to driver */
1792 vtd_set_clear_mask_long(s
, DMAR_GSTS_REG
, VTD_GSTS_IRES
, 0);
1796 /* Handle write to Global Command Register */
1797 static void vtd_handle_gcmd_write(IntelIOMMUState
*s
)
1799 uint32_t status
= vtd_get_long_raw(s
, DMAR_GSTS_REG
);
1800 uint32_t val
= vtd_get_long_raw(s
, DMAR_GCMD_REG
);
1801 uint32_t changed
= status
^ val
;
1803 trace_vtd_reg_write_gcmd(status
, val
);
1804 if (changed
& VTD_GCMD_TE
) {
1805 /* Translation enable/disable */
1806 vtd_handle_gcmd_te(s
, val
& VTD_GCMD_TE
);
1808 if (val
& VTD_GCMD_SRTP
) {
1809 /* Set/update the root-table pointer */
1810 vtd_handle_gcmd_srtp(s
);
1812 if (changed
& VTD_GCMD_QIE
) {
1813 /* Queued Invalidation Enable */
1814 vtd_handle_gcmd_qie(s
, val
& VTD_GCMD_QIE
);
1816 if (val
& VTD_GCMD_SIRTP
) {
1817 /* Set/update the interrupt remapping root-table pointer */
1818 vtd_handle_gcmd_sirtp(s
);
1820 if (changed
& VTD_GCMD_IRE
) {
1821 /* Interrupt remap enable/disable */
1822 vtd_handle_gcmd_ire(s
, val
& VTD_GCMD_IRE
);
1826 /* Handle write to Context Command Register */
1827 static void vtd_handle_ccmd_write(IntelIOMMUState
*s
)
1830 uint64_t val
= vtd_get_quad_raw(s
, DMAR_CCMD_REG
);
1832 /* Context-cache invalidation request */
1833 if (val
& VTD_CCMD_ICC
) {
1834 if (s
->qi_enabled
) {
1835 error_report_once("Queued Invalidation enabled, "
1836 "should not use register-based invalidation");
1839 ret
= vtd_context_cache_invalidate(s
, val
);
1840 /* Invalidation completed. Change something to show */
1841 vtd_set_clear_mask_quad(s
, DMAR_CCMD_REG
, VTD_CCMD_ICC
, 0ULL);
1842 ret
= vtd_set_clear_mask_quad(s
, DMAR_CCMD_REG
, VTD_CCMD_CAIG_MASK
,
1847 /* Handle write to IOTLB Invalidation Register */
1848 static void vtd_handle_iotlb_write(IntelIOMMUState
*s
)
1851 uint64_t val
= vtd_get_quad_raw(s
, DMAR_IOTLB_REG
);
1853 /* IOTLB invalidation request */
1854 if (val
& VTD_TLB_IVT
) {
1855 if (s
->qi_enabled
) {
1856 error_report_once("Queued Invalidation enabled, "
1857 "should not use register-based invalidation");
1860 ret
= vtd_iotlb_flush(s
, val
);
1861 /* Invalidation completed. Change something to show */
1862 vtd_set_clear_mask_quad(s
, DMAR_IOTLB_REG
, VTD_TLB_IVT
, 0ULL);
1863 ret
= vtd_set_clear_mask_quad(s
, DMAR_IOTLB_REG
,
1864 VTD_TLB_FLUSH_GRANU_MASK_A
, ret
);
1868 /* Fetch an Invalidation Descriptor from the Invalidation Queue */
1869 static bool vtd_get_inv_desc(dma_addr_t base_addr
, uint32_t offset
,
1870 VTDInvDesc
*inv_desc
)
1872 dma_addr_t addr
= base_addr
+ offset
* sizeof(*inv_desc
);
1873 if (dma_memory_read(&address_space_memory
, addr
, inv_desc
,
1874 sizeof(*inv_desc
))) {
1875 error_report_once("Read INV DESC failed");
1880 inv_desc
->lo
= le64_to_cpu(inv_desc
->lo
);
1881 inv_desc
->hi
= le64_to_cpu(inv_desc
->hi
);
1885 static bool vtd_process_wait_desc(IntelIOMMUState
*s
, VTDInvDesc
*inv_desc
)
1887 if ((inv_desc
->hi
& VTD_INV_DESC_WAIT_RSVD_HI
) ||
1888 (inv_desc
->lo
& VTD_INV_DESC_WAIT_RSVD_LO
)) {
1889 error_report_once("%s: invalid wait desc: hi=%"PRIx64
", lo=%"PRIx64
1890 " (reserved nonzero)", __func__
, inv_desc
->hi
,
1894 if (inv_desc
->lo
& VTD_INV_DESC_WAIT_SW
) {
1896 uint32_t status_data
= (uint32_t)(inv_desc
->lo
>>
1897 VTD_INV_DESC_WAIT_DATA_SHIFT
);
1899 assert(!(inv_desc
->lo
& VTD_INV_DESC_WAIT_IF
));
1901 /* FIXME: need to be masked with HAW? */
1902 dma_addr_t status_addr
= inv_desc
->hi
;
1903 trace_vtd_inv_desc_wait_sw(status_addr
, status_data
);
1904 status_data
= cpu_to_le32(status_data
);
1905 if (dma_memory_write(&address_space_memory
, status_addr
, &status_data
,
1906 sizeof(status_data
))) {
1907 trace_vtd_inv_desc_wait_write_fail(inv_desc
->hi
, inv_desc
->lo
);
1910 } else if (inv_desc
->lo
& VTD_INV_DESC_WAIT_IF
) {
1911 /* Interrupt flag */
1912 vtd_generate_completion_event(s
);
1914 error_report_once("%s: invalid wait desc: hi=%"PRIx64
", lo=%"PRIx64
1915 " (unknown type)", __func__
, inv_desc
->hi
,
1922 static bool vtd_process_context_cache_desc(IntelIOMMUState
*s
,
1923 VTDInvDesc
*inv_desc
)
1925 uint16_t sid
, fmask
;
1927 if ((inv_desc
->lo
& VTD_INV_DESC_CC_RSVD
) || inv_desc
->hi
) {
1928 error_report_once("%s: invalid cc inv desc: hi=%"PRIx64
", lo=%"PRIx64
1929 " (reserved nonzero)", __func__
, inv_desc
->hi
,
1933 switch (inv_desc
->lo
& VTD_INV_DESC_CC_G
) {
1934 case VTD_INV_DESC_CC_DOMAIN
:
1935 trace_vtd_inv_desc_cc_domain(
1936 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc
->lo
));
1938 case VTD_INV_DESC_CC_GLOBAL
:
1939 vtd_context_global_invalidate(s
);
1942 case VTD_INV_DESC_CC_DEVICE
:
1943 sid
= VTD_INV_DESC_CC_SID(inv_desc
->lo
);
1944 fmask
= VTD_INV_DESC_CC_FM(inv_desc
->lo
);
1945 vtd_context_device_invalidate(s
, sid
, fmask
);
1949 error_report_once("%s: invalid cc inv desc: hi=%"PRIx64
", lo=%"PRIx64
1950 " (invalid type)", __func__
, inv_desc
->hi
,
1957 static bool vtd_process_iotlb_desc(IntelIOMMUState
*s
, VTDInvDesc
*inv_desc
)
1963 if ((inv_desc
->lo
& VTD_INV_DESC_IOTLB_RSVD_LO
) ||
1964 (inv_desc
->hi
& VTD_INV_DESC_IOTLB_RSVD_HI
)) {
1965 error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
1966 ", lo=0x%"PRIx64
" (reserved bits unzero)\n",
1967 __func__
, inv_desc
->hi
, inv_desc
->lo
);
1971 switch (inv_desc
->lo
& VTD_INV_DESC_IOTLB_G
) {
1972 case VTD_INV_DESC_IOTLB_GLOBAL
:
1973 vtd_iotlb_global_invalidate(s
);
1976 case VTD_INV_DESC_IOTLB_DOMAIN
:
1977 domain_id
= VTD_INV_DESC_IOTLB_DID(inv_desc
->lo
);
1978 vtd_iotlb_domain_invalidate(s
, domain_id
);
1981 case VTD_INV_DESC_IOTLB_PAGE
:
1982 domain_id
= VTD_INV_DESC_IOTLB_DID(inv_desc
->lo
);
1983 addr
= VTD_INV_DESC_IOTLB_ADDR(inv_desc
->hi
);
1984 am
= VTD_INV_DESC_IOTLB_AM(inv_desc
->hi
);
1985 if (am
> VTD_MAMV
) {
1986 error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
1987 ", lo=0x%"PRIx64
" (am=%u > VTD_MAMV=%u)\n",
1988 __func__
, inv_desc
->hi
, inv_desc
->lo
,
1989 am
, (unsigned)VTD_MAMV
);
1992 vtd_iotlb_page_invalidate(s
, domain_id
, addr
, am
);
1996 error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64
1997 ", lo=0x%"PRIx64
" (type mismatch: 0x%llx)\n",
1998 __func__
, inv_desc
->hi
, inv_desc
->lo
,
1999 inv_desc
->lo
& VTD_INV_DESC_IOTLB_G
);
2005 static bool vtd_process_inv_iec_desc(IntelIOMMUState
*s
,
2006 VTDInvDesc
*inv_desc
)
2008 trace_vtd_inv_desc_iec(inv_desc
->iec
.granularity
,
2009 inv_desc
->iec
.index
,
2010 inv_desc
->iec
.index_mask
);
2012 vtd_iec_notify_all(s
, !inv_desc
->iec
.granularity
,
2013 inv_desc
->iec
.index
,
2014 inv_desc
->iec
.index_mask
);
2018 static bool vtd_process_device_iotlb_desc(IntelIOMMUState
*s
,
2019 VTDInvDesc
*inv_desc
)
2021 VTDAddressSpace
*vtd_dev_as
;
2022 IOMMUTLBEntry entry
;
2023 struct VTDBus
*vtd_bus
;
2031 addr
= VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc
->hi
);
2032 sid
= VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc
->lo
);
2035 size
= VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc
->hi
);
2037 if ((inv_desc
->lo
& VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO
) ||
2038 (inv_desc
->hi
& VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI
)) {
2039 error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64
2040 ", lo=%"PRIx64
" (reserved nonzero)", __func__
,
2041 inv_desc
->hi
, inv_desc
->lo
);
2045 vtd_bus
= vtd_find_as_from_bus_num(s
, bus_num
);
2050 vtd_dev_as
= vtd_bus
->dev_as
[devfn
];
2055 /* According to ATS spec table 2.4:
2056 * S = 0, bits 15:12 = xxxx range size: 4K
2057 * S = 1, bits 15:12 = xxx0 range size: 8K
2058 * S = 1, bits 15:12 = xx01 range size: 16K
2059 * S = 1, bits 15:12 = x011 range size: 32K
2060 * S = 1, bits 15:12 = 0111 range size: 64K
2064 sz
= (VTD_PAGE_SIZE
* 2) << cto64(addr
>> VTD_PAGE_SHIFT
);
2070 entry
.target_as
= &vtd_dev_as
->as
;
2071 entry
.addr_mask
= sz
- 1;
2073 entry
.perm
= IOMMU_NONE
;
2074 entry
.translated_addr
= 0;
2075 memory_region_notify_iommu(&vtd_dev_as
->iommu
, 0, entry
);
2081 static bool vtd_process_inv_desc(IntelIOMMUState
*s
)
2083 VTDInvDesc inv_desc
;
2086 trace_vtd_inv_qi_head(s
->iq_head
);
2087 if (!vtd_get_inv_desc(s
->iq
, s
->iq_head
, &inv_desc
)) {
2088 s
->iq_last_desc_type
= VTD_INV_DESC_NONE
;
2091 desc_type
= inv_desc
.lo
& VTD_INV_DESC_TYPE
;
2092 /* FIXME: should update at first or at last? */
2093 s
->iq_last_desc_type
= desc_type
;
2095 switch (desc_type
) {
2096 case VTD_INV_DESC_CC
:
2097 trace_vtd_inv_desc("context-cache", inv_desc
.hi
, inv_desc
.lo
);
2098 if (!vtd_process_context_cache_desc(s
, &inv_desc
)) {
2103 case VTD_INV_DESC_IOTLB
:
2104 trace_vtd_inv_desc("iotlb", inv_desc
.hi
, inv_desc
.lo
);
2105 if (!vtd_process_iotlb_desc(s
, &inv_desc
)) {
2110 case VTD_INV_DESC_WAIT
:
2111 trace_vtd_inv_desc("wait", inv_desc
.hi
, inv_desc
.lo
);
2112 if (!vtd_process_wait_desc(s
, &inv_desc
)) {
2117 case VTD_INV_DESC_IEC
:
2118 trace_vtd_inv_desc("iec", inv_desc
.hi
, inv_desc
.lo
);
2119 if (!vtd_process_inv_iec_desc(s
, &inv_desc
)) {
2124 case VTD_INV_DESC_DEVICE
:
2125 trace_vtd_inv_desc("device", inv_desc
.hi
, inv_desc
.lo
);
2126 if (!vtd_process_device_iotlb_desc(s
, &inv_desc
)) {
2132 error_report_once("%s: invalid inv desc: hi=%"PRIx64
", lo=%"PRIx64
2133 " (unknown type)", __func__
, inv_desc
.hi
,
2138 if (s
->iq_head
== s
->iq_size
) {
/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        error_report_once("%s: detected invalid QI tail "
                          "(tail=0x%x, size=0x%x)",
                          __func__, s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }

    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}

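/*
 * Handle a guest write to FSTS_REG. Once none of the fault status
 * fields (PFO/PPF/IQE) remain set, the Interrupt Pending latch in
 * FECTL_REG can be dropped as well.
 */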
static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}

static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        trace_vtd_reg_ics_clear_ip();
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

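/*
 * MMIO read for the DMAR register file. Most registers are served
 * straight from the csr[] array; the 64-bit RTADDR/IQA registers are
 * special-cased so that guests reading them as two 32-bit halves see
 * consistent values.
 */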
static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
                          " size=0x%u", __func__, addr, size);
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}

static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    trace_vtd_reg_write(addr, size, val);

    if (addr + size > DMAR_REG_SIZE) {
        error_report_once("%s: MMIO over range: addr=0x%" PRIx64
                          " size=0x%u", __func__, addr, size);
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            /*
             * While the register is 32-bit only, some guests (Xen...) write to
             * it with 64-bit.
             */
            vtd_set_quad(s, addr, val);
        }
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}

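/*
 * IOMMUMemoryRegionClass::translate callback. With DMA remapping
 * disabled the access is passed through 1:1 on a 4K granule; otherwise
 * vtd_do_iommu_translate() performs the context-entry and page-table
 * walk. On failure the returned entry keeps perm == IOMMU_NONE.
 */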
static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag, int iommu_idx)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4k-page */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        error_report_once("%s: detected translation failure "
                          "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")",
                          __func__, pci_bus_num(vtd_as->bus),
                          VTD_PCI_SLOT(vtd_as->devfn),
                          VTD_PCI_FUNC(vtd_as->devfn),
                          addr);
    }

    return iotlb;
}

static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set caching-mode=1 for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    /* Update per-address-space notifier flags */
    vtd_as->notifier_flags = new;

    if (old == IOMMU_NOTIFIER_NONE) {
        QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        QLIST_REMOVE(vtd_as, next);
    }
}

static int vtd_post_load(void *opaque, int version_id)
{
    IntelIOMMUState *iommu = opaque;

    /*
     * Memory regions are dynamically turned on/off depending on
     * context entry configurations from the guest. After migration,
     * we need to make sure the memory regions are still correct.
     */
    vtd_switch_address_space_all(iommu);

    return 0;
}

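/*
 * Migration state. Register contents travel in the raw csr[] array,
 * while the derived fields (queue pointers, enable flags) are carried
 * explicitly so that post_load can rebuild the address space layout on
 * the destination.
 */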
static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .post_load = vtd_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits,
                      VTD_HOST_ADDRESS_WIDTH),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
    DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true),
    DEFINE_PROP_END_OF_LIST(),
};

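/*
 * For reference (illustrative usage, not part of this file): the device
 * is typically created on a q35 machine, e.g.
 *
 *   qemu-system-x86_64 -machine q35,accel=kvm,kernel-irqchip=split \
 *       -device intel-iommu,intremap=on,caching-mode=on ...
 *
 * caching-mode=on is required whenever notifiers (vhost, VFIO) need MAP
 * events; see vtd_iommu_notify_flag_changed() above.
 */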
/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64,
                          __func__, index, addr);
        return -VTD_FR_IR_ROOT_INVAL;
    }

    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));

    if (!entry->irte.present) {
        error_report_once("%s: detected non-present IRTE "
                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
                          __func__, index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        error_report_once("%s: detected non-zero reserved IRTE "
                          "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")",
                          __func__, index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                error_report_once("%s: invalid IRTE SID "
                                  "(index=%u, sid=%u, source_id=%u)",
                                  __func__, index, sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                error_report_once("%s: invalid SVT_BUS "
                                  "(index=%u, bus=%u, min=%u, max=%u)",
                                  __func__, index, bus, bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            error_report_once("%s: detected invalid IRTE SVT "
                              "(index=%u, type=%d)", __func__,
                              index, entry->irte.sid_vtype);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}

/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             X86IOMMUIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return 0;
}

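/*
 * An MSI whose address falls in the 0xfeeXXXXX window is either in
 * compatibility format (delivered unchanged) or in remappable format,
 * in which case bits of the address (and, when the subhandle is valid,
 * the low bits of the data) encode an index into the interrupt
 * remapping table.
 */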
/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    X86IOMMUIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        error_report_once("%s: MSI address high 32 bits non-zero detected: "
                          "address=0x%" PRIx64, __func__, origin->address);
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32,
                          __func__, addr.data);
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            error_report_once("%s: invalid IR MSI "
                              "(sid=%u, address=0x%" PRIx64
                              ", data=0x%" PRIx32 ")",
                              __func__, sid, origin->address, origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate X86IOMMUIrq to MSI message */
    x86_iommu_irq_to_msi_message(&irq, translated);

out:
    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}

static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}

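/*
 * MMIO ops covering the 0xfeeXXXXX interrupt window of each device
 * address space. Reads have no architected effect here; writes are
 * treated as MSI triggers and pushed through interrupt remapping.
 */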
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}

static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

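/*
 * Look up (and lazily create) the per-device address space for
 * bus/devfn. VTDBus structures and their VTDAddressSpace objects hang
 * off a hash table keyed by the PCIBus pointer and are never freed,
 * since devices keep using the address space they were first given.
 */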
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
                            PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;
        vtd_dev_as->iova_tree = iova_tree_new();

        /*
         * Memory region relationships looks like (Address range shows
         * only lower 32 bits to make it short in length...):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | vtd_root        | 00000000-ffffffff |        0 |
         * |  intel_iommu    | 00000000-ffffffff |        1 |
         * |  vtd_sys_alias  | 00000000-ffffffff |        1 |
         * |  intel_iommu_ir | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for
         * vtd_sys_alias and intel_iommu regions. IR region is always
         * enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
                                 TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
                                 "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            MEMORY_REGION(&vtd_dev_as->iommu),
                                            1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}

/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;
    IntelIOMMUState *s = as->iommu_state;
    DMAMap map;

    /*
     * Note: all the code in this function assumes that IOVA bits are
     * no more than VTD_MGAW bits (which is restricted by the VT-d
     * spec), otherwise we would need to consider overflow of 64 bits.
     */

    if (end > VTD_ADDRESS_SIZE(s->aw_bits)) {
        /*
         * No need to unmap regions that are bigger than the whole
         * VT-d supported address space size.
         */
        end = VTD_ADDRESS_SIZE(s->aw_bits);
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot form a correct mask. Enlarge it to the
         * minimum available mask.
         */
        int n = 64 - clz64(size);
        if (n > s->aw_bits) {
            /* should not happen, but in case it happens, limit it */
            n = s->aw_bits;
        }
        size = 1ULL << n;
    }

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    map.iova = entry.iova;
    map.size = entry.addr_mask;
    iova_tree_remove(as->iova_tree, &map);

    memory_region_notify_one(n, &entry);
}

static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static void vtd_address_space_refresh_all(IntelIOMMUState *s)
{
    vtd_address_space_unmap_all(s);
    vtd_switch_address_space_all(s);
}

static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}

static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. No matter what, we release existing mappings
     * (it means flushing caches for UNMAP-only registers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        if (vtd_as_has_map_notifier(vtd_as)) {
            /* This is required only for MAP typed notifiers */
            vtd_page_walk_info info = {
                .hook_fn = vtd_replay_hook,
                .private = (void *)n,
                .notify_unmap = false,
                .aw = s->aw_bits,
                .as = vtd_as,
                .domain_id = VTD_CONTEXT_ENTRY_DID(ce.hi),
            };

            vtd_page_walk(&ce, 0, ~0ULL, &info);
        }
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }

    return;
}

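/*
 * vtd_init() computes the CAP/ECAP register contents from the configured
 * properties (aw-bits, caching-mode, dma-drain, interrupt remapping
 * support) before defining the register file defaults, so a device reset
 * recomputes everything from scratch.
 */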
/* Do the initialization. It will also be called when reset, so pay
 * attention when adding new initialization stuff.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND |
             VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS |
             VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits);
    if (s->dma_drain) {
        s->cap |= VTD_CAP_DRAIN;
    }
    if (s->aw_bits == VTD_HOST_AW_48BIT) {
        s->cap |= VTD_CAP_SAGAW_48bit;
    }
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    /*
     * Rsvd field masks for spte
     */
    vtd_paging_entry_rsvd_field[0] = ~0ULL;
    vtd_paging_entry_rsvd_field[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[5] = VTD_SPTE_LPAGE_L1_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[6] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[7] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits);
    vtd_paging_entry_rsvd_field[8] = VTD_SPTE_LPAGE_L4_RSVD_MASK(s->aw_bits);

    if (x86_iommu_ir_supported(x86_iommu)) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    vtd_reset_caches(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations in which the PLMR and PHMR fields
     * are reported as Clear in CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}

/* Should not reset address_spaces when reset because devices will still use
 * the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);
    vtd_address_space_refresh_all(s);
}

static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}

static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu_ir_supported(x86_iommu) ?
                                              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    /* Currently the only supported address widths are 39 and 48 bits */
    if ((s->aw_bits != VTD_HOST_AW_39BIT) &&
        (s->aw_bits != VTD_HOST_AW_48BIT)) {
        error_setg(errp, "Supported values for aw-bits are: %d, %d",
                   VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT);
        return false;
    }

    return true;
}

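/*
 * Realize: wire the register file at the fixed Q35 DMAR address and
 * install vtd_host_dma_iommu() so that every PCI device on the root bus
 * obtains its DMA address space from this IOMMU. The IOAPIC gets a
 * pseudo devfn so that its MSI writes are remapped as well.
 */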
static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    PCMachineState *pcms = PC_MACHINE(ms);
    PCIBus *bus = pcms->bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->vtd_as_with_notifiers);
    qemu_mutex_init(&s->iommu_lock);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}

static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}

static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
                                               void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = vtd_iommu_translate;
    imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
    imrc->replay = vtd_iommu_replay;
}

static const TypeInfo vtd_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
    .class_init = vtd_iommu_memory_region_class_init,
};

static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
    type_register_static(&vtd_iommu_memory_region_info);
}

type_init(vtd_register_types)