/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 * (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "trace.h"
/*#define DEBUG_INTEL_IOMMU*/
#ifdef DEBUG_INTEL_IOMMU
enum {
    DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
    DEBUG_CACHE, DEBUG_IR,
};
#define VTD_DBGBIT(x)   (1 << DEBUG_##x)
static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);

#define VTD_DPRINTF(what, fmt, ...) do { \
    if (vtd_dbgflags & VTD_DBGBIT(what)) { \
        fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
                ## __VA_ARGS__); } \
    } while (0)
#else
#define VTD_DPRINTF(what, fmt, ...) do {} while (0)
#endif
static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}
/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
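
/*
 * Note: each CSR offset carries three shadow masks that define its access
 * behavior.  wmask selects the software-writable bits, w1cmask selects
 * "write 1 to clear" status bits, and womask (used by the _wo helpers
 * above) hides write-only bits from reads.  For a write of @val this
 * means roughly:
 *     new = (old & ~wmask) | (val & wmask);  -- writable bits updated
 *     new &= ~(w1cmask & val);               -- 1s clear W1C status bits
 * so writing 1 to a fault status bit clears it, while writing 0 leaves it.
 */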
static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}

/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}
static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}
/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}

/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
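
/*
 * Worked example: with 4K pages (VTD_PAGE_SHIFT_4K == 12) and 9 index
 * bits per level (VTD_SL_LEVEL_BITS == 9), vtd_slpt_level_shift()
 * yields 12/21/30/39 for levels 1-4; a level-2 (2M) mapping thus covers
 * iova bits [20:0] and vtd_slpt_level_page_mask(2) is ~0x1fffffULL.
 */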
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
           (((entry->gfn & info->mask) == gfn) ||
            (entry->gfn == gfn_tlb));
}

/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    VTD_DPRINTF(CACHE, "global context_cache_gen=1");
    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}
static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
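
/*
 * Note: an IOTLB key packs the guest frame number into the low bits and
 * stacks the 16-bit source-id and the paging level above it, so one
 * hash table can hold translations of every page size for every device
 * without keys of distinct devices colliding on the same gfn.
 */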
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}

static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             bool read_flags, bool write_flags,
                             uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->read_flags = read_flags;
    entry->write_flags = write_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}
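
/*
 * Note: @key and @entry are heap-allocated and ownership passes to the
 * hash table with g_hash_table_replace(); the table is expected to have
 * been created with g_free destroy functions so that replaced or
 * evicted entries are released automatically.
 */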
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32,
                msi.address, msi.data);
    apic_get_class()->send_msi(&msi);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
                    "to be serviced by software, fault event is not generated "
                    "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}

/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording register
 * is changed.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}
/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
    VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
                ", lo 0x%"PRIx64, index, hi, lo);
}

/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}
/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }
    VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
                ", is_write %d", source_id, fault, addr, is_write);
    if (fsts_reg & VTD_FSTS_PFO) {
        VTD_DPRINTF(FLOG, "new fault is not recorded due to "
                    "Primary Fault Overflow");
        return;
    }
    if (vtd_try_collapse_fault(s, source_id)) {
        VTD_DPRINTF(FLOG, "new fault is not recorded due to "
                    "compression of faults");
        return;
    }
    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
                    "new fault is not recorded, set PFO field");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        VTD_DPRINTF(FLOG, "there are pending faults already, "
                    "fault event is not generated");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set.
         * So generate fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}

/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        trace_vtd_re_invalid(re->rsvd, re->val);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}

static inline bool vtd_context_entry_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* we have checked that root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        trace_vtd_re_invalid(root->rsvd, root->val);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}

static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK;
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a spte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}

/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
{
    uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
    return 1ULL << MIN(ce_agaw, VTD_MGAW);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce) - 1));
}

static const uint64_t vtd_paging_entry_rsvd_field[] = {
    [0] = ~0ULL,
    /* For not large page */
    [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    /* For large page */
    [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
};
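
/*
 * Note: indexes 1-4 above are the reserved-bit masks for ordinary 4K
 * paging entries at levels 1-4, while indexes 5-8 are the masks applied
 * when the PS bit is set (large pages), which is why
 * vtd_slpte_nonzero_rsvd() below selects level + 4 in that case.
 */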
static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}

/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
    uint32_t level = vtd_get_level_from_context_entry(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce)) {
        VTD_DPRINTF(GENERAL, "error: iova 0x%"PRIx64 " exceeds limits", iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
                        "entry at level %"PRIu32 " for iova 0x%"PRIx64,
                        level, iova);
            if (level == vtd_get_level_from_context_entry(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
                        "iova 0x%"PRIx64 " slpte 0x%"PRIx64,
                        (is_write ? "write" : "read"), iova, slpte);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
                        "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
                        level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}
typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);

/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: hook func to be called when detected page
 * @private: private data to be passed into hook func
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @notify_unmap: whether we should notify invalid entries
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, vtd_page_walk_hook hook_fn,
                               void *private, uint32_t level,
                               bool read, bool write, bool notify_unmap)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * table entries.
         */
        entry_valid = read_cur | write_cur;

        if (vtd_is_last_slpte(slpte, level)) {
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte);
            entry.addr_mask = ~subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            if (!entry_valid && !notify_unmap) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
                                    entry.addr_mask, entry.perm);
            ret = hook_fn(&entry, private);
            if (ret < 0) {
                return ret;
            }
        } else {
            if (!entry_valid) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
                                      MIN(iova_next, end), hook_fn, private,
                                      level - 1, read_cur, write_cur,
                                      notify_unmap);
            if (ret < 0) {
                return ret;
            }
        }
next:
        iova = iova_next;
    }

    return 0;
}

/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: the hook to be called for each detected area
 * @private: private data for the hook function
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_hook hook_fn, void *private,
                         bool notify_unmap)
{
    dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
    uint32_t level = vtd_get_level_from_context_entry(ce);

    if (!vtd_iova_range_check(start, ce)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce);
    }

    return vtd_page_walk_level(addr, start, end, hook_fn, private,
                               level, true, true, notify_unmap);
}
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
        trace_vtd_re_invalid(re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_context_entry_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
               (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }
    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    } else {
        switch (ce->lo & VTD_CONTEXT_ENTRY_TT) {
        case VTD_CONTEXT_TT_MULTI_LEVEL:
            /* fall through */
        case VTD_CONTEXT_TT_DEV_IOTLB:
            break;
        default:
            trace_vtd_ce_invalid(ce->hi, ce->lo);
            return -VTD_FR_CONTEXT_ENTRY_INV;
        }
    }
    return 0;
}

static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
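
/*
 * Note: this is the standard PCI requester-id layout,
 * sid = bus[15:8] | devfn[7:0]; e.g. bus 0x05, devfn 0x10
 * (slot 2, function 0) gives source-id 0x0510.
 */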
static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}

static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
/* Map dev to context-entry then do a paging-structures walk to do an IOMMU
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function number
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 */
static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    /* Try to fetch slpte from IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }
    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            return;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        return;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes, level);
out:
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
}
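
/*
 * Note: the perm encoding above matches IOMMUAccessFlags, where
 * IOMMU_RO is bit 0 and IOMMU_WO is bit 1, i.e. it should be
 * equivalent to IOMMU_ACCESS_FLAG(reads, writes).
 */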
static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK;

    VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
                (s->root_extended ? "(extended)" : ""));
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}

static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value = 0;
    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK;
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    VTD_DPRINTF(CSR, "int remap table addr 0x%"PRIx64 " size %"PRIu32,
                s->intr_root, s->intr_size);
}

static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        memory_region_iommu_replay_all(&node->vtd_as->iommu);
    }
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by a IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tunes for
     * VT-d emulation codes.
     */
    vtd_iommu_replay_all(s);
}
/* Find the VTD address space currently associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /* Iterate over the registered buses to find the one which
         * currently holds this bus number, and update the bus_num
         * lookup table:
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }
    return vtd_bus;
}

/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
    if (vtd_bus) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                             VTD_PCI_FUNC(devfn_it));
                vtd_as->context_cache_entry.context_cache_gen = 0;
                /*
                 * So a device is moving out of (or moving into) a
                 * domain, a replay() suits here to notify all the
                 * IOMMU_NOTIFIER_MAP registers about this change.
                 * This does no harm even if we have no such notifier
                 * registered - the IOMMU notification framework will
                 * skip MAP notifications if that happened.
                 */
                memory_region_iommu_replay_all(&vtd_as->iommu);
            }
        }
    }
}
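
/*
 * Worked example: for func_mask 2 the switch above picks mask 6, and
 * after mask = ~mask the comparison ignores devfn bits 2:1; a source-id
 * with devfn 0x51 therefore also invalidates the cached context entries
 * of devfn 0x53, 0x55 and 0x57.
 */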
/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    (uint16_t)VTD_CCMD_DID(val));
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        VTD_DPRINTF(INV, "global invalidation");
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity");
        caig = 0;
    }
    return caig;
}

static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_iotlb_reset("global invalidation recved");
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}

static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            memory_region_iommu_replay_all(&vtd_as->iommu);
        }
    }
}

static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
                                           void *private)
{
    memory_region_notify_iommu((MemoryRegion *)private, *entry);
    return 0;
}

static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    int ret;

    QLIST_FOREACH(node, &(s->notifiers_list), next) {
        VTDAddressSpace *vtd_as = node->vtd_as;
        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
                          vtd_page_invalidate_notify_hook,
                          (void *)&vtd_as->iommu, true);
        }
    }
}

static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        VTD_DPRINTF(INV, "global invalidation");
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
                    domain_id);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
                    " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
        if (am > VTD_MAMV) {
            VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
                        "%"PRIu8, (uint8_t)VTD_MAMV);
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: invalid granularity");
        iaig = 0;
    }
    return iaig;
}

static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
{
    return s->iq_tail == 0;
}

static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
{
    return s->qi_enabled && (s->iq_tail == s->iq_head) &&
           (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
}
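
/*
 * Note: these checks mirror the VT-d programming requirements - queued
 * invalidation may only be enabled while the queue is empty (tail still
 * zero after programming IQA), and may only be disabled once head has
 * caught up with tail and the last descriptor processed was an
 * invalidation wait descriptor.
 */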
static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    VTD_DPRINTF(INV, "Queued Invalidation Enable %s", (en ? "on" : "off"));
    if (en) {
        if (vtd_queued_inv_enable_check(s)) {
            s->iq = iqa_val & VTD_IQA_IQA_MASK;
            /* 2^(x+8) entries */
            s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
            s->qi_enabled = true;
            VTD_DPRINTF(INV, "DMAR_IQA_REG 0x%"PRIx64, iqa_val);
            VTD_DPRINTF(INV, "Invalidation Queue addr 0x%"PRIx64 " size %d",
                        s->iq, s->iq_size);
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
        } else {
            VTD_DPRINTF(GENERAL, "error: can't enable Queued Invalidation: "
                        "tail %"PRIu16, s->iq_tail);
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            VTD_DPRINTF(GENERAL, "error: can't disable Queued Invalidation: "
                        "head %"PRIu16 ", tail %"PRIu16
                        ", last_descriptor %"PRIu8,
                        s->iq_head, s->iq_tail, s->iq_last_desc_type);
        }
    }
}

/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    VTD_DPRINTF(CSR, "set Root Table Pointer");

    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
}

/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    VTD_DPRINTF(CSR, "set Interrupt Remap Table Pointer");

    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}
static void vtd_switch_address_space(VTDAddressSpace *as)
{
    assert(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   as->iommu_state->dmar_enabled);

    /* Turn off first then on the other */
    if (as->iommu_state->dmar_enabled) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(&as->iommu, true);
    } else {
        memory_region_set_enabled(&as->iommu, false);
        memory_region_set_enabled(&as->sys_alias, true);
    }
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}
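
/*
 * Design note: each device's address space contains two overlapping
 * regions - a sys_alias alias of system memory used while DMA remapping
 * is disabled, and the IOMMU translation region used while it is
 * enabled.  Toggling their enabled state here keeps the DMAR on/off
 * switch cheap and the memory topology stable, instead of rebuilding
 * regions on every transition.
 */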
/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_switch_address_space_all(s);
}

/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    VTD_DPRINTF(CSR, "Interrupt Remap Enable %s", (en ? "on" : "off"));

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}

/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if (changed & VTD_GCMD_IRE) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}

/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
                        "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
        VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
    }
}

/* Handle write to IOTLB Invalidation Register */
static void vtd_handle_iotlb_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);

    /* IOTLB invalidation request */
    if (val & VTD_TLB_IVT) {
        if (s->qi_enabled) {
            VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
                        "should not use register-based invalidation");
            return;
        }
        ret = vtd_iotlb_flush(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
                                      VTD_TLB_FLUSH_GRANU_MASK_A, ret);
        VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
    }
}
/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
    if (dma_memory_read(&address_space_memory, addr, inv_desc,
                        sizeof(*inv_desc))) {
        VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
                    "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
        inv_desc->lo = 0;
        inv_desc->hi = 0;
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    return true;
}

static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                               VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        trace_vtd_inv_desc_iotlb_global();
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        trace_vtd_inv_desc_iotlb_domain(domain_id);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
        if (am > VTD_MAMV) {
            trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    VTD_DPRINTF(INV, "inv ir glob %d index %d mask %d",
                inv_desc->iec.granularity,
                inv_desc->iec.index,
                inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}
static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEntry entry;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Device "
                    "IOTLB Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
                    inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    entry.target_as = &vtd_dev_as->as;
    entry.addr_mask = sz - 1;
    entry.iova = addr;
    entry.perm = IOMMU_NONE;
    entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);

done:
    return true;
}
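
/*
 * Worked example: with S set, cto64() counts the trailing 1s of the
 * page-shifted address, so an address ending in binary ...0 encodes 8K,
 * ...01 encodes 16K and ...011 encodes 32K, matching the ATS table
 * above; addr is then aligned down to that size.
 */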
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        VTD_DPRINTF(INV, "Device IOTLB Invalidation Descriptor hi 0x%"PRIx64
                    " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}

/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
                    " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}
static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
                    "IP field of FECTL_REG");
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}

static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        VTD_DPRINTF(FLOG, "IM field is cleared, generate "
                    "fault event interrupt");
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
        VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
                    "clear IP field of IECTL_REG");
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
        VTD_DPRINTF(INV, "IM field is cleared, generate "
                    "invalidation event interrupt");
    }
}
static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    if (addr + size > DMAR_REG_SIZE) {
        VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
                    ", got 0x%"PRIx64 " %d",
                    (uint64_t)DMAR_REG_SIZE, addr, size);
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }
    VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
                addr, size, val);
    return val;
}

static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    if (addr + size > DMAR_REG_SIZE) {
        VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
                    ", got 0x%"PRIx64 " %d",
                    (uint64_t)DMAR_REG_SIZE, addr, size);
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        VTD_DPRINTF(IR, "DMAR_IRTA_REG write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        VTD_DPRINTF(IR, "DMAR_IRTA_REG_HI write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
                    ", size %d, val 0x%"PRIx64, addr, size, val);
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}
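
/*
 * Illustrative note (not part of the device model): a 64-bit register such
 * as DMAR_CCMD_REG may be written as a single 8-byte access or as two
 * 4-byte accesses, low half first and then the _HI offset. The cases above
 * therefore invoke the side-effect handler only once the full value is
 * visible: on the 8-byte path, or on the high-half write. A guest sequence
 * like the following sketch (Linux-style writel() used purely for
 * illustration) triggers vtd_handle_ccmd_write() exactly once:
 *
 *     writel(lower_32_bits(val), mmio_base + DMAR_CCMD_REG);
 *     writel(upper_32_bits(val), mmio_base + DMAR_CCMD_REG_HI);
 */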

static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
                                         bool is_write)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry ret = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = 0,
        .addr_mask = ~(hwaddr)0,
        .perm = IOMMU_NONE,
    };

    if (!s->dmar_enabled) {
        /* DMAR disabled, passthrough, use 4k-page*/
        ret.iova = addr & VTD_PAGE_MASK_4K;
        ret.translated_addr = addr & VTD_PAGE_MASK_4K;
        ret.addr_mask = ~VTD_PAGE_MASK_4K;
        ret.perm = IOMMU_RW;
        return ret;
    }

    vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
                           is_write, &ret);
    VTD_DPRINTF(MMU,
                "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
                " iova 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
                VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
                vtd_as->devfn, addr, ret.translated_addr);
    return ret;
}
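
/*
 * Worked example (illustration only): with DMA remapping disabled, an
 * access to 0x12345678 yields the identity entry
 *     iova = translated_addr = 0x12345000, addr_mask = 0xfff, perm = RW
 * i.e. the whole containing 4K page is passed through unchanged.
 */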

static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IntelIOMMUNotifierNode *node = NULL;
    IntelIOMMUNotifierNode *next_node = NULL;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set cache_mode=1 for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        node = g_malloc0(sizeof(*node));
        node->vtd_as = vtd_as;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* update notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->vtd_as == vtd_as) {
            if (new == IOMMU_NOTIFIER_NONE) {
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}
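
/*
 * Usage note (illustration only): MAP notifiers, e.g. from vhost or VFIO
 * device assignment, are only accepted when the emulated IOMMU advertises
 * Caching Mode, roughly:
 *
 *     qemu-system-x86_64 -M q35,accel=kvm \
 *         -device intel-iommu,caching-mode=on ...
 *
 * Without caching-mode=on the registration above aborts via exit(1).
 */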

static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
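
/*
 * Note (illustrative): .valid above declares which guest accesses are legal
 * (aligned 4- or 8-byte transactions here), while .impl tells the memory
 * core which sizes the callbacks implement, so anything else is rejected or
 * adapted before vtd_mem_read()/vtd_mem_write() are reached.
 */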

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
    DEFINE_PROP_END_OF_LIST(),
};

/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        VTD_DPRINTF(GENERAL, "error: fail to access IR root at 0x%"PRIx64
                    " + %"PRIu16, iommu->intr_root, index);
        return -VTD_FR_IR_ROOT_INVAL;
    }

    if (!entry->irte.present) {
        VTD_DPRINTF(GENERAL, "error: present flag not set in IRTE"
                    " entry index %u value 0x%"PRIx64 " 0x%"PRIx64,
                    index, le64_to_cpu(entry->data[1]),
                    le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        VTD_DPRINTF(GENERAL, "error: IRTE entry index %"PRIu16
                    " reserved fields non-zero: 0x%"PRIx64 " 0x%"PRIx64,
                    index, le64_to_cpu(entry->data[1]),
                    le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            VTD_DPRINTF(IR, "No SID validation for IRTE index %d", index);
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                VTD_DPRINTF(GENERAL, "SID validation for IRTE index "
                            "%d failed (reqid 0x%04x sid 0x%04x)", index,
                            sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                VTD_DPRINTF(GENERAL, "SID validation for IRTE index %d "
                            "failed (bus %d outside %d-%d)", index, bus,
                            bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            VTD_DPRINTF(GENERAL, "Invalid SVT bits (0x%x) in IRTE index "
                        "%d", entry->irte.sid_vtype, index);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}
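
/*
 * Worked example for the SVT masks above (illustration only): with
 * sid_q == 2 the mask is 0xfff9, which ignores bits 1-2 of the function
 * number. A requester at 02:03.2 (sid 0x021a) then matches an IRTE
 * source_id of 0x0218, since 0x021a & 0xfff9 == 0x0218 & 0xfff9 == 0x0218.
 */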

/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             VTDIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    VTD_DPRINTF(IR, "remapping interrupt index %d: trig:%u,vec:%u,"
                "deliver:%u,dest:%u,dest_mode:%u", index,
                irq->trigger_mode, irq->vector, irq->delivery_mode,
                irq->dest, irq->dest_mode);

    return 0;
}

/* Generate one MSI message from VTDIrq info */
static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
{
    VTD_MSIMessage msg = {};

    /* Generate address bits */
    msg.dest_mode = irq->dest_mode;
    msg.redir_hint = irq->redir_hint;
    msg.dest = irq->dest;
    msg.__addr_hi = irq->dest & 0xffffff00;
    msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep this from original MSI address bits */
    msg.__not_used = irq->msi_addr_last_bits;

    /* Generate data bits */
    msg.vector = irq->vector;
    msg.delivery_mode = irq->delivery_mode;
    msg.level = 1;
    msg.trigger_mode = irq->trigger_mode;

    msg_out->address = msg.msi_addr;
    msg_out->data = msg.msi_data;
}
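
/*
 * Layout reminder (x86 MSI, illustration only): the generated address
 * always carries 0xfee in bits 31:20 with the destination ID in bits
 * 19:12, so a remapped interrupt with dest == 0x01 in xAPIC mode yields
 * address 0xfee01000 (RH/DM clear); the vector sits in data bits 7:0 and
 * the delivery mode in data bits 10:8.
 */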

/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    VTDIrq irq = {};

    assert(origin && translated);

    if (!iommu || !iommu->intr_enabled) {
        goto do_not_translate;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        VTD_DPRINTF(GENERAL, "error: MSI addr high 32 bits nonzero"
                    " during interrupt remapping: 0x%"PRIx32,
                    (uint32_t)((origin->address & VTD_MSI_ADDR_HI_MASK) >> \
                    VTD_MSI_ADDR_HI_SHIFT));
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        VTD_DPRINTF(GENERAL, "error: MSI addr low 32 bits invalid: "
                    "0x%"PRIx32, addr.data);
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        goto do_not_translate;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        VTD_DPRINTF(IR, "received MSI interrupt");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            VTD_DPRINTF(GENERAL, "error: MSI data bits non-zero for "
                        "interrupt remappable entry: 0x%"PRIx32,
                        origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        VTD_DPRINTF(IR, "received IOAPIC interrupt");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            VTD_DPRINTF(GENERAL, "IOAPIC vector inconsistent: "
                        "entry: %d, IRTE: %d, index: %d",
                        vector, irq.vector, index);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            VTD_DPRINTF(GENERAL, "IOAPIC trigger mode inconsistent: "
                        "entry: %u, IRTE: %u, index: %d",
                        trigger_mode, irq.trigger_mode, index);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that guest OS
     * might modify it. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate VTDIrq to MSI message */
    vtd_generate_msi_message(&irq, translated);

    VTD_DPRINTF(IR, "mapping MSI 0x%"PRIx64 ":0x%"PRIx32 " -> "
                "0x%"PRIx64 ":0x%"PRIx32, origin->address, origin->data,
                translated->address, translated->data);
    return 0;

do_not_translate:
    memcpy(translated, origin, sizeof(*origin));
    return 0;
}

static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}

static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        VTD_DPRINTF(GENERAL, "int remap fail for addr 0x%"PRIx64
                    " data 0x%"PRIx32, from.address, from.data);
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    VTD_DPRINTF(IR, "delivering MSI 0x%"PRIx64 ":0x%"PRIx32
                " for device sid 0x%04x",
                to.address, to.data, sid);

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}
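
/*
 * Note (illustrative): writes into the 0xfee00000-0xfeefffff window arrive
 * here with MemTxAttrs carrying the originator's PCI requester ID when one
 * is known; e.g. a device at 00:03.0 would present requester_id 0x0018,
 * which becomes the sid used for IRTE source validation above.
 */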

static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
                            X86_IOMMU_PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;

        /*
         * Memory region relationships looks like (Address range shows
         * only lower 32 bits to make it short in length...):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | vtd_root        | 00000000-ffffffff |        0 |
         * | intel_iommu     | 00000000-ffffffff |        1 |
         * | vtd_sys_alias   | 00000000-ffffffff |        1 |
         * | intel_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for
         * vtd_sys_alias and intel_iommu regions. IR region is always
         * enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->iommu, 1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}
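
/*
 * Usage sketch (hypothetical caller, illustration only): the PCI layer
 * resolves a device's DMA address space via vtd_host_dma_iommu(), which
 * lands here with something like
 *
 *     VTDAddressSpace *as = vtd_find_add_as(s, bus, PCI_DEVFN(3, 0));
 *
 * Repeated lookups for the same (bus, devfn) return the cached entry.
 */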

/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;

    /*
     * Note: all the code in this function assumes that IOVA bits are
     * no more than VTD_MGAW bits (which is restricted by the VT-d
     * spec), otherwise we would need to consider overflow of 64 bits.
     */

    if (end > VTD_ADDRESS_SIZE) {
        /*
         * Don't need to unmap regions that are bigger than the whole
         * VT-d supported address space size
         */
        end = VTD_ADDRESS_SIZE;
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot format a correct mask. Let's enlarge it to
         * suit the minimum available mask.
         */
        int n = 64 - clz64(size);
        if (n > VTD_MGAW) {
            /* should not happen, but in case it happens, limit it */
            n = VTD_MGAW;
        }
        size = 1ULL << n;
    }

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    memory_region_notify_one(n, &entry);
}
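
/*
 * Worked example (illustration only): a notifier scope of size 0x3000 is
 * not a power of two, so n = 64 - clz64(0x3000) = 14 and the invalidation
 * is widened to size = 1ULL << 14 = 0x4000, with iova aligned down to a
 * 0x4000 boundary and addr_mask set to 0x3fff.
 */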

static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}

static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. No matter what, we release existing mappings
     * (it means flushing caches for UNMAP-only registers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }

    return;
}

/* Do the initialization. It will also be called when reset, so pay
 * attention when adding new initialization stuff.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->iommu_ops.translate = vtd_iommu_translate;
    s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
    s->iommu_ops.replay = vtd_iommu_replay;
    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
             VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    if (x86_iommu->intr_supported) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that report the PLMR and PHMR
     * fields as Clear in CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}
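
/*
 * Example of the define semantics above (illustration only): DMAR_IQT_REG
 * is declared with wmask 0x7fff0ULL, so guest writes can only change bits
 * 4-18 (the queue tail); every other bit keeps its reset value of 0.
 */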

/* Should not reset address_spaces when reset because devices will still use
 * the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    VTD_DPRINTF(GENERAL, "");
    vtd_init(s);

    /*
     * When device reset, throw away all mappings and external caches
     */
    vtd_address_space_unmap_all(s);
}

static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}

static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
    if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        error_setg(errp, "Intel Interrupt Remapping cannot work with "
                         "kernel-irqchip=on, please use 'split|off'.");
        return false;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu->intr_supported ?
                                              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    return true;
}

static void vtd_realize(DeviceState *dev, Error **errp)
{
    PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
    PCIBus *bus = pcms->bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    VTD_DPRINTF(GENERAL, "");
    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->notifiers_list);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}

static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}

static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_register_types(void)
{
    VTD_DPRINTF(GENERAL, "");
    type_register_static(&vtd_info);
}

type_init(vtd_register_types)