/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 * (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"
static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}
static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}
static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}
static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}
/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
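/*
 * Worked example of the rule above: with wmask 0x0f (software may write
 * bits 3:0), w1cmask 0xf0 (bits 7:4 are write-1-to-clear) and an old
 * value of 0x35, a write of 0xff yields
 *   ((0x35 & ~0x0f) | (0xff & 0x0f)) & ~(0xf0 & 0xff) == 0x0f
 * i.e. the writable bits take the new value, read-only bits are kept,
 * and the W1C bits are cleared because a 1 was written to them.
 */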
static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}
static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}
/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}
static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}
static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}
static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}
/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}
static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}
/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
           (((entry->gfn & info->mask) == gfn) ||
            (entry->gfn == gfn_tlb));
}
/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}
static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}
static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
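/*
 * An IOTLB key packs the page frame number, the source-id and the
 * page-table level into a single 64-bit value:
 *   key = gfn | (source_id << VTD_IOTLB_SID_SHIFT)
 *             | (level << VTD_IOTLB_LVL_SHIFT)
 * so cached translations of the same page from different devices or
 * different levels never collide in the hash table.
 */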
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}
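/*
 * The lookup above has to probe each level from VTD_SL_PT_LEVEL
 * upwards, because a translation backed by a large page is cached
 * under the level at which its page walk terminated, and the gfn in
 * the key is masked differently per level.
 */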
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             uint8_t access_flags, uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->access_flags = access_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}
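/*
 * Note the simple eviction policy above: once the IOTLB reaches
 * VTD_IOTLB_MAX_SIZE entries the whole table is flushed instead of
 * evicting individual entries. g_hash_table_replace() stores the key
 * and entry allocated here, so the table's destroy functions are
 * responsible for freeing them later.
 */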
/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class()->send_msi(&msi);
}
/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        trace_vtd_err("There are previous interrupt conditions "
                      "to be serviced by software, fault event "
                      "is not generated.");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        trace_vtd_err("Interrupt Mask set, irq is not generated.");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}
/* Update the PPF field of Fault Status Register.
 * Should be called whenever change the F field of any fault recording
 * registers.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}
static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}
/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi = 0, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}
/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}
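/*
 * A new fault is "collapsed" when an unserviced fault from the same
 * source-id is already sitting in one of the fault recording
 * registers; recording it again would only duplicate information
 * software has yet to consume.
 */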
/* Log and report an DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    if (fsts_reg & VTD_FSTS_PFO) {
        trace_vtd_err("New fault is not recorded due to "
                      "Primary Fault Overflow.");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        trace_vtd_err("New fault is not recorded due to "
                      "compression of faults.");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        trace_vtd_err("Next Fault Recording Reg is used, "
                      "new fault is not recorded, set PFO field.");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        trace_vtd_err("There are pending faults already, "
                      "fault event is not generated.");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually cause the PPF to be Set.
         * So generate fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}
/* Handle Invalidation Queue Errors of queued invalidation interface error
 * conditions.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}
/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}
static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        trace_vtd_re_invalid(re->rsvd, re->val);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}
static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}
static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* we have checked that root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        trace_vtd_re_invalid(root->rsvd, root->val);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}
static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK(VTD_HOST_ADDRESS_WIDTH);
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}
/* Get the content of a spte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}
/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}
/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}

static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}
/* Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            return false;
        }
        break;
    default:
        /* Unknown type */
        return false;
    }
    return true;
}
static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
{
    uint32_t ce_agaw = vtd_ce_get_agaw(ce);
    return 1ULL << MIN(ce_agaw, VTD_MGAW);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce) - 1));
}
/*
 * Rsvd field masks for spte:
 *     Index [1] to [4] 4k pages
 *     Index [5] to [8] large pages
 */
static uint64_t vtd_paging_entry_rsvd_field[9];

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
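/*
 * Index mapping for vtd_paging_entry_rsvd_field[]: entries [1]..[4]
 * hold the reserved-bit masks for page-table entries at paging levels
 * 1-4, and entries [5]..[8] (i.e. level + 4) hold the masks used when
 * the entry maps a large page (PS bit set).
 */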
/* Find the VTD address space associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /*
         * Iterate over the registered buses to find the one which
         * currently hold this bus number, and update the bus_num
         * lookup table:
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }

    return vtd_bus;
}
/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce)) {
        trace_vtd_err_dmar_iova_overflow(iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_err_dmar_slpte_read_error(iova, level);
            if (level == vtd_ce_get_level(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}
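/*
 * The walk above starts from the level implied by the context-entry's
 * AW field and steps one level down per iteration, AND-ing read/write
 * permissions along the way; it terminates either at a leaf entry
 * (page frame or large page) or with a fault reason that the caller
 * turns into a DMAR fault record.
 */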
typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);
/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: hook func to be called when detected page
 * @private: private data to be passed into hook func
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @notify_unmap: whether we should notify invalid entries
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, vtd_page_walk_hook hook_fn,
                               void *private, uint32_t level,
                               bool read, bool write, bool notify_unmap)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * table entries.
         */
        entry_valid = read_cur | write_cur;

        if (vtd_is_last_slpte(slpte, level)) {
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte);
            entry.addr_mask = ~subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            if (!entry_valid && !notify_unmap) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
                                    entry.addr_mask, entry.perm);
            if (hook_fn) {
                ret = hook_fn(&entry, private);
                if (ret < 0) {
                    return ret;
                }
            }
        } else {
            if (!entry_valid) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
                                      MIN(iova_next, end), hook_fn, private,
                                      level - 1, read_cur, write_cur,
                                      notify_unmap);
            if (ret < 0) {
                return ret;
            }
        }
next:
        iova = iova_next;
    }

    return 0;
}
/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: the hook that to be called for each detected area
 * @private: private data for the hook function
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_hook hook_fn, void *private,
                         bool notify_unmap)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);

    if (!vtd_iova_range_check(start, ce)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce);
    }

    return vtd_page_walk_level(addr, start, end, hook_fn, private,
                               level, true, true, notify_unmap);
}
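/*
 * The top-level walk starts with read = write = true, so effective
 * permissions are determined purely by the page-table entries
 * themselves; see vtd_iotlb_page_invalidate_notify() below for a
 * typical use with a notification hook.
 */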
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD(VTD_HOST_ADDRESS_WIDTH))) {
        trace_vtd_re_invalid(re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
        (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(VTD_HOST_ADDRESS_WIDTH))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    /* Do translation type check */
    if (!vtd_ce_type_check(x86_iommu, ce)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    return 0;
}
/*
 * Fetch translation type for specific device. Returns <0 if error
 * happens, otherwise return the shifted type to check against
 * VTD_CONTEXT_TT_*.
 */
static int vtd_dev_get_trans_type(VTDAddressSpace *as)
{
    IntelIOMMUState *s;
    VTDContextEntry ce;
    int ret;

    s = as->iommu_state;

    ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
                                   as->devfn, &ce);
    if (ret) {
        return ret;
    }

    return vtd_ce_get_type(&ce);
}
static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
{
    int ret;

    assert(as);

    ret = vtd_dev_get_trans_type(as);
    if (ret < 0) {
        /*
         * Possibly failed to parse the context entry for some reason
         * (e.g., during init, or any guest configuration errors on
         * context entries). We should assume PT not enabled for
         * safety.
         */
        return false;
    }

    return ret == VTD_CONTEXT_TT_PASS_THROUGH;
}
/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu;
    /* Whether we need to take the BQL on our own */
    bool take_bql = !qemu_mutex_iothread_locked();

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /*
     * It's possible that we reach here without BQL, e.g., when called
     * from vtd_pt_enable_fast_path(). However the memory APIs need
     * it. We'd better make sure we have had it already, or, take it.
     */
    if (take_bql) {
        qemu_mutex_lock_iothread();
    }

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
    } else {
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
        memory_region_set_enabled(&as->sys_alias, true);
    }

    if (take_bql) {
        qemu_mutex_unlock_iothread();
    }

    return use_iommu;
}
static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}
static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
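/*
 * The source-id follows the PCI requester-id layout: bus number in
 * bits 15:8 and devfn in bits 7:0, e.g. bus 0x01 with devfn 0x10
 * gives source_id 0x0110.
 */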
static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};
/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}
static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}
static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
{
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    bool success = false;

    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
    if (!vtd_bus) {
        goto out;
    }

    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
    if (!vtd_as) {
        goto out;
    }

    if (vtd_switch_address_space(vtd_as) == false) {
        /* We switched off IOMMU region successfully. */
        success = true;
    }

out:
    trace_vtd_pt_enable_fast_path(source_id, success);
}
/* Map dev to context-entry then do a paging-structures walk to do a iommu
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combined of device and function number
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contain the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    uint8_t access_flags;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    /* Try to fetch slpte form IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        access_flags = iotlb_entry->access_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            goto error;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
        entry->iova = addr & VTD_PAGE_MASK_4K;
        entry->translated_addr = entry->iova;
        entry->addr_mask = ~VTD_PAGE_MASK_4K;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);

        return true;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        goto error;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    access_flags = IOMMU_ACCESS_FLAG(reads, writes);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     access_flags, level);
out:
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = access_flags;
    return true;

error:
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}
static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK(VTD_HOST_ADDRESS_WIDTH);

    trace_vtd_reg_dmar_root(s->root, s->root_extended);
}
static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}
static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value = 0;
    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK(VTD_HOST_ADDRESS_WIDTH);
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}
static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        memory_region_iommu_replay_all(&node->vtd_as->iommu);
    }
}
static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
    vtd_switch_address_space_all(s);
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by a IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tunes for
     * VT-d emulation codes.
     */
    vtd_iommu_replay_all(s);
}
/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0;   /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4;   /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6;   /* Mask bit 2:1 in the SID field */
        break;
    case 3:
        mask = 7;   /* Mask bit 2:0 in the SID field */
        break;
    }
    mask = ~mask;

    bus_n = VTD_SID_TO_BUS(source_id);
    vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
    if (vtd_bus) {
        devfn = VTD_SID_TO_DEVFN(source_id);
        for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
                trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
                                             VTD_PCI_FUNC(devfn_it));
                vtd_as->context_cache_entry.context_cache_gen = 0;
                /*
                 * Do switch address space when needed, in case if the
                 * device passthrough bit is switched.
                 */
                vtd_switch_address_space(vtd_as);
                /*
                 * So a device is moving out of (or moving into) a
                 * domain, a replay() suites here to notify all the
                 * IOMMU_NOTIFIER_MAP registers about this change.
                 * This won't bring bad even if we have no such
                 * notifier registered - the IOMMU notification
                 * framework will skip MAP notifications if that
                 * happened.
                 */
                memory_region_iommu_replay_all(&vtd_as->iommu);
            }
        }
    }
}
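/*
 * Worked example of the FM decoding above: func_mask & 3 == 2 yields
 * mask 6, i.e. SID function bits 2:1 are ignored after the inversion,
 * so a device-selective invalidation targeting function 0 also hits
 * functions 2, 4 and 6 of the same slot.
 */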
/* Context-cache invalidation
 * Returns the Context Actual Invalidation Granularity.
 * @val: the content of the CCMD_REG
 */
static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
{
    uint64_t caig;
    uint64_t type = val & VTD_CCMD_CIRG_MASK;

    switch (type) {
    case VTD_CCMD_DOMAIN_INVL:
        /* Fall through */
    case VTD_CCMD_GLOBAL_INVL:
        caig = VTD_CCMD_GLOBAL_INVL_A;
        vtd_context_global_invalidate(s);
        break;

    case VTD_CCMD_DEVICE_INVL:
        caig = VTD_CCMD_DEVICE_INVL_A;
        vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
        break;

    default:
        trace_vtd_err("Context cache invalidate type error.");
        caig = 0;
    }
    return caig;
}
static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_iotlb_global();
    vtd_reset_iotlb(s);
    vtd_iommu_replay_all(s);
}
static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    VTDAddressSpace *vtd_as;

    trace_vtd_inv_desc_iotlb_domain(domain_id);

    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
                                &domain_id);

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                      vtd_as->devfn, &ce) &&
            domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            memory_region_iommu_replay_all(&vtd_as->iommu);
        }
    }
}
static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
                                           void *private)
{
    memory_region_notify_iommu((IOMMUMemoryRegion *)private, *entry);
    return 0;
}
static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
                                             uint16_t domain_id, hwaddr addr,
                                             uint8_t am)
{
    IntelIOMMUNotifierNode *node;
    VTDContextEntry ce;
    int ret;

    QLIST_FOREACH(node, &(s->notifiers_list), next) {
        VTDAddressSpace *vtd_as = node->vtd_as;
        ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
                                       vtd_as->devfn, &ce);
        if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
            vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
                          vtd_page_invalidate_notify_hook,
                          (void *)&vtd_as->iommu, true);
        }
    }
}
static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
                                      hwaddr addr, uint8_t am)
{
    VTDIOTLBPageInvInfo info;

    trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);

    assert(am <= VTD_MAMV);
    info.domain_id = domain_id;
    info.addr = addr;
    info.mask = ~((1 << am) - 1);
    g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
    vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
}
/* Flush IOTLB
 * Returns the IOTLB Actual Invalidation Granularity.
 * @val: the content of the IOTLB_REG
 */
static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
{
    uint64_t iaig;
    uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
    uint16_t domain_id;
    hwaddr addr;
    uint8_t am;

    switch (type) {
    case VTD_TLB_GLOBAL_FLUSH:
        iaig = VTD_TLB_GLOBAL_FLUSH_A;
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_TLB_DSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        iaig = VTD_TLB_DSI_FLUSH_A;
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_TLB_PSI_FLUSH:
        domain_id = VTD_TLB_DID(val);
        addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
        am = VTD_IVA_AM(addr);
        addr = VTD_IVA_ADDR(addr);
        if (am > VTD_MAMV) {
            trace_vtd_err("IOTLB PSI flush: address mask overflow.");
            iaig = 0;
            break;
        }
        iaig = VTD_TLB_PSI_FLUSH_A;
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_err("IOTLB flush: invalid granularity.");
        iaig = 0;
    }
    return iaig;
}
*s
);
1462 static inline bool vtd_queued_inv_disable_check(IntelIOMMUState
*s
)
1464 return s
->qi_enabled
&& (s
->iq_tail
== s
->iq_head
) &&
1465 (s
->iq_last_desc_type
== VTD_INV_DESC_WAIT
);
static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
{
    uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);

    trace_vtd_inv_qi_enable(en);

    if (en) {
        s->iq = iqa_val & VTD_IQA_IQA_MASK(VTD_HOST_ADDRESS_WIDTH);
        /* 2^(x+8) entries */
        s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
        s->qi_enabled = true;
        trace_vtd_inv_qi_setup(s->iq, s->iq_size);
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);

        if (s->iq_tail != 0) {
            /*
             * This is a spec violation but Windows guests are known to set up
             * Queued Invalidation this way so we allow the write and process
             * Invalidation Descriptors right away.
             */
            trace_vtd_warn_invalid_qi_tail(s->iq_tail);
            if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
                vtd_fetch_inv_desc(s);
            }
        }
    } else {
        if (vtd_queued_inv_disable_check(s)) {
            /* disable Queued Invalidation */
            vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
            s->iq_head = 0;
            s->qi_enabled = false;
            /* Ok - report back to driver */
            vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
        } else {
            trace_vtd_err_qi_disable(s->iq_head, s->iq_tail, s->iq_last_desc_type);
        }
    }
}
/* Set Root Table Pointer */
static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
{
    vtd_root_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
}
/* Set Interrupt Remap Table Pointer */
static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
{
    vtd_interrupt_remap_table_setup(s);
    /* Ok - report back to driver */
    vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
}
/* Handle Translation Enable/Disable */
static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
{
    if (s->dmar_enabled == en) {
        return;
    }

    trace_vtd_dmar_enable(en);

    if (en) {
        s->dmar_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
    } else {
        s->dmar_enabled = false;

        /* Clear the index of Fault Recording Register */
        s->next_frcd_reg = 0;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
    }

    vtd_switch_address_space_all(s);
}
/* Handle Interrupt Remap Enable/Disable */
static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
{
    trace_vtd_ir_enable(en);

    if (en) {
        s->intr_enabled = true;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
    } else {
        s->intr_enabled = false;
        /* Ok - report back to driver */
        vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
    }
}
/* Handle write to Global Command Register */
static void vtd_handle_gcmd_write(IntelIOMMUState *s)
{
    uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
    uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
    uint32_t changed = status ^ val;

    trace_vtd_reg_write_gcmd(status, val);
    if (changed & VTD_GCMD_TE) {
        /* Translation enable/disable */
        vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
    }
    if (val & VTD_GCMD_SRTP) {
        /* Set/update the root-table pointer */
        vtd_handle_gcmd_srtp(s);
    }
    if (changed & VTD_GCMD_QIE) {
        /* Queued Invalidation Enable */
        vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
    }
    if (val & VTD_GCMD_SIRTP) {
        /* Set/update the interrupt remapping root-table pointer */
        vtd_handle_gcmd_sirtp(s);
    }
    if (changed & VTD_GCMD_IRE) {
        /* Interrupt remap enable/disable */
        vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
    }
}
/* Handle write to Context Command Register */
static void vtd_handle_ccmd_write(IntelIOMMUState *s)
{
    uint64_t ret;
    uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);

    /* Context-cache invalidation request */
    if (val & VTD_CCMD_ICC) {
        if (s->qi_enabled) {
            trace_vtd_err("Queued Invalidation enabled, "
                          "should not use register-based invalidation");
            return;
        }
        ret = vtd_context_cache_invalidate(s, val);
        /* Invalidation completed. Change something to show */
        vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
        ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
                                      ret);
    }
}
1617 static void vtd_handle_iotlb_write(IntelIOMMUState
*s
)
1620 uint64_t val
= vtd_get_quad_raw(s
, DMAR_IOTLB_REG
);
1622 /* IOTLB invalidation request */
1623 if (val
& VTD_TLB_IVT
) {
1624 if (s
->qi_enabled
) {
1625 trace_vtd_err("Queued Invalidation enabled, "
1626 "should not use register-based invalidation.");
1629 ret
= vtd_iotlb_flush(s
, val
);
1630 /* Invalidation completed. Change something to show */
1631 vtd_set_clear_mask_quad(s
, DMAR_IOTLB_REG
, VTD_TLB_IVT
, 0ULL);
1632 ret
= vtd_set_clear_mask_quad(s
, DMAR_IOTLB_REG
,
1633 VTD_TLB_FLUSH_GRANU_MASK_A
, ret
);
/* Fetch an Invalidation Descriptor from the Invalidation Queue */
static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
                             VTDInvDesc *inv_desc)
{
    dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
    if (dma_memory_read(&address_space_memory, addr, inv_desc,
                        sizeof(*inv_desc))) {
        trace_vtd_err("Read INV DESC failed.");
        inv_desc->lo = 0;
        inv_desc->hi = 0;
        return false;
    }
    inv_desc->lo = le64_to_cpu(inv_desc->lo);
    inv_desc->hi = le64_to_cpu(inv_desc->hi);
    return true;
}
static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
        (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
        /* Status Write */
        uint32_t status_data = (uint32_t)(inv_desc->lo >>
                                          VTD_INV_DESC_WAIT_DATA_SHIFT);

        assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));

        /* FIXME: need to be masked with HAW? */
        dma_addr_t status_addr = inv_desc->hi;
        trace_vtd_inv_desc_wait_sw(status_addr, status_data);
        status_data = cpu_to_le32(status_data);
        if (dma_memory_write(&address_space_memory, status_addr, &status_data,
                             sizeof(status_data))) {
            trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
            return false;
        }
    } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
        /* Interrupt flag */
        vtd_generate_completion_event(s);
    } else {
        trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
*s
,
1688 VTDInvDesc
*inv_desc
)
1690 uint16_t sid
, fmask
;
1692 if ((inv_desc
->lo
& VTD_INV_DESC_CC_RSVD
) || inv_desc
->hi
) {
1693 trace_vtd_inv_desc_cc_invalid(inv_desc
->hi
, inv_desc
->lo
);
1696 switch (inv_desc
->lo
& VTD_INV_DESC_CC_G
) {
1697 case VTD_INV_DESC_CC_DOMAIN
:
1698 trace_vtd_inv_desc_cc_domain(
1699 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc
->lo
));
1701 case VTD_INV_DESC_CC_GLOBAL
:
1702 vtd_context_global_invalidate(s
);
1705 case VTD_INV_DESC_CC_DEVICE
:
1706 sid
= VTD_INV_DESC_CC_SID(inv_desc
->lo
);
1707 fmask
= VTD_INV_DESC_CC_FM(inv_desc
->lo
);
1708 vtd_context_device_invalidate(s
, sid
, fmask
);
1712 trace_vtd_inv_desc_cc_invalid(inv_desc
->hi
, inv_desc
->lo
);
static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        if (am > VTD_MAMV) {
            trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}
*s
,
1759 VTDInvDesc
*inv_desc
)
1761 trace_vtd_inv_desc_iec(inv_desc
->iec
.granularity
,
1762 inv_desc
->iec
.index
,
1763 inv_desc
->iec
.index_mask
);
1765 vtd_iec_notify_all(s
, !inv_desc
->iec
.granularity
,
1766 inv_desc
->iec
.index
,
1767 inv_desc
->iec
.index_mask
);
static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEntry entry;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }

    entry.target_as = &vtd_dev_as->as;
    entry.addr_mask = sz - 1;
    entry.iova = addr;
    entry.perm = IOMMU_NONE;
    entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);

done:
    return true;
}
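/*
 * Example of the ATS size encoding handled above: with S set and
 * address bits 15:12 = x011, cto64(addr >> VTD_PAGE_SHIFT) counts the
 * two trailing 1s, so sz = 8K << 2 = 32K, and the trailing set bits
 * are then cleared to align the invalidation base address.
 */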
static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    trace_vtd_inv_qi_head(s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}
/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detects an invalid Tail pointer */
        trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}
/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}
static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}
static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}
*s
)
1965 uint32_t ics_reg
= vtd_get_long_raw(s
, DMAR_ICS_REG
);
1966 uint32_t iectl_reg
= vtd_get_long_raw(s
, DMAR_IECTL_REG
);
1968 if ((iectl_reg
& VTD_IECTL_IP
) && !(ics_reg
& VTD_ICS_IWC
)) {
1969 trace_vtd_reg_ics_clear_ip();
1970 vtd_set_clear_mask_long(s
, DMAR_IECTL_REG
, VTD_IECTL_IP
, 0);
static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Read MMIO over range.");
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}
static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    trace_vtd_reg_write(addr, size, val);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Write MMIO over range.");
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* 19:63 of IQT_REG is RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}
static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4k-page */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
                                     VTD_PCI_SLOT(vtd_as->devfn),
                                     VTD_PCI_FUNC(vtd_as->devfn),
                                     iotlb.iova);
    }

    return iotlb;
}
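/*
 * Worked example for the passthrough branch above: with DMAR disabled,
 * a device access at IOVA 0x12345678 returns iova == translated_addr ==
 * 0x12345000 with addr_mask 0xfff and perm IOMMU_RW, i.e. an identity
 * 4K mapping.
 */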
static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IntelIOMMUNotifierNode *node = NULL;
    IntelIOMMUNotifierNode *next_node = NULL;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set caching-mode=1 for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        node = g_malloc0(sizeof(*node));
        node->vtd_as = vtd_as;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* update notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->vtd_as == vtd_as) {
            if (new == IOMMU_NOTIFIER_NONE) {
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}
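/*
 * Note on the caching-mode requirement above: MAP notifiers (as
 * registered e.g. by vfio-pci for device assignment) are only usable
 * when the device was created with something like
 * "-device intel-iommu,caching-mode=on"; with caching mode off, QEMU
 * exits rather than running with mappings it cannot keep in sync.
 */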
static int vtd_post_load(void *opaque, int version_id)
{
    IntelIOMMUState *iommu = opaque;

    /*
     * Memory regions are dynamically turned on/off depending on
     * context entry configurations from the guest. After migration,
     * we need to make sure the memory regions are still correct.
     */
    vtd_switch_address_space_all(iommu);

    return 0;
}
static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .post_load = vtd_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};
static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE),
    DEFINE_PROP_END_OF_LIST(),
};
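/*
 * Typical invocation, for illustration: interrupt remapping with a
 * split irqchip, plus caching mode for assigned devices:
 *
 *   qemu-system-x86_64 -machine q35,accel=kvm,kernel-irqchip=split \
 *       -device intel-iommu,intremap=on,caching-mode=on ...
 *
 * ("intremap" is a property of the x86-iommu base class.)
 */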
/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        trace_vtd_err("Memory read failed for IRTE.");
        return -VTD_FR_IR_ROOT_INVAL;
    }

    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));

    if (!entry->irte.present) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                trace_vtd_err_irte_sid(index, sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}
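/*
 * Worked example for the vtd_svt_mask table above: with sid_q == 3 the
 * mask is 0xfff8, which ignores the three function-number bits of the
 * source-id, so an IRTE programmed with source-id 02:03.0 is accepted
 * for requests from any function 02:03.0 through 02:03.7, while
 * sid_q == 0 (mask 0xffff) requires an exact bus/devfn match.
 */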
/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             VTDIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define  VTD_IR_APIC_DEST_MASK         (0xff00ULL)
#define  VTD_IR_APIC_DEST_SHIFT        (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
            VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return 0;
}
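/*
 * Example for the EIME handling above: without extended interrupt mode
 * the IRTE carries the 8-bit xAPIC destination in bits 15:8 of dest_id,
 * so dest_id 0x0000ff00 yields irq->dest == 0xff (broadcast); in x2APIC
 * mode (EIME set) the full 32-bit dest_id is used as-is.
 */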
/* Generate one MSI message from VTDIrq info */
static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
{
    VTD_MSIMessage msg = {};

    /* Generate address bits */
    msg.dest_mode = irq->dest_mode;
    msg.redir_hint = irq->redir_hint;
    msg.dest = irq->dest;
    msg.__addr_hi = irq->dest & 0xffffff00;
    msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep this from original MSI address bits */
    msg.__not_used = irq->msi_addr_last_bits;

    /* Generate data bits */
    msg.vector = irq->vector;
    msg.delivery_mode = irq->delivery_mode;
    msg.level = 1;
    msg.trigger_mode = irq->trigger_mode;

    msg_out->address = msg.msi_addr;
    msg_out->data = msg.msi_data;
}
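/*
 * Rough example of the resulting message, assuming VTD_MSIMessage
 * follows the standard xAPIC MSI encoding: vector 0x31, fixed delivery,
 * edge trigger, physical destination 1 becomes address 0xfee01000 and
 * data 0x4031 (bit 14 is the level field, which this function always
 * sets to 1; bits 7:0 carry the vector).
 */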
/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    VTDIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        trace_vtd_err("MSI address high 32 bits non-zero when "
                      "Interrupt Remapping enabled.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        trace_vtd_err("MSI addr low 32 bit invalid.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatible mode. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);

#define  VTD_IR_MSI_DATA_SUBHANDLE       (0x0000ffff)
#define  VTD_IR_MSI_DATA_RESERVED        (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* IOAPIC entry vector should be aligned with IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE.
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * We'd better keep the last two bits, assuming that the guest OS
     * might modify them. Keeping them does not hurt after all.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate VTDIrq to MSI message */
    vtd_generate_msi_message(&irq, translated);

out:
    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}
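/*
 * Example of the index computation above: a remappable-format MSI
 * address with index_h == 1 and index_l == 0x34 selects IRTE index
 * (1 << 15) | 0x34 == 0x8034; if SHV (sub_valid) is set, the 16-bit
 * subhandle in the MSI data is added on top of that.
 */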
static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}
static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}
static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};
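/*
 * Both .impl and .valid are pinned to 4 bytes above: an MSI write into
 * the 0xfeeXXXXX window is always a single 32-bit data write, so the IR
 * region never has to assemble or split accesses of other sizes.
 */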
VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
                            PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;

        /*
         * Memory region relationships look like this (address ranges
         * show only the lower 32 bits for brevity):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | vtd_root        | 00000000-ffffffff |        0 |
         * | intel_iommu     | 00000000-ffffffff |        1 |
         * | vtd_sys_alias   | 00000000-ffffffff |        1 |
         * | intel_iommu_ir  | fee00000-feefffff |       64 |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for the
         * vtd_sys_alias and intel_iommu regions. The IR region is
         * always enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu),
                                 TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s),
                                 "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            MEMORY_REGION(&vtd_dev_as->iommu),
                                            1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}
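/*
 * The lookup table above is keyed by the PCIBus pointer rather than by
 * bus number because bus numbers are assigned by the guest firmware and
 * are not stable (or even known) when devices are wired up; the
 * VTDAddressSpace for each devfn is then created lazily on first use.
 */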
/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;

    /*
     * Note: all the code in this function assumes IOVAs are no wider
     * than the VT-d guest address width (as the VT-d spec requires);
     * otherwise we would need to consider 64-bit overflow.
     */

    if (end > VTD_ADDRESS_SIZE(VTD_HOST_ADDRESS_WIDTH)) {
        /*
         * There is no need to unmap anything beyond the VT-d
         * supported address space size.
         */
        end = VTD_ADDRESS_SIZE(VTD_HOST_ADDRESS_WIDTH);
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot form a correct mask. Enlarge it to the
         * smallest power of two that covers the range.
         */
        int n = 64 - clz64(size);
        if (n > VTD_HOST_ADDRESS_WIDTH) {
            /* should not happen, but in case it happens, limit it */
            n = VTD_HOST_ADDRESS_WIDTH;
        }
        size = 1ULL << n;
    }

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    memory_region_notify_one(n, &entry);
}
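/*
 * Worked example of the mask adjustment above: a notifier with start
 * 0x1000 and end 0x4000 gives size 0x3000, which is not a power of two;
 * it is rounded up to 0x4000, so the invalidation is sent as iova 0x0
 * with addr_mask 0x3fff, covering a superset of the original range.
 */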
static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}
static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}
static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. No matter what, we release existing mappings first
     * (for UNMAP-only notifiers this means flushing their caches).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }

    return;
}
/* Do the initialization. It will also be called on reset, so take care
 * when adding new initialization code.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
    uint8_t aw_bits = VTD_HOST_ADDRESS_WIDTH;

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND |
             VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS |
             VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(VTD_HOST_ADDRESS_WIDTH);
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    /*
     * Reserved field masks for second-level paging entries (SPTEs)
     */
    vtd_paging_entry_rsvd_field[0] = ~0ULL;
    vtd_paging_entry_rsvd_field[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[5] = VTD_SPTE_LPAGE_L1_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[6] = VTD_SPTE_LPAGE_L2_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[7] = VTD_SPTE_LPAGE_L3_RSVD_MASK(aw_bits);
    vtd_paging_entry_rsvd_field[8] = VTD_SPTE_LPAGE_L4_RSVD_MASK(aw_bits);

    if (x86_iommu->intr_supported) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations whose PLMR and PHMR fields are
     * reported as Clear in the CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}
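/*
 * Reading one of the definitions above as an example:
 * vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0) resets the
 * Global Command Register to 0 and leaves only bits 31:23 writable,
 * which are exactly the nine command bits (TE, SRTP, SFL, EAFL, WBF,
 * QIE, IRE, SIRTP, CFI).
 */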
/* Should not reset address_spaces when reset because devices will still use
 * the address space they got at first (won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);

    /*
     * On device reset, throw away all mappings and external caches.
     */
    vtd_address_space_unmap_all(s);
}
static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}
static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
    if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        error_setg(errp, "Intel Interrupt Remapping cannot work with "
                         "kernel-irqchip=on, please use 'split|off'.");
        return false;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu->intr_supported ?
                                              ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    return true;
}
static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    PCMachineState *pcms =
        PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
    PCIBus *bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    if (!pcms) {
        error_setg(errp, "Machine-type '%s' not supported by intel-iommu",
                   mc->name);
        return;
    }

    bus = pcms->bus;
    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->notifiers_list);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}
static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}
static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};
static void vtd_iommu_memory_region_class_init(ObjectClass *klass,
                                               void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = vtd_iommu_translate;
    imrc->notify_flag_changed = vtd_iommu_notify_flag_changed;
    imrc->replay = vtd_iommu_replay;
}
static const TypeInfo vtd_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_INTEL_IOMMU_MEMORY_REGION,
    .class_init = vtd_iommu_memory_region_class_init,
};
static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
    type_register_static(&vtd_iommu_memory_region_info);
}

type_init(vtd_register_types)