Commit | Line | Data |
---|---|---|
1da12ec4 LT |
1 | /* |
2 | * QEMU emulation of an Intel IOMMU (VT-d) | |
3 | * (DMA Remapping device) | |
4 | * | |
5 | * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com> | |
6 | * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com> | |
7 | * | |
8 | * This program is free software; you can redistribute it and/or modify | |
9 | * it under the terms of the GNU General Public License as published by | |
10 | * the Free Software Foundation; either version 2 of the License, or | |
11 | * (at your option) any later version. | |
12 | ||
13 | * This program is distributed in the hope that it will be useful, | |
14 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
15 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
16 | * GNU General Public License for more details. | |
17 | ||
18 | * You should have received a copy of the GNU General Public License along | |
19 | * with this program; if not, see <http://www.gnu.org/licenses/>. | |
20 | */ | |
21 | ||
b6a0aa05 | 22 | #include "qemu/osdep.h" |
4684a204 | 23 | #include "qemu/error-report.h" |
6333e93c | 24 | #include "qapi/error.h" |
1da12ec4 LT |
25 | #include "hw/sysbus.h" |
26 | #include "exec/address-spaces.h" | |
27 | #include "intel_iommu_internal.h" | |
7df953bd | 28 | #include "hw/pci/pci.h" |
3cb3b154 | 29 | #include "hw/pci/pci_bus.h" |
621d983a | 30 | #include "hw/i386/pc.h" |
dea651a9 | 31 | #include "hw/i386/apic-msidef.h" |
04af0e18 PX |
32 | #include "hw/boards.h" |
33 | #include "hw/i386/x86-iommu.h" | |
cb135f59 | 34 | #include "hw/pci-host/q35.h" |
4684a204 | 35 | #include "sysemu/kvm.h" |
32946019 | 36 | #include "hw/i386/apic_internal.h" |
fb506e70 | 37 | #include "kvm_i386.h" |
bc535e59 | 38 | #include "trace.h" |
1da12ec4 | 39 | |
fb43cf73 LY |
40 | /* context entry operations */ |
41 | #define VTD_CE_GET_RID2PASID(ce) \ | |
42 | ((ce)->val[1] & VTD_SM_CONTEXT_ENTRY_RID2PASID_MASK) | |
43 | #define VTD_CE_GET_PASID_DIR_TABLE(ce) \ | |
44 | ((ce)->val[0] & VTD_PASID_DIR_BASE_ADDR_MASK) | |
45 | ||
46 | /* pe operations */ | |
47 | #define VTD_PE_GET_TYPE(pe) ((pe)->val[0] & VTD_SM_PASID_ENTRY_PGTT) | |
48 | #define VTD_PE_GET_LEVEL(pe) (2 + (((pe)->val[0] >> 2) & VTD_SM_PASID_ENTRY_AW)) | |
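/*
 * Helper used by the translation paths below: on a non-zero fault return
 * code it turns the negative error back into a VTDFaultReason, suppresses
 * the report when the relevant FPD (fault processing disable) bit is set
 * and the fault is a qualified one, reports a DMAR fault otherwise, and
 * then jumps to the caller's "error" label.
 */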
49 | #define VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write) {\ | |
50 | if (ret_fr) { \ | |
51 | ret_fr = -ret_fr; \ | |
52 | if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) { \ | |
53 | trace_vtd_fault_disabled(); \ | |
54 | } else { \ | |
55 | vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write); \ | |
56 | } \ | |
57 | goto error; \ | |
58 | } \ | |
59 | } | |
60 | ||
2cc9ddcc | 61 | static void vtd_address_space_refresh_all(IntelIOMMUState *s); |
c28b535d | 62 | static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n); |
2cc9ddcc | 63 | |
1da12ec4 LT |
64 | static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val, |
65 | uint64_t wmask, uint64_t w1cmask) | |
66 | { | |
67 | stq_le_p(&s->csr[addr], val); | |
68 | stq_le_p(&s->wmask[addr], wmask); | |
69 | stq_le_p(&s->w1cmask[addr], w1cmask); | |
70 | } | |
71 | ||
72 | static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask) | |
73 | { | |
74 | stq_le_p(&s->womask[addr], mask); | |
75 | } | |
76 | ||
77 | static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val, | |
78 | uint32_t wmask, uint32_t w1cmask) | |
79 | { | |
80 | stl_le_p(&s->csr[addr], val); | |
81 | stl_le_p(&s->wmask[addr], wmask); | |
82 | stl_le_p(&s->w1cmask[addr], w1cmask); | |
83 | } | |
84 | ||
85 | static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask) | |
86 | { | |
87 | stl_le_p(&s->womask[addr], mask); | |
88 | } | |
89 | ||
90 | /* "External" get/set operations */ | |
91 | static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val) | |
92 | { | |
93 | uint64_t oldval = ldq_le_p(&s->csr[addr]); | |
94 | uint64_t wmask = ldq_le_p(&s->wmask[addr]); | |
95 | uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]); | |
96 | stq_le_p(&s->csr[addr], | |
97 | ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val)); | |
98 | } | |
99 | ||
100 | static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val) | |
101 | { | |
102 | uint32_t oldval = ldl_le_p(&s->csr[addr]); | |
103 | uint32_t wmask = ldl_le_p(&s->wmask[addr]); | |
104 | uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]); | |
105 | stl_le_p(&s->csr[addr], | |
106 | ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val)); | |
107 | } | |
108 | ||
109 | static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr) | |
110 | { | |
111 | uint64_t val = ldq_le_p(&s->csr[addr]); | |
112 | uint64_t womask = ldq_le_p(&s->womask[addr]); | |
113 | return val & ~womask; | |
114 | } | |
115 | ||
116 | static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr) | |
117 | { | |
118 | uint32_t val = ldl_le_p(&s->csr[addr]); | |
119 | uint32_t womask = ldl_le_p(&s->womask[addr]); | |
120 | return val & ~womask; | |
121 | } | |
122 | ||
123 | /* "Internal" get/set operations */ | |
124 | static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr) | |
125 | { | |
126 | return ldq_le_p(&s->csr[addr]); | |
127 | } | |
128 | ||
129 | static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr) | |
130 | { | |
131 | return ldl_le_p(&s->csr[addr]); | |
132 | } | |
133 | ||
134 | static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val) | |
135 | { | |
136 | stq_le_p(&s->csr[addr], val); | |
137 | } | |
138 | ||
139 | static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr, | |
140 | uint32_t clear, uint32_t mask) | |
141 | { | |
142 | uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask; | |
143 | stl_le_p(&s->csr[addr], new_val); | |
144 | return new_val; | |
145 | } | |
146 | ||
147 | static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr, | |
148 | uint64_t clear, uint64_t mask) | |
149 | { | |
150 | uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask; | |
151 | stq_le_p(&s->csr[addr], new_val); | |
152 | return new_val; | |
153 | } | |
154 | ||
1d9efa73 PX |
155 | static inline void vtd_iommu_lock(IntelIOMMUState *s) |
156 | { | |
157 | qemu_mutex_lock(&s->iommu_lock); | |
158 | } | |
159 | ||
160 | static inline void vtd_iommu_unlock(IntelIOMMUState *s) | |
161 | { | |
162 | qemu_mutex_unlock(&s->iommu_lock); | |
163 | } | |
164 | ||
2811af3b PX |
165 | static void vtd_update_scalable_state(IntelIOMMUState *s) |
166 | { | |
167 | uint64_t val = vtd_get_quad_raw(s, DMAR_RTADDR_REG); | |
168 | ||
169 | if (s->scalable_mode) { | |
170 | s->root_scalable = val & VTD_RTADDR_SMT; | |
171 | } | |
172 | } | |
173 | ||
4f8a62a9 PX |
174 | /* Whether the address space needs to notify new mappings */ |
175 | static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as) | |
176 | { | |
177 | return as->notifier_flags & IOMMU_NOTIFIER_MAP; | |
178 | } | |
179 | ||
b5a280c0 LT |
180 | /* GHashTable functions */ |
181 | static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2) | |
182 | { | |
183 | return *((const uint64_t *)v1) == *((const uint64_t *)v2); | |
184 | } | |
185 | ||
186 | static guint vtd_uint64_hash(gconstpointer v) | |
187 | { | |
188 | return (guint)*(const uint64_t *)v; | |
189 | } | |
190 | ||
191 | static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value, | |
192 | gpointer user_data) | |
193 | { | |
194 | VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; | |
195 | uint16_t domain_id = *(uint16_t *)user_data; | |
196 | return entry->domain_id == domain_id; | |
197 | } | |
198 | ||
d66b969b JW |
199 | /* The shift of an addr for a certain level of paging structure */ |
200 | static inline uint32_t vtd_slpt_level_shift(uint32_t level) | |
201 | { | |
7e58326a | 202 | assert(level != 0); |
d66b969b JW |
203 | return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS; |
204 | } | |
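/*
 * vtd_slpt_level_shift() maps a paging level to the iova bit where that
 * level's index starts: level 1 (4K pages) -> 12, level 2 (2M) -> 21,
 * level 3 (1G) -> 30, level 4 -> 39.
 */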
205 | ||
206 | static inline uint64_t vtd_slpt_level_page_mask(uint32_t level) | |
207 | { | |
208 | return ~((1ULL << vtd_slpt_level_shift(level)) - 1); | |
209 | } | |
210 | ||
b5a280c0 LT |
211 | static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value, |
212 | gpointer user_data) | |
213 | { | |
214 | VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value; | |
215 | VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data; | |
d66b969b JW |
216 | uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask; |
217 | uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K; | |
b5a280c0 | 218 | return (entry->domain_id == info->domain_id) && |
d66b969b JW |
219 | (((entry->gfn & info->mask) == gfn) || |
220 | (entry->gfn == gfn_tlb)); | |
b5a280c0 LT |
221 | } |
222 | ||
d92fa2dc | 223 | /* Reset the context_cache_gen of all VTDAddressSpaces to zero and set the |
1d9efa73 | 224 | * context_cache_gen of IntelIOMMUState to 1. Must be called with IOMMU lock held. |
d92fa2dc | 225 | */ |
1d9efa73 | 226 | static void vtd_reset_context_cache_locked(IntelIOMMUState *s) |
d92fa2dc | 227 | { |
d92fa2dc | 228 | VTDAddressSpace *vtd_as; |
7df953bd KO |
229 | VTDBus *vtd_bus; |
230 | GHashTableIter bus_it; | |
d92fa2dc LT |
231 | uint32_t devfn_it; |
232 | ||
7feb51b7 PX |
233 | trace_vtd_context_cache_reset(); |
234 | ||
7df953bd KO |
235 | g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr); |
236 | ||
7df953bd | 237 | while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) { |
bf33cc75 | 238 | for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { |
7df953bd | 239 | vtd_as = vtd_bus->dev_as[devfn_it]; |
d92fa2dc LT |
240 | if (!vtd_as) { |
241 | continue; | |
242 | } | |
243 | vtd_as->context_cache_entry.context_cache_gen = 0; | |
244 | } | |
245 | } | |
246 | s->context_cache_gen = 1; | |
247 | } | |
248 | ||
1d9efa73 PX |
249 | /* Must be called with IOMMU lock held. */ |
250 | static void vtd_reset_iotlb_locked(IntelIOMMUState *s) | |
b5a280c0 LT |
251 | { |
252 | assert(s->iotlb); | |
253 | g_hash_table_remove_all(s->iotlb); | |
254 | } | |
255 | ||
1d9efa73 PX |
256 | static void vtd_reset_iotlb(IntelIOMMUState *s) |
257 | { | |
258 | vtd_iommu_lock(s); | |
259 | vtd_reset_iotlb_locked(s); | |
260 | vtd_iommu_unlock(s); | |
261 | } | |
262 | ||
06aba4ca PX |
263 | static void vtd_reset_caches(IntelIOMMUState *s) |
264 | { | |
265 | vtd_iommu_lock(s); | |
266 | vtd_reset_iotlb_locked(s); | |
267 | vtd_reset_context_cache_locked(s); | |
268 | vtd_iommu_unlock(s); | |
269 | } | |
270 | ||
bacabb0a | 271 | static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id, |
d66b969b JW |
272 | uint32_t level) |
273 | { | |
274 | return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) | | |
275 | ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT); | |
276 | } | |
277 | ||
278 | static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level) | |
279 | { | |
280 | return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K; | |
281 | } | |
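/*
 * The IOTLB key keeps the gfn in its low bits and folds the source-id and
 * the paging level into higher bit positions, so cached translations of
 * different devices or different page sizes never collide.  The gfn is
 * the iova rounded down to the page boundary of that level, shifted by 12.
 */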
282 | ||
1d9efa73 | 283 | /* Must be called with IOMMU lock held */ |
b5a280c0 LT |
284 | static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id, |
285 | hwaddr addr) | |
286 | { | |
d66b969b | 287 | VTDIOTLBEntry *entry; |
b5a280c0 | 288 | uint64_t key; |
d66b969b JW |
289 | int level; |
290 | ||
291 | for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) { | |
292 | key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level), | |
293 | source_id, level); | |
294 | entry = g_hash_table_lookup(s->iotlb, &key); | |
295 | if (entry) { | |
296 | goto out; | |
297 | } | |
298 | } | |
b5a280c0 | 299 | |
d66b969b JW |
300 | out: |
301 | return entry; | |
b5a280c0 LT |
302 | } |
303 | ||
1d9efa73 | 304 | /* Must be called with IOMMU lock held */ |
b5a280c0 LT |
305 | static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id, |
306 | uint16_t domain_id, hwaddr addr, uint64_t slpte, | |
07f7b733 | 307 | uint8_t access_flags, uint32_t level) |
b5a280c0 LT |
308 | { |
309 | VTDIOTLBEntry *entry = g_malloc(sizeof(*entry)); | |
310 | uint64_t *key = g_malloc(sizeof(*key)); | |
d66b969b | 311 | uint64_t gfn = vtd_get_iotlb_gfn(addr, level); |
b5a280c0 | 312 | |
6c441e1d | 313 | trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id); |
b5a280c0 | 314 | if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) { |
6c441e1d | 315 | trace_vtd_iotlb_reset("iotlb exceeds size limit"); |
1d9efa73 | 316 | vtd_reset_iotlb_locked(s); |
b5a280c0 LT |
317 | } |
318 | ||
319 | entry->gfn = gfn; | |
320 | entry->domain_id = domain_id; | |
321 | entry->slpte = slpte; | |
07f7b733 | 322 | entry->access_flags = access_flags; |
d66b969b JW |
323 | entry->mask = vtd_slpt_level_page_mask(level); |
324 | *key = vtd_get_iotlb_key(gfn, source_id, level); | |
b5a280c0 LT |
325 | g_hash_table_replace(s->iotlb, key, entry); |
326 | } | |
327 | ||
1da12ec4 LT |
328 | /* Given the register addresses of both the message data and the message |
329 | * address, generate an interrupt via MSI. |
330 | */ | |
331 | static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg, | |
332 | hwaddr mesg_data_reg) | |
333 | { | |
32946019 | 334 | MSIMessage msi; |
1da12ec4 LT |
335 | |
336 | assert(mesg_data_reg < DMAR_REG_SIZE); | |
337 | assert(mesg_addr_reg < DMAR_REG_SIZE); | |
338 | ||
32946019 RK |
339 | msi.address = vtd_get_long_raw(s, mesg_addr_reg); |
340 | msi.data = vtd_get_long_raw(s, mesg_data_reg); | |
1da12ec4 | 341 | |
7feb51b7 PX |
342 | trace_vtd_irq_generate(msi.address, msi.data); |
343 | ||
32946019 | 344 | apic_get_class()->send_msi(&msi); |
1da12ec4 LT |
345 | } |
346 | ||
347 | /* Generate a fault event to software via MSI if conditions are met. | |
348 | * Notice that the value of FSTS_REG being passed to it should be the one | |
349 | * before any update. | |
350 | */ | |
351 | static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts) | |
352 | { | |
353 | if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO || | |
354 | pre_fsts & VTD_FSTS_IQE) { | |
1376211f PX |
355 | error_report_once("There are previous interrupt conditions " |
356 | "to be serviced by software, fault event " | |
357 | "is not generated"); | |
1da12ec4 LT |
358 | return; |
359 | } | |
360 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP); | |
361 | if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) { | |
1376211f | 362 | error_report_once("Interrupt Mask set, irq is not generated"); |
1da12ec4 LT |
363 | } else { |
364 | vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); | |
365 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); | |
366 | } | |
367 | } | |
368 | ||
369 | /* Check if the Fault (F) field of the Fault Recording Register referenced by | |
370 | * @index is Set. | |
371 | */ | |
372 | static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index) | |
373 | { | |
374 | /* Each reg is 128-bit */ | |
375 | hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); | |
376 | addr += 8; /* Access the high 64-bit half */ | |
377 | ||
378 | assert(index < DMAR_FRCD_REG_NR); | |
379 | ||
380 | return vtd_get_quad_raw(s, addr) & VTD_FRCD_F; | |
381 | } | |
382 | ||
383 | /* Update the PPF field of Fault Status Register. | |
384 | * Should be called whenever the F field of any fault recording |
385 | * register is changed. |
386 | */ | |
387 | static void vtd_update_fsts_ppf(IntelIOMMUState *s) | |
388 | { | |
389 | uint32_t i; | |
390 | uint32_t ppf_mask = 0; | |
391 | ||
392 | for (i = 0; i < DMAR_FRCD_REG_NR; i++) { | |
393 | if (vtd_is_frcd_set(s, i)) { | |
394 | ppf_mask = VTD_FSTS_PPF; | |
395 | break; | |
396 | } | |
397 | } | |
398 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask); | |
7feb51b7 | 399 | trace_vtd_fsts_ppf(!!ppf_mask); |
1da12ec4 LT |
400 | } |
401 | ||
402 | static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index) | |
403 | { | |
404 | /* Each reg is 128-bit */ | |
405 | hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); | |
406 | addr += 8; /* Access the high 64-bit half */ | |
407 | ||
408 | assert(index < DMAR_FRCD_REG_NR); | |
409 | ||
410 | vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F); | |
411 | vtd_update_fsts_ppf(s); | |
412 | } | |
413 | ||
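/*
 * Layout of a 128-bit fault recording register: the low 64 bits carry the
 * faulting address (FI field), the high 64 bits carry the source-id, the
 * fault reason, the T bit for read requests and the F bit that marks the
 * record as valid.
 */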
414 | /* Must not update F field now, should be done later */ | |
415 | static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index, | |
416 | uint16_t source_id, hwaddr addr, | |
417 | VTDFaultReason fault, bool is_write) | |
418 | { | |
419 | uint64_t hi = 0, lo; | |
420 | hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4); | |
421 | ||
422 | assert(index < DMAR_FRCD_REG_NR); | |
423 | ||
424 | lo = VTD_FRCD_FI(addr); | |
425 | hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault); | |
426 | if (!is_write) { | |
427 | hi |= VTD_FRCD_T; | |
428 | } | |
429 | vtd_set_quad_raw(s, frcd_reg_addr, lo); | |
430 | vtd_set_quad_raw(s, frcd_reg_addr + 8, hi); | |
7feb51b7 PX |
431 | |
432 | trace_vtd_frr_new(index, hi, lo); | |
1da12ec4 LT |
433 | } |
434 | ||
435 | /* Try to collapse multiple pending faults from the same requester */ | |
436 | static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id) | |
437 | { | |
438 | uint32_t i; | |
439 | uint64_t frcd_reg; | |
440 | hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */ | |
441 | ||
442 | for (i = 0; i < DMAR_FRCD_REG_NR; i++) { | |
443 | frcd_reg = vtd_get_quad_raw(s, addr); | |
1da12ec4 LT |
444 | if ((frcd_reg & VTD_FRCD_F) && |
445 | ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) { | |
446 | return true; | |
447 | } | |
448 | addr += 16; /* 128-bit for each */ | |
449 | } | |
450 | return false; | |
451 | } | |
452 | ||
453 | /* Log and report a DMAR (address translation) fault to software */ |
454 | static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id, | |
455 | hwaddr addr, VTDFaultReason fault, | |
456 | bool is_write) | |
457 | { | |
458 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); | |
459 | ||
460 | assert(fault < VTD_FR_MAX); | |
461 | ||
462 | if (fault == VTD_FR_RESERVED_ERR) { | |
463 | /* This is not a normal fault reason case. Drop it. */ | |
464 | return; | |
465 | } | |
7feb51b7 PX |
466 | |
467 | trace_vtd_dmar_fault(source_id, fault, addr, is_write); | |
468 | ||
1da12ec4 | 469 | if (fsts_reg & VTD_FSTS_PFO) { |
1376211f PX |
470 | error_report_once("New fault is not recorded due to " |
471 | "Primary Fault Overflow"); | |
1da12ec4 LT |
472 | return; |
473 | } | |
7feb51b7 | 474 | |
1da12ec4 | 475 | if (vtd_try_collapse_fault(s, source_id)) { |
1376211f PX |
476 | error_report_once("New fault is not recorded due to " |
477 | "compression of faults"); | |
1da12ec4 LT |
478 | return; |
479 | } | |
7feb51b7 | 480 | |
1da12ec4 | 481 | if (vtd_is_frcd_set(s, s->next_frcd_reg)) { |
1376211f PX |
482 | error_report_once("Next Fault Recording Reg is used, " |
483 | "new fault is not recorded, set PFO field"); | |
1da12ec4 LT |
484 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO); |
485 | return; | |
486 | } | |
487 | ||
488 | vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write); | |
489 | ||
490 | if (fsts_reg & VTD_FSTS_PPF) { | |
1376211f PX |
491 | error_report_once("There are pending faults already, " |
492 | "fault event is not generated"); | |
1da12ec4 LT |
493 | vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); |
494 | s->next_frcd_reg++; | |
495 | if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { | |
496 | s->next_frcd_reg = 0; | |
497 | } | |
498 | } else { | |
499 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK, | |
500 | VTD_FSTS_FRI(s->next_frcd_reg)); | |
501 | vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */ | |
502 | s->next_frcd_reg++; | |
503 | if (s->next_frcd_reg == DMAR_FRCD_REG_NR) { | |
504 | s->next_frcd_reg = 0; | |
505 | } | |
506 | /* This case actually causes the PPF to be Set, |
507 | * so generate the fault event (interrupt). |
508 | */ | |
509 | vtd_generate_fault_event(s, fsts_reg); | |
510 | } | |
511 | } | |
512 | ||
ed7b8fbc LT |
513 | /* Handle Invalidation Queue Error (IQE) conditions of the queued |
514 | * invalidation interface. |
515 | */ | |
516 | static void vtd_handle_inv_queue_error(IntelIOMMUState *s) | |
517 | { | |
518 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); | |
519 | ||
520 | vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE); | |
521 | vtd_generate_fault_event(s, fsts_reg); | |
522 | } | |
523 | ||
524 | /* Set the IWC field and try to generate an invalidation completion interrupt */ | |
525 | static void vtd_generate_completion_event(IntelIOMMUState *s) | |
526 | { | |
ed7b8fbc | 527 | if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) { |
bc535e59 | 528 | trace_vtd_inv_desc_wait_irq("One pending, skip current"); |
ed7b8fbc LT |
529 | return; |
530 | } | |
531 | vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC); | |
532 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP); | |
533 | if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) { | |
bc535e59 PX |
534 | trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, " |
535 | "new event not generated"); | |
ed7b8fbc LT |
536 | return; |
537 | } else { | |
538 | /* Generate the interrupt event */ | |
bc535e59 | 539 | trace_vtd_inv_desc_wait_irq("Generating complete event"); |
ed7b8fbc LT |
540 | vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); |
541 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); | |
542 | } | |
543 | } | |
544 | ||
fb43cf73 LY |
545 | static inline bool vtd_root_entry_present(IntelIOMMUState *s, |
546 | VTDRootEntry *re, | |
547 | uint8_t devfn) | |
1da12ec4 | 548 | { |
fb43cf73 LY |
549 | if (s->root_scalable && devfn > UINT8_MAX / 2) { |
550 | return re->hi & VTD_ROOT_ENTRY_P; | |
551 | } | |
552 | ||
553 | return re->lo & VTD_ROOT_ENTRY_P; | |
1da12ec4 LT |
554 | } |
555 | ||
556 | static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index, | |
557 | VTDRootEntry *re) | |
558 | { | |
559 | dma_addr_t addr; | |
560 | ||
561 | addr = s->root + index * sizeof(*re); | |
562 | if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) { | |
fb43cf73 | 563 | re->lo = 0; |
1da12ec4 LT |
564 | return -VTD_FR_ROOT_TABLE_INV; |
565 | } | |
fb43cf73 LY |
566 | re->lo = le64_to_cpu(re->lo); |
567 | re->hi = le64_to_cpu(re->hi); | |
1da12ec4 LT |
568 | return 0; |
569 | } | |
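/*
 * The root table is indexed by bus number and each VTDRootEntry is
 * 16 bytes, so the entry for bus N sits at s->root + N * 16.
 */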
570 | ||
8f7d7161 | 571 | static inline bool vtd_ce_present(VTDContextEntry *context) |
1da12ec4 LT |
572 | { |
573 | return context->lo & VTD_CONTEXT_ENTRY_P; | |
574 | } | |
575 | ||
fb43cf73 LY |
576 | static int vtd_get_context_entry_from_root(IntelIOMMUState *s, |
577 | VTDRootEntry *re, | |
578 | uint8_t index, | |
1da12ec4 LT |
579 | VTDContextEntry *ce) |
580 | { | |
fb43cf73 | 581 | dma_addr_t addr, ce_size; |
1da12ec4 | 582 | |
6c441e1d | 583 | /* we have already checked that the root entry is present */ |
fb43cf73 LY |
584 | ce_size = s->root_scalable ? VTD_CTX_ENTRY_SCALABLE_SIZE : |
585 | VTD_CTX_ENTRY_LEGACY_SIZE; | |
586 | ||
587 | if (s->root_scalable && index > UINT8_MAX / 2) { | |
588 | index = index & (~VTD_DEVFN_CHECK_MASK); | |
589 | addr = re->hi & VTD_ROOT_ENTRY_CTP; | |
590 | } else { | |
591 | addr = re->lo & VTD_ROOT_ENTRY_CTP; | |
592 | } | |
593 | ||
594 | addr = addr + index * ce_size; | |
595 | if (dma_memory_read(&address_space_memory, addr, ce, ce_size)) { | |
1da12ec4 LT |
596 | return -VTD_FR_CONTEXT_TABLE_INV; |
597 | } | |
fb43cf73 | 598 | |
1da12ec4 LT |
599 | ce->lo = le64_to_cpu(ce->lo); |
600 | ce->hi = le64_to_cpu(ce->hi); | |
fb43cf73 LY |
601 | if (ce_size == VTD_CTX_ENTRY_SCALABLE_SIZE) { |
602 | ce->val[2] = le64_to_cpu(ce->val[2]); | |
603 | ce->val[3] = le64_to_cpu(ce->val[3]); | |
604 | } | |
1da12ec4 LT |
605 | return 0; |
606 | } | |
607 | ||
8f7d7161 | 608 | static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce) |
1da12ec4 LT |
609 | { |
610 | return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR; | |
611 | } | |
612 | ||
37f51384 | 613 | static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw) |
1da12ec4 | 614 | { |
37f51384 | 615 | return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw); |
1da12ec4 LT |
616 | } |
617 | ||
618 | /* Whether the pte indicates the address of the page frame */ | |
619 | static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level) | |
620 | { | |
621 | return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK); | |
622 | } | |
623 | ||
624 | /* Get the content of a spte located in @base_addr[@index] */ | |
625 | static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index) | |
626 | { | |
627 | uint64_t slpte; | |
628 | ||
629 | assert(index < VTD_SL_PT_ENTRY_NR); | |
630 | ||
631 | if (dma_memory_read(&address_space_memory, | |
632 | base_addr + index * sizeof(slpte), &slpte, | |
633 | sizeof(slpte))) { | |
634 | slpte = (uint64_t)-1; | |
635 | return slpte; | |
636 | } | |
637 | slpte = le64_to_cpu(slpte); | |
638 | return slpte; | |
639 | } | |
640 | ||
6e905564 PX |
641 | /* Given an iova and the level of the paging structure, return the offset |
642 | * within the current level. |
1da12ec4 | 643 | */ |
6e905564 | 644 | static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level) |
1da12ec4 | 645 | { |
6e905564 | 646 | return (iova >> vtd_slpt_level_shift(level)) & |
1da12ec4 LT |
647 | ((1ULL << VTD_SL_LEVEL_BITS) - 1); |
648 | } | |
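/*
 * Each level consumes VTD_SL_LEVEL_BITS (9) bits of the iova, e.g. for a
 * 4-level table level 4 indexes iova bits 47:39, level 3 bits 38:30,
 * level 2 bits 29:21 and level 1 bits 20:12.
 */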
649 | ||
650 | /* Check Capability Register to see if the @level of page-table is supported */ | |
651 | static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level) | |
652 | { | |
653 | return VTD_CAP_SAGAW_MASK & s->cap & | |
654 | (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT)); | |
655 | } | |
656 | ||
fb43cf73 LY |
657 | /* Return true if check passed, otherwise false */ |
658 | static inline bool vtd_pe_type_check(X86IOMMUState *x86_iommu, | |
659 | VTDPASIDEntry *pe) | |
660 | { | |
661 | switch (VTD_PE_GET_TYPE(pe)) { | |
662 | case VTD_SM_PASID_ENTRY_FLT: | |
663 | case VTD_SM_PASID_ENTRY_SLT: | |
664 | case VTD_SM_PASID_ENTRY_NESTED: | |
665 | break; | |
666 | case VTD_SM_PASID_ENTRY_PT: | |
667 | if (!x86_iommu->pt_supported) { | |
668 | return false; | |
669 | } | |
670 | break; | |
671 | default: | |
672 | /* Unknown type */ |
673 | return false; | |
674 | } | |
675 | return true; | |
676 | } | |
677 | ||
678 | static int vtd_get_pasid_dire(dma_addr_t pasid_dir_base, | |
679 | uint32_t pasid, | |
680 | VTDPASIDDirEntry *pdire) | |
681 | { | |
682 | uint32_t index; | |
683 | dma_addr_t addr, entry_size; | |
684 | ||
685 | index = VTD_PASID_DIR_INDEX(pasid); | |
686 | entry_size = VTD_PASID_DIR_ENTRY_SIZE; | |
687 | addr = pasid_dir_base + index * entry_size; | |
688 | if (dma_memory_read(&address_space_memory, addr, pdire, entry_size)) { | |
689 | return -VTD_FR_PASID_TABLE_INV; | |
690 | } | |
691 | ||
692 | return 0; | |
693 | } | |
694 | ||
695 | static int vtd_get_pasid_entry(IntelIOMMUState *s, | |
696 | uint32_t pasid, | |
697 | VTDPASIDDirEntry *pdire, | |
698 | VTDPASIDEntry *pe) | |
699 | { | |
700 | uint32_t index; | |
701 | dma_addr_t addr, entry_size; | |
702 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); | |
703 | ||
704 | index = VTD_PASID_TABLE_INDEX(pasid); | |
705 | entry_size = VTD_PASID_ENTRY_SIZE; | |
706 | addr = pdire->val & VTD_PASID_TABLE_BASE_ADDR_MASK; | |
707 | addr = addr + index * entry_size; | |
708 | if (dma_memory_read(&address_space_memory, addr, pe, entry_size)) { | |
709 | return -VTD_FR_PASID_TABLE_INV; | |
710 | } | |
711 | ||
712 | /* Do translation type check */ | |
713 | if (!vtd_pe_type_check(x86_iommu, pe)) { | |
714 | return -VTD_FR_PASID_TABLE_INV; | |
715 | } | |
716 | ||
717 | if (!vtd_is_level_supported(s, VTD_PE_GET_LEVEL(pe))) { | |
718 | return -VTD_FR_PASID_TABLE_INV; | |
719 | } | |
720 | ||
721 | return 0; | |
722 | } | |
723 | ||
724 | static int vtd_get_pasid_entry_from_pasid(IntelIOMMUState *s, | |
725 | dma_addr_t pasid_dir_base, | |
726 | uint32_t pasid, | |
727 | VTDPASIDEntry *pe) | |
728 | { | |
729 | int ret; | |
730 | VTDPASIDDirEntry pdire; | |
731 | ||
732 | ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); | |
733 | if (ret) { | |
734 | return ret; | |
735 | } | |
736 | ||
737 | ret = vtd_get_pasid_entry(s, pasid, &pdire, pe); | |
738 | if (ret) { | |
739 | return ret; | |
740 | } | |
741 | ||
742 | return ret; | |
743 | } | |
744 | ||
745 | static int vtd_ce_get_rid2pasid_entry(IntelIOMMUState *s, | |
746 | VTDContextEntry *ce, | |
747 | VTDPASIDEntry *pe) | |
748 | { | |
749 | uint32_t pasid; | |
750 | dma_addr_t pasid_dir_base; | |
751 | int ret = 0; | |
752 | ||
753 | pasid = VTD_CE_GET_RID2PASID(ce); | |
754 | pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); | |
755 | ret = vtd_get_pasid_entry_from_pasid(s, pasid_dir_base, pasid, pe); | |
756 | ||
757 | return ret; | |
758 | } | |
759 | ||
760 | static int vtd_ce_get_pasid_fpd(IntelIOMMUState *s, | |
761 | VTDContextEntry *ce, | |
762 | bool *pe_fpd_set) | |
763 | { | |
764 | int ret; | |
765 | uint32_t pasid; | |
766 | dma_addr_t pasid_dir_base; | |
767 | VTDPASIDDirEntry pdire; | |
768 | VTDPASIDEntry pe; | |
769 | ||
770 | pasid = VTD_CE_GET_RID2PASID(ce); | |
771 | pasid_dir_base = VTD_CE_GET_PASID_DIR_TABLE(ce); | |
772 | ||
773 | ret = vtd_get_pasid_dire(pasid_dir_base, pasid, &pdire); | |
774 | if (ret) { | |
775 | return ret; | |
776 | } | |
777 | ||
778 | if (pdire.val & VTD_PASID_DIR_FPD) { | |
779 | *pe_fpd_set = true; | |
780 | return 0; | |
781 | } | |
782 | ||
783 | ret = vtd_get_pasid_entry(s, pasid, &pdire, &pe); | |
784 | if (ret) { | |
785 | return ret; | |
786 | } | |
787 | ||
788 | if (pe.val[0] & VTD_PASID_ENTRY_FPD) { | |
789 | *pe_fpd_set = true; | |
790 | } | |
791 | ||
792 | return 0; | |
793 | } | |
794 | ||
1da12ec4 LT |
795 | /* Get the page-table level that hardware should use for the second-level |
796 | * page-table walk from the Address Width field of context-entry. | |
797 | */ | |
8f7d7161 | 798 | static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce) |
1da12ec4 LT |
799 | { |
800 | return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW); | |
801 | } | |
802 | ||
fb43cf73 LY |
803 | static uint32_t vtd_get_iova_level(IntelIOMMUState *s, |
804 | VTDContextEntry *ce) | |
805 | { | |
806 | VTDPASIDEntry pe; | |
807 | ||
808 | if (s->root_scalable) { | |
809 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); | |
810 | return VTD_PE_GET_LEVEL(&pe); | |
811 | } | |
812 | ||
813 | return vtd_ce_get_level(ce); | |
814 | } | |
815 | ||
8f7d7161 | 816 | static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce) |
1da12ec4 LT |
817 | { |
818 | return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9; | |
819 | } | |
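/*
 * The AW field encodes the adjusted guest address width in 9-bit steps:
 * e.g. AW=1 means a 39-bit AGAW (3-level table) and AW=2 a 48-bit AGAW
 * (4-level table), matching vtd_ce_get_level() above.
 */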
820 | ||
fb43cf73 LY |
821 | static uint32_t vtd_get_iova_agaw(IntelIOMMUState *s, |
822 | VTDContextEntry *ce) | |
823 | { | |
824 | VTDPASIDEntry pe; | |
825 | ||
826 | if (s->root_scalable) { | |
827 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); | |
828 | return 30 + ((pe.val[0] >> 2) & VTD_SM_PASID_ENTRY_AW) * 9; | |
829 | } | |
830 | ||
831 | return vtd_ce_get_agaw(ce); | |
832 | } | |
833 | ||
127ff5c3 PX |
834 | static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce) |
835 | { | |
836 | return ce->lo & VTD_CONTEXT_ENTRY_TT; | |
837 | } | |
838 | ||
fb43cf73 | 839 | /* Only for Legacy Mode. Return true if check passed, otherwise false */ |
f80c9874 PX |
840 | static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu, |
841 | VTDContextEntry *ce) | |
842 | { | |
843 | switch (vtd_ce_get_type(ce)) { | |
844 | case VTD_CONTEXT_TT_MULTI_LEVEL: | |
845 | /* Always supported */ | |
846 | break; | |
847 | case VTD_CONTEXT_TT_DEV_IOTLB: | |
848 | if (!x86_iommu->dt_supported) { | |
095955b2 | 849 | error_report_once("%s: DT specified but not supported", __func__); |
f80c9874 PX |
850 | return false; |
851 | } | |
852 | break; | |
dbaabb25 PX |
853 | case VTD_CONTEXT_TT_PASS_THROUGH: |
854 | if (!x86_iommu->pt_supported) { | |
095955b2 | 855 | error_report_once("%s: PT specified but not supported", __func__); |
dbaabb25 PX |
856 | return false; |
857 | } | |
858 | break; | |
f80c9874 | 859 | default: |
fb43cf73 | 860 | /* Unknown type */ |
095955b2 PX |
861 | error_report_once("%s: unknown ce type: %"PRIu32, __func__, |
862 | vtd_ce_get_type(ce)); | |
f80c9874 PX |
863 | return false; |
864 | } | |
865 | return true; | |
866 | } | |
867 | ||
fb43cf73 LY |
868 | static inline uint64_t vtd_iova_limit(IntelIOMMUState *s, |
869 | VTDContextEntry *ce, uint8_t aw) | |
f06a696d | 870 | { |
fb43cf73 | 871 | uint32_t ce_agaw = vtd_get_iova_agaw(s, ce); |
37f51384 | 872 | return 1ULL << MIN(ce_agaw, aw); |
f06a696d PX |
873 | } |
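/*
 * E.g. with a 48-bit AGAW in the context entry but aw_bits (MGAW) of 39,
 * the usable IOVA space is 1ULL << 39; vtd_iova_range_check() below then
 * rejects any iova that has bits set at or above that limit.
 */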
874 | ||
875 | /* Return true if IOVA passes range check, otherwise false. */ | |
fb43cf73 LY |
876 | static inline bool vtd_iova_range_check(IntelIOMMUState *s, |
877 | uint64_t iova, VTDContextEntry *ce, | |
37f51384 | 878 | uint8_t aw) |
f06a696d PX |
879 | { |
880 | /* | |
881 | * Check that @iova is not above 2^X-1, where X is the minimum of MGAW |
882 | * in CAP_REG and AW in context-entry. | |
883 | */ | |
fb43cf73 LY |
884 | return !(iova & ~(vtd_iova_limit(s, ce, aw) - 1)); |
885 | } | |
886 | ||
887 | static dma_addr_t vtd_get_iova_pgtbl_base(IntelIOMMUState *s, | |
888 | VTDContextEntry *ce) | |
889 | { | |
890 | VTDPASIDEntry pe; | |
891 | ||
892 | if (s->root_scalable) { | |
893 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); | |
894 | return pe.val[0] & VTD_SM_PASID_ENTRY_SLPTPTR; | |
895 | } | |
896 | ||
897 | return vtd_ce_get_slpt_base(ce); | |
f06a696d PX |
898 | } |
899 | ||
92e5d85e PS |
900 | /* |
901 | * Rsvd field masks for spte: | |
902 | * Index [1] to [4] 4k pages | |
903 | * Index [5] to [8] large pages | |
904 | */ | |
905 | static uint64_t vtd_paging_entry_rsvd_field[9]; | |
1da12ec4 LT |
906 | |
907 | static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level) | |
908 | { | |
909 | if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) { | |
910 | /* Maybe large page */ | |
911 | return slpte & vtd_paging_entry_rsvd_field[level + 4]; | |
912 | } else { | |
913 | return slpte & vtd_paging_entry_rsvd_field[level]; | |
914 | } | |
915 | } | |
916 | ||
dbaabb25 PX |
917 | /* Find the VTD address space associated with a given bus number */ |
918 | static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num) | |
919 | { | |
920 | VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num]; | |
921 | if (!vtd_bus) { | |
922 | /* | |
923 | * Iterate over the registered buses to find the one which | |
924 | * currently holds this bus number, and update the bus_num |
925 | * lookup table: | |
926 | */ | |
927 | GHashTableIter iter; | |
928 | ||
929 | g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); | |
930 | while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { | |
931 | if (pci_bus_num(vtd_bus->bus) == bus_num) { | |
932 | s->vtd_as_by_bus_num[bus_num] = vtd_bus; | |
933 | return vtd_bus; | |
934 | } | |
935 | } | |
936 | } | |
937 | return vtd_bus; | |
938 | } | |
939 | ||
6e905564 | 940 | /* Given the @iova, get relevant @slptep. @slpte_level will be the last level |
1da12ec4 LT |
941 | * of the translation, which can be used for deciding the size of large pages. |
942 | */ | |
fb43cf73 LY |
943 | static int vtd_iova_to_slpte(IntelIOMMUState *s, VTDContextEntry *ce, |
944 | uint64_t iova, bool is_write, | |
6e905564 | 945 | uint64_t *slptep, uint32_t *slpte_level, |
37f51384 | 946 | bool *reads, bool *writes, uint8_t aw_bits) |
1da12ec4 | 947 | { |
fb43cf73 LY |
948 | dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); |
949 | uint32_t level = vtd_get_iova_level(s, ce); | |
1da12ec4 LT |
950 | uint32_t offset; |
951 | uint64_t slpte; | |
1da12ec4 LT |
952 | uint64_t access_right_check; |
953 | ||
fb43cf73 | 954 | if (!vtd_iova_range_check(s, iova, ce, aw_bits)) { |
4e4abd11 PX |
955 | error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")", |
956 | __func__, iova); | |
1da12ec4 LT |
957 | return -VTD_FR_ADDR_BEYOND_MGAW; |
958 | } | |
959 | ||
960 | /* FIXME: what is the Atomics request here? */ | |
961 | access_right_check = is_write ? VTD_SL_W : VTD_SL_R; | |
962 | ||
963 | while (true) { | |
6e905564 | 964 | offset = vtd_iova_level_offset(iova, level); |
1da12ec4 LT |
965 | slpte = vtd_get_slpte(addr, offset); |
966 | ||
967 | if (slpte == (uint64_t)-1) { | |
4e4abd11 PX |
968 | error_report_once("%s: detected read error on DMAR slpte " |
969 | "(iova=0x%" PRIx64 ")", __func__, iova); | |
fb43cf73 | 970 | if (level == vtd_get_iova_level(s, ce)) { |
1da12ec4 LT |
971 | /* Invalid programming of context-entry */ |
972 | return -VTD_FR_CONTEXT_ENTRY_INV; | |
973 | } else { | |
974 | return -VTD_FR_PAGING_ENTRY_INV; | |
975 | } | |
976 | } | |
977 | *reads = (*reads) && (slpte & VTD_SL_R); | |
978 | *writes = (*writes) && (slpte & VTD_SL_W); | |
979 | if (!(slpte & access_right_check)) { | |
4e4abd11 PX |
980 | error_report_once("%s: detected slpte permission error " |
981 | "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", " | |
982 | "slpte=0x%" PRIx64 ", write=%d)", __func__, | |
983 | iova, level, slpte, is_write); | |
1da12ec4 LT |
984 | return is_write ? -VTD_FR_WRITE : -VTD_FR_READ; |
985 | } | |
986 | if (vtd_slpte_nonzero_rsvd(slpte, level)) { | |
4e4abd11 PX |
987 | error_report_once("%s: detected splte reserve non-zero " |
988 | "iova=0x%" PRIx64 ", level=0x%" PRIx32 | |
989 | "slpte=0x%" PRIx64 ")", __func__, iova, | |
990 | level, slpte); | |
1da12ec4 LT |
991 | return -VTD_FR_PAGING_ENTRY_RSVD; |
992 | } | |
993 | ||
994 | if (vtd_is_last_slpte(slpte, level)) { | |
995 | *slptep = slpte; | |
996 | *slpte_level = level; | |
997 | return 0; | |
998 | } | |
37f51384 | 999 | addr = vtd_get_slpte_addr(slpte, aw_bits); |
1da12ec4 LT |
1000 | level--; |
1001 | } | |
1002 | } | |
1003 | ||
f06a696d PX |
1004 | typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private); |
1005 | ||
fe215b0c PX |
1006 | /** |
1007 | * Constant information used during page walking | |
1008 | * | |
1009 | * @hook_fn: hook function to be called for each detected page |
1010 | * @private: private data to be passed into hook func | |
1011 | * @notify_unmap: whether we should notify invalid entries | |
2f764fa8 | 1012 | * @as: VT-d address space of the device |
fe215b0c | 1013 | * @aw: maximum address width |
d118c06e | 1014 | * @domain: domain ID of the page walk |
fe215b0c PX |
1015 | */ |
1016 | typedef struct { | |
2f764fa8 | 1017 | VTDAddressSpace *as; |
fe215b0c PX |
1018 | vtd_page_walk_hook hook_fn; |
1019 | void *private; | |
1020 | bool notify_unmap; | |
1021 | uint8_t aw; | |
d118c06e | 1022 | uint16_t domain_id; |
fe215b0c PX |
1023 | } vtd_page_walk_info; |
1024 | ||
d118c06e | 1025 | static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info) |
36d2d52b | 1026 | { |
63b88968 | 1027 | VTDAddressSpace *as = info->as; |
fe215b0c PX |
1028 | vtd_page_walk_hook hook_fn = info->hook_fn; |
1029 | void *private = info->private; | |
63b88968 PX |
1030 | DMAMap target = { |
1031 | .iova = entry->iova, | |
1032 | .size = entry->addr_mask, | |
1033 | .translated_addr = entry->translated_addr, | |
1034 | .perm = entry->perm, | |
1035 | }; | |
1036 | DMAMap *mapped = iova_tree_find(as->iova_tree, &target); | |
1037 | ||
1038 | if (entry->perm == IOMMU_NONE && !info->notify_unmap) { | |
1039 | trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); | |
1040 | return 0; | |
1041 | } | |
fe215b0c | 1042 | |
36d2d52b | 1043 | assert(hook_fn); |
63b88968 PX |
1044 | |
1045 | /* Update local IOVA mapped ranges */ | |
1046 | if (entry->perm) { | |
1047 | if (mapped) { | |
1048 | /* If it's exactly the same translation, skip */ | |
1049 | if (!memcmp(mapped, &target, sizeof(target))) { | |
1050 | trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask, | |
1051 | entry->translated_addr); | |
1052 | return 0; | |
1053 | } else { | |
1054 | /* | |
1055 | * Translation changed. Normally this should not | |
1056 | * happen, but it can happen with buggy guest |
1057 | * OSes. Note that there will be a small window during |
1058 | * which we have no mapping at all. But that's the best |
1059 | * effort we can do. The ideal way to emulate this is | |
1060 | * atomically modify the PTE to follow what has | |
1061 | * changed, but we can't. One example is that vfio | |
1062 | * driver only has VFIO_IOMMU_[UN]MAP_DMA but no | |
1063 | * interface to modify a mapping (meanwhile it seems | |
1064 | * meaningless to even provide one). Anyway, let's | |
1065 | * mark this as a TODO in case one day we'll have | |
1066 | * a better solution. | |
1067 | */ | |
1068 | IOMMUAccessFlags cache_perm = entry->perm; | |
1069 | int ret; | |
1070 | ||
1071 | /* Emulate an UNMAP */ | |
1072 | entry->perm = IOMMU_NONE; | |
1073 | trace_vtd_page_walk_one(info->domain_id, | |
1074 | entry->iova, | |
1075 | entry->translated_addr, | |
1076 | entry->addr_mask, | |
1077 | entry->perm); | |
1078 | ret = hook_fn(entry, private); | |
1079 | if (ret) { | |
1080 | return ret; | |
1081 | } | |
1082 | /* Drop any existing mapping */ | |
1083 | iova_tree_remove(as->iova_tree, &target); | |
1084 | /* Recover the correct permission */ | |
1085 | entry->perm = cache_perm; | |
1086 | } | |
1087 | } | |
1088 | iova_tree_insert(as->iova_tree, &target); | |
1089 | } else { | |
1090 | if (!mapped) { | |
1091 | /* Skip since we didn't map this range at all */ | |
1092 | trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask); | |
1093 | return 0; | |
1094 | } | |
1095 | iova_tree_remove(as->iova_tree, &target); | |
1096 | } | |
1097 | ||
d118c06e PX |
1098 | trace_vtd_page_walk_one(info->domain_id, entry->iova, |
1099 | entry->translated_addr, entry->addr_mask, | |
1100 | entry->perm); | |
36d2d52b PX |
1101 | return hook_fn(entry, private); |
1102 | } | |
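/*
 * vtd_page_walk_one() keeps the per-device iova_tree in sync with what has
 * been notified: identical re-mappings are skipped, a changed translation
 * is replayed as an UNMAP followed by a MAP, and UNMAP events for ranges
 * we never mapped are dropped.
 */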
1103 | ||
f06a696d PX |
1104 | /** |
1105 | * vtd_page_walk_level - walk over specific level for IOVA range | |
1106 | * | |
1107 | * @addr: base GPA addr to start the walk | |
1108 | * @start: IOVA range start address | |
1109 | * @end: IOVA range end address (start <= addr < end) | |
f06a696d PX |
1110 | * @read: whether parent level has read permission |
1111 | * @write: whether parent level has write permission | |
fe215b0c | 1112 | * @info: constant information for the page walk |
f06a696d PX |
1113 | */ |
1114 | static int vtd_page_walk_level(dma_addr_t addr, uint64_t start, | |
fe215b0c PX |
1115 | uint64_t end, uint32_t level, bool read, |
1116 | bool write, vtd_page_walk_info *info) | |
f06a696d PX |
1117 | { |
1118 | bool read_cur, write_cur, entry_valid; | |
1119 | uint32_t offset; | |
1120 | uint64_t slpte; | |
1121 | uint64_t subpage_size, subpage_mask; | |
1122 | IOMMUTLBEntry entry; | |
1123 | uint64_t iova = start; | |
1124 | uint64_t iova_next; | |
1125 | int ret = 0; | |
1126 | ||
1127 | trace_vtd_page_walk_level(addr, level, start, end); | |
1128 | ||
1129 | subpage_size = 1ULL << vtd_slpt_level_shift(level); | |
1130 | subpage_mask = vtd_slpt_level_page_mask(level); | |
1131 | ||
1132 | while (iova < end) { | |
1133 | iova_next = (iova & subpage_mask) + subpage_size; | |
1134 | ||
1135 | offset = vtd_iova_level_offset(iova, level); | |
1136 | slpte = vtd_get_slpte(addr, offset); | |
1137 | ||
1138 | if (slpte == (uint64_t)-1) { | |
1139 | trace_vtd_page_walk_skip_read(iova, iova_next); | |
1140 | goto next; | |
1141 | } | |
1142 | ||
1143 | if (vtd_slpte_nonzero_rsvd(slpte, level)) { | |
1144 | trace_vtd_page_walk_skip_reserve(iova, iova_next); | |
1145 | goto next; | |
1146 | } | |
1147 | ||
1148 | /* Permissions are stacked with parents' */ | |
1149 | read_cur = read && (slpte & VTD_SL_R); | |
1150 | write_cur = write && (slpte & VTD_SL_W); | |
1151 | ||
1152 | /* | |
1153 | * As long as we have either read/write permission, this is a | |
1154 | * valid entry. The rule works for both page entries and page | |
1155 | * table entries. | |
1156 | */ | |
1157 | entry_valid = read_cur | write_cur; | |
1158 | ||
63b88968 PX |
1159 | if (!vtd_is_last_slpte(slpte, level) && entry_valid) { |
1160 | /* | |
1161 | * This is a valid PDE (or even bigger than PDE). We need | |
1162 | * to walk one further level. | |
1163 | */ | |
fe215b0c PX |
1164 | ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw), |
1165 | iova, MIN(iova_next, end), level - 1, | |
1166 | read_cur, write_cur, info); | |
63b88968 PX |
1167 | } else { |
1168 | /* | |
1169 | * This means we are either: | |
1170 | * | |
1171 | * (1) the real page entry (either 4K page, or huge page) | |
1172 | * (2) the whole range is invalid | |
1173 | * | |
1174 | * In either case, we send an IOTLB notification down. | |
1175 | */ | |
1176 | entry.target_as = &address_space_memory; | |
1177 | entry.iova = iova & subpage_mask; | |
1178 | entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur); | |
1179 | entry.addr_mask = ~subpage_mask; | |
1180 | /* NOTE: this is only meaningful if entry_valid == true */ | |
1181 | entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw); | |
1182 | ret = vtd_page_walk_one(&entry, info); | |
1183 | } | |
1184 | ||
1185 | if (ret < 0) { | |
1186 | return ret; | |
f06a696d PX |
1187 | } |
1188 | ||
1189 | next: | |
1190 | iova = iova_next; | |
1191 | } | |
1192 | ||
1193 | return 0; | |
1194 | } | |
1195 | ||
1196 | /** | |
1197 | * vtd_page_walk - walk specific IOVA range, and call the hook | |
1198 | * | |
fb43cf73 | 1199 | * @s: intel iommu state |
f06a696d PX |
1200 | * @ce: context entry to walk upon |
1201 | * @start: IOVA address to start the walk | |
1202 | * @end: IOVA range end address (start <= addr < end) | |
fe215b0c | 1203 | * @info: page walking information struct |
f06a696d | 1204 | */ |
fb43cf73 LY |
1205 | static int vtd_page_walk(IntelIOMMUState *s, VTDContextEntry *ce, |
1206 | uint64_t start, uint64_t end, | |
fe215b0c | 1207 | vtd_page_walk_info *info) |
f06a696d | 1208 | { |
fb43cf73 LY |
1209 | dma_addr_t addr = vtd_get_iova_pgtbl_base(s, ce); |
1210 | uint32_t level = vtd_get_iova_level(s, ce); | |
f06a696d | 1211 | |
fb43cf73 | 1212 | if (!vtd_iova_range_check(s, start, ce, info->aw)) { |
f06a696d PX |
1213 | return -VTD_FR_ADDR_BEYOND_MGAW; |
1214 | } | |
1215 | ||
fb43cf73 | 1216 | if (!vtd_iova_range_check(s, end, ce, info->aw)) { |
f06a696d | 1217 | /* Fix end so that it reaches the maximum */ |
fb43cf73 | 1218 | end = vtd_iova_limit(s, ce, info->aw); |
f06a696d PX |
1219 | } |
1220 | ||
fe215b0c | 1221 | return vtd_page_walk_level(addr, start, end, level, true, true, info); |
f06a696d PX |
1222 | } |
1223 | ||
fb43cf73 LY |
1224 | static int vtd_root_entry_rsvd_bits_check(IntelIOMMUState *s, |
1225 | VTDRootEntry *re) | |
1226 | { | |
1227 | /* Legacy Mode reserved bits check */ | |
1228 | if (!s->root_scalable && | |
1229 | (re->hi || (re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) | |
1230 | goto rsvd_err; | |
1231 | ||
1232 | /* Scalable Mode reserved bits check */ | |
1233 | if (s->root_scalable && | |
1234 | ((re->lo & VTD_ROOT_ENTRY_RSVD(s->aw_bits)) || | |
1235 | (re->hi & VTD_ROOT_ENTRY_RSVD(s->aw_bits)))) | |
1236 | goto rsvd_err; | |
1237 | ||
1238 | return 0; | |
1239 | ||
1240 | rsvd_err: | |
1241 | error_report_once("%s: invalid root entry: hi=0x%"PRIx64 | |
1242 | ", lo=0x%"PRIx64, | |
1243 | __func__, re->hi, re->lo); | |
1244 | return -VTD_FR_ROOT_ENTRY_RSVD; | |
1245 | } | |
1246 | ||
1247 | static inline int vtd_context_entry_rsvd_bits_check(IntelIOMMUState *s, | |
1248 | VTDContextEntry *ce) | |
1249 | { | |
1250 | if (!s->root_scalable && | |
1251 | (ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI || | |
1252 | ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) { | |
1253 | error_report_once("%s: invalid context entry: hi=%"PRIx64 | |
1254 | ", lo=%"PRIx64" (reserved nonzero)", | |
1255 | __func__, ce->hi, ce->lo); | |
1256 | return -VTD_FR_CONTEXT_ENTRY_RSVD; | |
1257 | } | |
1258 | ||
1259 | if (s->root_scalable && | |
1260 | (ce->val[0] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL0(s->aw_bits) || | |
1261 | ce->val[1] & VTD_SM_CONTEXT_ENTRY_RSVD_VAL1 || | |
1262 | ce->val[2] || | |
1263 | ce->val[3])) { | |
1264 | error_report_once("%s: invalid context entry: val[3]=%"PRIx64 | |
1265 | ", val[2]=%"PRIx64 | |
1266 | ", val[1]=%"PRIx64 | |
1267 | ", val[0]=%"PRIx64" (reserved nonzero)", | |
1268 | __func__, ce->val[3], ce->val[2], | |
1269 | ce->val[1], ce->val[0]); | |
1270 | return -VTD_FR_CONTEXT_ENTRY_RSVD; | |
1271 | } | |
1272 | ||
1273 | return 0; | |
1274 | } | |
1275 | ||
1276 | static int vtd_ce_rid2pasid_check(IntelIOMMUState *s, | |
1277 | VTDContextEntry *ce) | |
1278 | { | |
1279 | VTDPASIDEntry pe; | |
1280 | ||
1281 | /* | |
1282 | * Make sure in Scalable Mode, a present context entry | |
1283 | * has valid rid2pasid setting, which includes valid | |
1284 | * rid2pasid field and corresponding pasid entry setting | |
1285 | */ | |
1286 | return vtd_ce_get_rid2pasid_entry(s, ce, &pe); | |
1287 | } | |
1288 | ||
1da12ec4 LT |
1289 | /* Map a device to its corresponding domain (context-entry) */ |
1290 | static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num, | |
1291 | uint8_t devfn, VTDContextEntry *ce) | |
1292 | { | |
1293 | VTDRootEntry re; | |
1294 | int ret_fr; | |
f80c9874 | 1295 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
1da12ec4 LT |
1296 | |
1297 | ret_fr = vtd_get_root_entry(s, bus_num, &re); | |
1298 | if (ret_fr) { | |
1299 | return ret_fr; | |
1300 | } | |
1301 | ||
fb43cf73 | 1302 | if (!vtd_root_entry_present(s, &re, devfn)) { |
6c441e1d PX |
1303 | /* Not an error - it's okay if we don't have a root entry. */ |
1304 | trace_vtd_re_not_present(bus_num); | |
1da12ec4 | 1305 | return -VTD_FR_ROOT_ENTRY_P; |
f80c9874 PX |
1306 | } |
1307 | ||
fb43cf73 LY |
1308 | ret_fr = vtd_root_entry_rsvd_bits_check(s, &re); |
1309 | if (ret_fr) { | |
1310 | return ret_fr; | |
1da12ec4 LT |
1311 | } |
1312 | ||
fb43cf73 | 1313 | ret_fr = vtd_get_context_entry_from_root(s, &re, devfn, ce); |
1da12ec4 LT |
1314 | if (ret_fr) { |
1315 | return ret_fr; | |
1316 | } | |
1317 | ||
8f7d7161 | 1318 | if (!vtd_ce_present(ce)) { |
6c441e1d PX |
1319 | /* Not an error - it's okay if we don't have a context entry. */ |
1320 | trace_vtd_ce_not_present(bus_num, devfn); | |
1da12ec4 | 1321 | return -VTD_FR_CONTEXT_ENTRY_P; |
f80c9874 PX |
1322 | } |
1323 | ||
fb43cf73 LY |
1324 | ret_fr = vtd_context_entry_rsvd_bits_check(s, ce); |
1325 | if (ret_fr) { | |
1326 | return ret_fr; | |
1da12ec4 | 1327 | } |
f80c9874 | 1328 | |
1da12ec4 | 1329 | /* Check if the programming of context-entry is valid */ |
fb43cf73 LY |
1330 | if (!s->root_scalable && |
1331 | !vtd_is_level_supported(s, vtd_ce_get_level(ce))) { | |
095955b2 PX |
1332 | error_report_once("%s: invalid context entry: hi=%"PRIx64 |
1333 | ", lo=%"PRIx64" (level %d not supported)", | |
fb43cf73 LY |
1334 | __func__, ce->hi, ce->lo, |
1335 | vtd_ce_get_level(ce)); | |
1da12ec4 | 1336 | return -VTD_FR_CONTEXT_ENTRY_INV; |
1da12ec4 | 1337 | } |
f80c9874 | 1338 | |
fb43cf73 LY |
1339 | if (!s->root_scalable) { |
1340 | /* Do translation type check */ | |
1341 | if (!vtd_ce_type_check(x86_iommu, ce)) { | |
1342 | /* Errors dumped in vtd_ce_type_check() */ | |
1343 | return -VTD_FR_CONTEXT_ENTRY_INV; | |
1344 | } | |
1345 | } else { | |
1346 | /* | |
1347 | * Check if the programming of context-entry.rid2pasid | |
1348 | * and the corresponding pasid setting is valid, which |
1349 | * avoids having to check the pasid entry fetching result |
1350 | * in future helper function calls. |
1351 | */ | |
1352 | ret_fr = vtd_ce_rid2pasid_check(s, ce); | |
1353 | if (ret_fr) { | |
1354 | return ret_fr; | |
1355 | } | |
f80c9874 PX |
1356 | } |
1357 | ||
1da12ec4 LT |
1358 | return 0; |
1359 | } | |
1360 | ||
63b88968 PX |
1361 | static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry, |
1362 | void *private) | |
1363 | { | |
cb1efcf4 | 1364 | memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry); |
63b88968 PX |
1365 | return 0; |
1366 | } | |
1367 | ||
fb43cf73 LY |
1368 | static uint16_t vtd_get_domain_id(IntelIOMMUState *s, |
1369 | VTDContextEntry *ce) | |
1370 | { | |
1371 | VTDPASIDEntry pe; | |
1372 | ||
1373 | if (s->root_scalable) { | |
1374 | vtd_ce_get_rid2pasid_entry(s, ce, &pe); | |
1375 | return VTD_SM_PASID_ENTRY_DID(pe.val[1]); | |
1376 | } | |
1377 | ||
1378 | return VTD_CONTEXT_ENTRY_DID(ce->hi); | |
1379 | } | |
1380 | ||
63b88968 PX |
1381 | static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as, |
1382 | VTDContextEntry *ce, | |
1383 | hwaddr addr, hwaddr size) | |
1384 | { | |
1385 | IntelIOMMUState *s = vtd_as->iommu_state; | |
1386 | vtd_page_walk_info info = { | |
1387 | .hook_fn = vtd_sync_shadow_page_hook, | |
1388 | .private = (void *)&vtd_as->iommu, | |
1389 | .notify_unmap = true, | |
1390 | .aw = s->aw_bits, | |
1391 | .as = vtd_as, | |
fb43cf73 | 1392 | .domain_id = vtd_get_domain_id(s, ce), |
63b88968 | 1393 | }; |
63b88968 | 1394 | |
fb43cf73 | 1395 | return vtd_page_walk(s, ce, addr, addr + size, &info); |
63b88968 PX |
1396 | } |
1397 | ||
1398 | static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as) | |
1399 | { | |
95ecd3df PX |
1400 | int ret; |
1401 | VTDContextEntry ce; | |
c28b535d | 1402 | IOMMUNotifier *n; |
95ecd3df PX |
1403 | |
1404 | ret = vtd_dev_to_context_entry(vtd_as->iommu_state, | |
1405 | pci_bus_num(vtd_as->bus), | |
1406 | vtd_as->devfn, &ce); | |
1407 | if (ret) { | |
c28b535d PX |
1408 | if (ret == -VTD_FR_CONTEXT_ENTRY_P) { |
1409 | /* | |
1410 | * It's a valid scenario to have a context entry that is | |
1411 | * not present. For example, when a device is removed | |
1412 | * from an existing domain then the context entry will be | |
1413 | * zeroed by the guest before it is put into another |
1414 | * domain. When this happens, instead of synchronizing | |
1415 | * the shadow pages we should invalidate all existing | |
1416 | * mappings and notify the backends. | |
1417 | */ | |
1418 | IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { | |
1419 | vtd_address_space_unmap(vtd_as, n); | |
1420 | } | |
1421 | ret = 0; | |
1422 | } | |
95ecd3df PX |
1423 | return ret; |
1424 | } | |
1425 | ||
1426 | return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX); | |
63b88968 PX |
1427 | } |
1428 | ||
dbaabb25 | 1429 | /* |
fb43cf73 LY |
1430 | * Check if a specific device is configured to bypass address |
1431 | * translation for DMA requests. In Scalable Mode, whether |
1432 | * 1st-level or 2nd-level translation is bypassed depends |
1433 | * on the PGTT setting. |
dbaabb25 | 1434 | */ |
fb43cf73 | 1435 | static bool vtd_dev_pt_enabled(VTDAddressSpace *as) |
dbaabb25 PX |
1436 | { |
1437 | IntelIOMMUState *s; | |
1438 | VTDContextEntry ce; | |
fb43cf73 | 1439 | VTDPASIDEntry pe; |
dbaabb25 PX |
1440 | int ret; |
1441 | ||
fb43cf73 | 1442 | assert(as); |
dbaabb25 | 1443 | |
fb43cf73 | 1444 | s = as->iommu_state; |
dbaabb25 PX |
1445 | ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), |
1446 | as->devfn, &ce); | |
1447 | if (ret) { | |
dbaabb25 PX |
1448 | /* |
1449 | * Possibly failed to parse the context entry for some reason | |
1450 | * (e.g., during init, or any guest configuration errors on | |
1451 | * context entries). We should assume PT not enabled for | |
1452 | * safety. | |
1453 | */ | |
1454 | return false; | |
1455 | } | |
1456 | ||
fb43cf73 LY |
1457 | if (s->root_scalable) { |
1458 | ret = vtd_ce_get_rid2pasid_entry(s, &ce, &pe); | |
1459 | if (ret) { | |
1460 | error_report_once("%s: vtd_ce_get_rid2pasid_entry error: %"PRId32, | |
1461 | __func__, ret); | |
1462 | return false; | |
1463 | } | |
1464 | return (VTD_PE_GET_TYPE(&pe) == VTD_SM_PASID_ENTRY_PT); | |
1465 | } | |
1466 | ||
1467 | return (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH); | |
dbaabb25 PX |
1468 | } |
1469 | ||
1470 | /* Return whether the device is using IOMMU translation. */ | |
1471 | static bool vtd_switch_address_space(VTDAddressSpace *as) | |
1472 | { | |
1473 | bool use_iommu; | |
66a4a031 PX |
1474 | /* Whether we need to take the BQL on our own */ |
1475 | bool take_bql = !qemu_mutex_iothread_locked(); | |
dbaabb25 PX |
1476 | |
1477 | assert(as); | |
1478 | ||
2a078b10 | 1479 | use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as); |
dbaabb25 PX |
1480 | |
1481 | trace_vtd_switch_address_space(pci_bus_num(as->bus), | |
1482 | VTD_PCI_SLOT(as->devfn), | |
1483 | VTD_PCI_FUNC(as->devfn), | |
1484 | use_iommu); | |
1485 | ||
66a4a031 PX |
1486 | /* |
1487 | * It's possible that we reach here without BQL, e.g., when called | |
1488 | * from vtd_pt_enable_fast_path(). However the memory APIs need | |
1489 | * it. We'd better make sure we have had it already, or, take it. | |
1490 | */ | |
1491 | if (take_bql) { | |
1492 | qemu_mutex_lock_iothread(); | |
1493 | } | |
1494 | ||
dbaabb25 PX |
1495 | /* Turn off first then on the other */ |
1496 | if (use_iommu) { | |
4b519ef1 | 1497 | memory_region_set_enabled(&as->nodmar, false); |
3df9d748 | 1498 | memory_region_set_enabled(MEMORY_REGION(&as->iommu), true); |
dbaabb25 | 1499 | } else { |
3df9d748 | 1500 | memory_region_set_enabled(MEMORY_REGION(&as->iommu), false); |
4b519ef1 | 1501 | memory_region_set_enabled(&as->nodmar, true); |
dbaabb25 PX |
1502 | } |
1503 | ||
66a4a031 PX |
1504 | if (take_bql) { |
1505 | qemu_mutex_unlock_iothread(); | |
1506 | } | |
1507 | ||
dbaabb25 PX |
1508 | return use_iommu; |
1509 | } | |
1510 | ||
1511 | static void vtd_switch_address_space_all(IntelIOMMUState *s) | |
1512 | { | |
1513 | GHashTableIter iter; | |
1514 | VTDBus *vtd_bus; | |
1515 | int i; | |
1516 | ||
1517 | g_hash_table_iter_init(&iter, s->vtd_as_by_busptr); | |
1518 | while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) { | |
bf33cc75 | 1519 | for (i = 0; i < PCI_DEVFN_MAX; i++) { |
dbaabb25 PX |
1520 | if (!vtd_bus->dev_as[i]) { |
1521 | continue; | |
1522 | } | |
1523 | vtd_switch_address_space(vtd_bus->dev_as[i]); | |
1524 | } | |
1525 | } | |
1526 | } | |
1527 | ||
1da12ec4 LT |
1528 | static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn) |
1529 | { | |
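| /* e.g. bus 0x01, devfn 0x10 (slot 2, function 0) yields source_id 0x0110 */ |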
1530 | return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL); | |
1531 | } | |
1532 | ||
1533 | static const bool vtd_qualified_faults[] = { | |
1534 | [VTD_FR_RESERVED] = false, | |
1535 | [VTD_FR_ROOT_ENTRY_P] = false, | |
1536 | [VTD_FR_CONTEXT_ENTRY_P] = true, | |
1537 | [VTD_FR_CONTEXT_ENTRY_INV] = true, | |
1538 | [VTD_FR_ADDR_BEYOND_MGAW] = true, | |
1539 | [VTD_FR_WRITE] = true, | |
1540 | [VTD_FR_READ] = true, | |
1541 | [VTD_FR_PAGING_ENTRY_INV] = true, | |
1542 | [VTD_FR_ROOT_TABLE_INV] = false, | |
1543 | [VTD_FR_CONTEXT_TABLE_INV] = false, | |
1544 | [VTD_FR_ROOT_ENTRY_RSVD] = false, | |
1545 | [VTD_FR_PAGING_ENTRY_RSVD] = true, | |
1546 | [VTD_FR_CONTEXT_ENTRY_TT] = true, | |
fb43cf73 | 1547 | [VTD_FR_PASID_TABLE_INV] = false, |
1da12ec4 LT |
1548 | [VTD_FR_RESERVED_ERR] = false, |
1549 | [VTD_FR_MAX] = false, | |
1550 | }; | |
1551 | ||
1552 | /* Check whether a fault condition is "qualified"; a qualified fault is | |
1553 | * reported to software only if the FPD field in the context-entry used | |
1554 | * to process the faulting request is 0. | |
1555 | */ | |
1556 | static inline bool vtd_is_qualified_fault(VTDFaultReason fault) | |
1557 | { | |
1558 | return vtd_qualified_faults[fault]; | |
1559 | } | |
1560 | ||
1561 | static inline bool vtd_is_interrupt_addr(hwaddr addr) | |
1562 | { | |
1563 | return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST; | |
1564 | } | |
1565 | ||
dbaabb25 PX |
1566 | static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id) |
1567 | { | |
1568 | VTDBus *vtd_bus; | |
1569 | VTDAddressSpace *vtd_as; | |
1570 | bool success = false; | |
1571 | ||
1572 | vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id)); | |
1573 | if (!vtd_bus) { | |
1574 | goto out; | |
1575 | } | |
1576 | ||
1577 | vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)]; | |
1578 | if (!vtd_as) { | |
1579 | goto out; | |
1580 | } | |
1581 | ||
1582 | if (vtd_switch_address_space(vtd_as) == false) { | |
1583 | /* We switched off IOMMU region successfully. */ | |
1584 | success = true; | |
1585 | } | |
1586 | ||
1587 | out: | |
1588 | trace_vtd_pt_enable_fast_path(source_id, success); | |
1589 | } | |
1590 | ||
1da12ec4 LT |
1591 | /* Map dev to context-entry then do a paging-structures walk to do an IOMMU |
1592 | * translation. | |
79e2b9ae PB |
1593 | * |
1594 | * Called from RCU critical section. | |
1595 | * | |
1da12ec4 LT |
1596 | * @bus_num: The bus number |
1597 | * @devfn: The devfn, which is the combination of device and function number | |
1598 | * @is_write: The access is a write operation | |
1599 | * @entry: IOMMUTLBEntry that contains the addr to be translated and result | |
b9313021 PX |
1600 | * |
1601 | * Returns true if translation is successful, otherwise false. | |
1da12ec4 | 1602 | */ |
b9313021 | 1603 | static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus, |
1da12ec4 LT |
1604 | uint8_t devfn, hwaddr addr, bool is_write, |
1605 | IOMMUTLBEntry *entry) | |
1606 | { | |
d92fa2dc | 1607 | IntelIOMMUState *s = vtd_as->iommu_state; |
1da12ec4 | 1608 | VTDContextEntry ce; |
7df953bd | 1609 | uint8_t bus_num = pci_bus_num(bus); |
1d9efa73 | 1610 | VTDContextCacheEntry *cc_entry; |
d66b969b | 1611 | uint64_t slpte, page_mask; |
1da12ec4 LT |
1612 | uint32_t level; |
1613 | uint16_t source_id = vtd_make_source_id(bus_num, devfn); | |
1614 | int ret_fr; | |
1615 | bool is_fpd_set = false; | |
1616 | bool reads = true; | |
1617 | bool writes = true; | |
07f7b733 | 1618 | uint8_t access_flags; |
b5a280c0 | 1619 | VTDIOTLBEntry *iotlb_entry; |
1da12ec4 | 1620 | |
046ab7e9 PX |
1621 | /* |
1622 | * We have standalone memory region for interrupt addresses, we | |
1623 | * should never receive translation requests in this region. | |
1624 | */ | |
1625 | assert(!vtd_is_interrupt_addr(addr)); | |
1626 | ||
1d9efa73 PX |
1627 | vtd_iommu_lock(s); |
1628 | ||
1629 | cc_entry = &vtd_as->context_cache_entry; | |
1630 | ||
b5a280c0 LT |
1631 | /* Try to fetch slpte from IOTLB */ |
1632 | iotlb_entry = vtd_lookup_iotlb(s, source_id, addr); | |
1633 | if (iotlb_entry) { | |
6c441e1d PX |
1634 | trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte, |
1635 | iotlb_entry->domain_id); | |
b5a280c0 | 1636 | slpte = iotlb_entry->slpte; |
07f7b733 | 1637 | access_flags = iotlb_entry->access_flags; |
d66b969b | 1638 | page_mask = iotlb_entry->mask; |
b5a280c0 LT |
1639 | goto out; |
1640 | } | |
b9313021 | 1641 | |
d92fa2dc LT |
1642 | /* Try to fetch context-entry from cache first */ |
1643 | if (cc_entry->context_cache_gen == s->context_cache_gen) { | |
6c441e1d PX |
1644 | trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi, |
1645 | cc_entry->context_entry.lo, | |
1646 | cc_entry->context_cache_gen); | |
d92fa2dc LT |
1647 | ce = cc_entry->context_entry; |
1648 | is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; | |
fb43cf73 LY |
1649 | if (!is_fpd_set && s->root_scalable) { |
1650 | ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); | |
1651 | VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); | |
1652 | } | |
d92fa2dc LT |
1653 | } else { |
1654 | ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce); | |
1655 | is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD; | |
fb43cf73 LY |
1656 | if (!ret_fr && !is_fpd_set && s->root_scalable) { |
1657 | ret_fr = vtd_ce_get_pasid_fpd(s, &ce, &is_fpd_set); | |
1da12ec4 | 1658 | } |
fb43cf73 | 1659 | VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); |
d92fa2dc | 1660 | /* Update context-cache */ |
6c441e1d PX |
1661 | trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo, |
1662 | cc_entry->context_cache_gen, | |
1663 | s->context_cache_gen); | |
d92fa2dc LT |
1664 | cc_entry->context_entry = ce; |
1665 | cc_entry->context_cache_gen = s->context_cache_gen; | |
1da12ec4 LT |
1666 | } |
1667 | ||
dbaabb25 PX |
1668 | /* |
1669 | * We don't need to translate for pass-through context entries. | |
1670 | * Also, let's ignore IOTLB caching for PT devices. | |
1671 | */ | |
1672 | if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) { | |
892721d9 | 1673 | entry->iova = addr & VTD_PAGE_MASK_4K; |
dbaabb25 | 1674 | entry->translated_addr = entry->iova; |
892721d9 | 1675 | entry->addr_mask = ~VTD_PAGE_MASK_4K; |
dbaabb25 PX |
1676 | entry->perm = IOMMU_RW; |
1677 | trace_vtd_translate_pt(source_id, entry->iova); | |
1678 | ||
1679 | /* | |
1680 | * When this happens, it means that caching-mode is not | |
1681 | * enabled, and this is the first passthrough translation for | |
1682 | * the device. Let's enable the fast path for passthrough. | |
1683 | * | |
1684 | * When passthrough is disabled again for the device, we can | |
1685 | * capture it via the context entry invalidation, then the | |
1686 | * IOMMU region can be swapped back. | |
1687 | */ | |
1688 | vtd_pt_enable_fast_path(s, source_id); | |
1d9efa73 | 1689 | vtd_iommu_unlock(s); |
b9313021 | 1690 | return true; |
dbaabb25 PX |
1691 | } |
1692 | ||
fb43cf73 | 1693 | ret_fr = vtd_iova_to_slpte(s, &ce, addr, is_write, &slpte, &level, |
37f51384 | 1694 | &reads, &writes, s->aw_bits); |
fb43cf73 | 1695 | VTD_PE_GET_FPD_ERR(ret_fr, is_fpd_set, s, source_id, addr, is_write); |
1da12ec4 | 1696 | |
d66b969b | 1697 | page_mask = vtd_slpt_level_page_mask(level); |
07f7b733 | 1698 | access_flags = IOMMU_ACCESS_FLAG(reads, writes); |
fb43cf73 | 1699 | vtd_update_iotlb(s, source_id, vtd_get_domain_id(s, &ce), addr, slpte, |
07f7b733 | 1700 | access_flags, level); |
b5a280c0 | 1701 | out: |
1d9efa73 | 1702 | vtd_iommu_unlock(s); |
d66b969b | 1703 | entry->iova = addr & page_mask; |
37f51384 | 1704 | entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask; |
d66b969b | 1705 | entry->addr_mask = ~page_mask; |
07f7b733 | 1706 | entry->perm = access_flags; |
b9313021 PX |
1707 | return true; |
1708 | ||
1709 | error: | |
1d9efa73 | 1710 | vtd_iommu_unlock(s); |
b9313021 PX |
1711 | entry->iova = 0; |
1712 | entry->translated_addr = 0; | |
1713 | entry->addr_mask = 0; | |
1714 | entry->perm = IOMMU_NONE; | |
1715 | return false; | |
1da12ec4 LT |
1716 | } |
1717 | ||
1718 | static void vtd_root_table_setup(IntelIOMMUState *s) | |
1719 | { | |
1720 | s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG); | |
1721 | s->root_extended = s->root & VTD_RTADDR_RTT; | |
37f51384 | 1722 | s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits); |
1da12ec4 | 1723 | |
2811af3b PX |
1724 | vtd_update_scalable_state(s); |
1725 | ||
7feb51b7 | 1726 | trace_vtd_reg_dmar_root(s->root, s->root_extended); |
1da12ec4 LT |
1727 | } |
1728 | ||
02a2cbc8 PX |
1729 | static void vtd_iec_notify_all(IntelIOMMUState *s, bool global, |
1730 | uint32_t index, uint32_t mask) | |
1731 | { | |
1732 | x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask); | |
1733 | } | |
1734 | ||
a5861439 PX |
1735 | static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s) |
1736 | { | |
1737 | uint64_t value = 0; | |
1738 | value = vtd_get_quad_raw(s, DMAR_IRTA_REG); | |
1739 | s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1); | |
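| /* 2^(size field + 1) entries, e.g. a size field of 15 gives 64K IRTEs */ |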
37f51384 | 1740 | s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits); |
28589311 | 1741 | s->intr_eime = value & VTD_IRTA_EIME; |
a5861439 | 1742 | |
02a2cbc8 PX |
1743 | /* Notify global invalidation */ |
1744 | vtd_iec_notify_all(s, true, 0, 0); | |
a5861439 | 1745 | |
7feb51b7 | 1746 | trace_vtd_reg_ir_root(s->intr_root, s->intr_size); |
a5861439 PX |
1747 | } |
1748 | ||
dd4d607e PX |
1749 | static void vtd_iommu_replay_all(IntelIOMMUState *s) |
1750 | { | |
b4a4ba0d | 1751 | VTDAddressSpace *vtd_as; |
dd4d607e | 1752 | |
b4a4ba0d | 1753 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
63b88968 | 1754 | vtd_sync_shadow_page_table(vtd_as); |
dd4d607e PX |
1755 | } |
1756 | } | |
1757 | ||
d92fa2dc LT |
1758 | static void vtd_context_global_invalidate(IntelIOMMUState *s) |
1759 | { | |
bc535e59 | 1760 | trace_vtd_inv_desc_cc_global(); |
1d9efa73 PX |
1761 | /* Protects context cache */ |
1762 | vtd_iommu_lock(s); | |
d92fa2dc LT |
1763 | s->context_cache_gen++; |
1764 | if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) { | |
1d9efa73 | 1765 | vtd_reset_context_cache_locked(s); |
d92fa2dc | 1766 | } |
1d9efa73 | 1767 | vtd_iommu_unlock(s); |
2cc9ddcc | 1768 | vtd_address_space_refresh_all(s); |
dd4d607e PX |
1769 | /* |
1770 | * From VT-d spec 6.5.2.1, a global context entry invalidation | |
1771 | * should be followed by an IOTLB global invalidation, so we should | |
1772 | * be safe even without this. However, let's replay the region as | |
1773 | * well to be safer, and come back here when we need finer tuning | |
1774 | * of the VT-d emulation code. | |
1775 | */ | |
1776 | vtd_iommu_replay_all(s); | |
d92fa2dc LT |
1777 | } |
1778 | ||
1779 | /* Do a context-cache device-selective invalidation. | |
1780 | * @func_mask: FM field after shifting | |
1781 | */ | |
1782 | static void vtd_context_device_invalidate(IntelIOMMUState *s, | |
1783 | uint16_t source_id, | |
1784 | uint16_t func_mask) | |
1785 | { | |
1786 | uint16_t mask; | |
7df953bd | 1787 | VTDBus *vtd_bus; |
d92fa2dc | 1788 | VTDAddressSpace *vtd_as; |
bc535e59 | 1789 | uint8_t bus_n, devfn; |
d92fa2dc LT |
1790 | uint16_t devfn_it; |
1791 | ||
bc535e59 PX |
1792 | trace_vtd_inv_desc_cc_devices(source_id, func_mask); |
1793 | ||
d92fa2dc LT |
1794 | switch (func_mask & 3) { |
1795 | case 0: | |
1796 | mask = 0; /* No bits in the SID field masked */ | |
1797 | break; | |
1798 | case 1: | |
1799 | mask = 4; /* Mask bit 2 in the SID field */ | |
1800 | break; | |
1801 | case 2: | |
1802 | mask = 6; /* Mask bit 2:1 in the SID field */ | |
1803 | break; | |
1804 | case 3: | |
1805 | mask = 7; /* Mask bit 2:0 in the SID field */ | |
1806 | break; | |
1807 | } | |
6cb99acc | 1808 | mask = ~mask; |
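| /* e.g. func_mask 3 gives mask ~7: SID bits 2:0 are ignored, so all |
|  * eight functions of the target device match below. */ |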
bc535e59 PX |
1809 | |
1810 | bus_n = VTD_SID_TO_BUS(source_id); | |
1811 | vtd_bus = vtd_find_as_from_bus_num(s, bus_n); | |
7df953bd | 1812 | if (vtd_bus) { |
d92fa2dc | 1813 | devfn = VTD_SID_TO_DEVFN(source_id); |
bf33cc75 | 1814 | for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { |
7df953bd | 1815 | vtd_as = vtd_bus->dev_as[devfn_it]; |
d92fa2dc | 1816 | if (vtd_as && ((devfn_it & mask) == (devfn & mask))) { |
bc535e59 PX |
1817 | trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it), |
1818 | VTD_PCI_FUNC(devfn_it)); | |
1d9efa73 | 1819 | vtd_iommu_lock(s); |
d92fa2dc | 1820 | vtd_as->context_cache_entry.context_cache_gen = 0; |
1d9efa73 | 1821 | vtd_iommu_unlock(s); |
dbaabb25 PX |
1822 | /* |
1823 | * Switch the address space when needed, in case the | |
1824 | * device's passthrough bit has been toggled. | |
1825 | */ | |
1826 | vtd_switch_address_space(vtd_as); | |
dd4d607e PX |
1827 | /* |
1828 | * So a device is moving out of (or moving into) a | |
63b88968 | 1829 | * domain, resync the shadow page table. |
dd4d607e PX |
1830 | * This does no harm even if we have no such | |
1831 | * notifier registered - the IOMMU notification | |
1832 | * framework will skip MAP notifications in that | |
1833 | * case. | |
1834 | */ | |
63b88968 | 1835 | vtd_sync_shadow_page_table(vtd_as); |
d92fa2dc LT |
1836 | } |
1837 | } | |
1838 | } | |
1839 | } | |
1840 | ||
1da12ec4 LT |
1841 | /* Context-cache invalidation |
1842 | * Returns the Context Actual Invalidation Granularity. | |
1843 | * @val: the content of the CCMD_REG | |
1844 | */ | |
1845 | static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val) | |
1846 | { | |
1847 | uint64_t caig; | |
1848 | uint64_t type = val & VTD_CCMD_CIRG_MASK; | |
1849 | ||
1850 | switch (type) { | |
d92fa2dc | 1851 | case VTD_CCMD_DOMAIN_INVL: |
d92fa2dc | 1852 | /* Fall through */ |
1da12ec4 | 1853 | case VTD_CCMD_GLOBAL_INVL: |
1da12ec4 | 1854 | caig = VTD_CCMD_GLOBAL_INVL_A; |
d92fa2dc | 1855 | vtd_context_global_invalidate(s); |
1da12ec4 LT |
1856 | break; |
1857 | ||
1858 | case VTD_CCMD_DEVICE_INVL: | |
1da12ec4 | 1859 | caig = VTD_CCMD_DEVICE_INVL_A; |
d92fa2dc | 1860 | vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val)); |
1da12ec4 LT |
1861 | break; |
1862 | ||
1863 | default: | |
1376211f PX |
1864 | error_report_once("%s: invalid context: 0x%" PRIx64, |
1865 | __func__, val); | |
1da12ec4 LT |
1866 | caig = 0; |
1867 | } | |
1868 | return caig; | |
1869 | } | |
1870 | ||
b5a280c0 LT |
1871 | static void vtd_iotlb_global_invalidate(IntelIOMMUState *s) |
1872 | { | |
7feb51b7 | 1873 | trace_vtd_inv_desc_iotlb_global(); |
b5a280c0 | 1874 | vtd_reset_iotlb(s); |
dd4d607e | 1875 | vtd_iommu_replay_all(s); |
b5a280c0 LT |
1876 | } |
1877 | ||
1878 | static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) | |
1879 | { | |
dd4d607e PX |
1880 | VTDContextEntry ce; |
1881 | VTDAddressSpace *vtd_as; | |
1882 | ||
7feb51b7 PX |
1883 | trace_vtd_inv_desc_iotlb_domain(domain_id); |
1884 | ||
1d9efa73 | 1885 | vtd_iommu_lock(s); |
b5a280c0 LT |
1886 | g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain, |
1887 | &domain_id); | |
1d9efa73 | 1888 | vtd_iommu_unlock(s); |
dd4d607e | 1889 | |
b4a4ba0d | 1890 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
dd4d607e PX |
1891 | if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), |
1892 | vtd_as->devfn, &ce) && | |
fb43cf73 | 1893 | domain_id == vtd_get_domain_id(s, &ce)) { |
63b88968 | 1894 | vtd_sync_shadow_page_table(vtd_as); |
dd4d607e PX |
1895 | } |
1896 | } | |
1897 | } | |
1898 | ||
dd4d607e PX |
1899 | static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, |
1900 | uint16_t domain_id, hwaddr addr, | |
1901 | uint8_t am) | |
1902 | { | |
b4a4ba0d | 1903 | VTDAddressSpace *vtd_as; |
dd4d607e PX |
1904 | VTDContextEntry ce; |
1905 | int ret; | |
4f8a62a9 | 1906 | hwaddr size = (1 << am) * VTD_PAGE_SIZE; |
dd4d607e | 1907 | |
b4a4ba0d | 1908 | QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { |
dd4d607e PX |
1909 | ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), |
1910 | vtd_as->devfn, &ce); | |
fb43cf73 | 1911 | if (!ret && domain_id == vtd_get_domain_id(s, &ce)) { |
4f8a62a9 PX |
1912 | if (vtd_as_has_map_notifier(vtd_as)) { |
1913 | /* | |
1914 | * As long as we have MAP notifications registered in | |
1915 | * any of our IOMMU notifiers, we need to sync the | |
1916 | * shadow page table. | |
1917 | */ | |
63b88968 | 1918 | vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size); |
4f8a62a9 PX |
1919 | } else { |
1920 | /* | |
1921 | * For UNMAP-only notifiers, we don't need to walk the | |
1922 | * page tables. We just deliver the PSI down to | |
1923 | * invalidate caches. | |
1924 | */ | |
1925 | IOMMUTLBEntry entry = { | |
1926 | .target_as = &address_space_memory, | |
1927 | .iova = addr, | |
1928 | .translated_addr = 0, | |
1929 | .addr_mask = size - 1, | |
1930 | .perm = IOMMU_NONE, | |
1931 | }; | |
cb1efcf4 | 1932 | memory_region_notify_iommu(&vtd_as->iommu, 0, entry); |
4f8a62a9 | 1933 | } |
dd4d607e PX |
1934 | } |
1935 | } | |
b5a280c0 LT |
1936 | } |
1937 | ||
1938 | static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, | |
1939 | hwaddr addr, uint8_t am) | |
1940 | { | |
1941 | VTDIOTLBPageInvInfo info; | |
1942 | ||
7feb51b7 PX |
1943 | trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am); |
1944 | ||
b5a280c0 LT |
1945 | assert(am <= VTD_MAMV); |
1946 | info.domain_id = domain_id; | |
d66b969b | 1947 | info.addr = addr; |
b5a280c0 | 1948 | info.mask = ~((1 << am) - 1); |
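| /* the invalidation covers 2^am contiguous 4K pages */ |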
1d9efa73 | 1949 | vtd_iommu_lock(s); |
b5a280c0 | 1950 | g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); |
1d9efa73 | 1951 | vtd_iommu_unlock(s); |
dd4d607e | 1952 | vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am); |
b5a280c0 LT |
1953 | } |
1954 | ||
1da12ec4 LT |
1955 | /* Flush IOTLB |
1956 | * Returns the IOTLB Actual Invalidation Granularity. | |
1957 | * @val: the content of the IOTLB_REG | |
1958 | */ | |
1959 | static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val) | |
1960 | { | |
1961 | uint64_t iaig; | |
1962 | uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK; | |
b5a280c0 LT |
1963 | uint16_t domain_id; |
1964 | hwaddr addr; | |
1965 | uint8_t am; | |
1da12ec4 LT |
1966 | |
1967 | switch (type) { | |
1968 | case VTD_TLB_GLOBAL_FLUSH: | |
1da12ec4 | 1969 | iaig = VTD_TLB_GLOBAL_FLUSH_A; |
b5a280c0 | 1970 | vtd_iotlb_global_invalidate(s); |
1da12ec4 LT |
1971 | break; |
1972 | ||
1973 | case VTD_TLB_DSI_FLUSH: | |
b5a280c0 | 1974 | domain_id = VTD_TLB_DID(val); |
1da12ec4 | 1975 | iaig = VTD_TLB_DSI_FLUSH_A; |
b5a280c0 | 1976 | vtd_iotlb_domain_invalidate(s, domain_id); |
1da12ec4 LT |
1977 | break; |
1978 | ||
1979 | case VTD_TLB_PSI_FLUSH: | |
b5a280c0 LT |
1980 | domain_id = VTD_TLB_DID(val); |
1981 | addr = vtd_get_quad_raw(s, DMAR_IVA_REG); | |
1982 | am = VTD_IVA_AM(addr); | |
1983 | addr = VTD_IVA_ADDR(addr); | |
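| /* IVA_REG packs both the target address and the address-mask (AM) field */ |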
b5a280c0 | 1984 | if (am > VTD_MAMV) { |
1376211f PX |
1985 | error_report_once("%s: address mask overflow: 0x%" PRIx64, |
1986 | __func__, vtd_get_quad_raw(s, DMAR_IVA_REG)); | |
b5a280c0 LT |
1987 | iaig = 0; |
1988 | break; | |
1989 | } | |
1da12ec4 | 1990 | iaig = VTD_TLB_PSI_FLUSH_A; |
b5a280c0 | 1991 | vtd_iotlb_page_invalidate(s, domain_id, addr, am); |
1da12ec4 LT |
1992 | break; |
1993 | ||
1994 | default: | |
1376211f PX |
1995 | error_report_once("%s: invalid granularity: 0x%" PRIx64, |
1996 | __func__, val); | |
1da12ec4 LT |
1997 | iaig = 0; |
1998 | } | |
1999 | return iaig; | |
2000 | } | |
2001 | ||
8991c460 | 2002 | static void vtd_fetch_inv_desc(IntelIOMMUState *s); |
ed7b8fbc LT |
2003 | |
2004 | static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s) | |
2005 | { | |
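| /* QI may only be disabled once the queue is empty and the last descriptor processed was a wait */ |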
2006 | return s->qi_enabled && (s->iq_tail == s->iq_head) && | |
2007 | (s->iq_last_desc_type == VTD_INV_DESC_WAIT); | |
2008 | } | |
2009 | ||
2010 | static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en) | |
2011 | { | |
2012 | uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG); | |
2013 | ||
7feb51b7 PX |
2014 | trace_vtd_inv_qi_enable(en); |
2015 | ||
ed7b8fbc | 2016 | if (en) { |
37f51384 | 2017 | s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); |
8991c460 | 2018 | /* 2^(x+8) entries */ |
c0c1d351 | 2019 | s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8 - (s->iq_dw ? 1 : 0)); |
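| /* e.g. QS=0 yields 256 entries with 128-bit descriptors, 128 with 256-bit ones */ |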
8991c460 LP |
2020 | s->qi_enabled = true; |
2021 | trace_vtd_inv_qi_setup(s->iq, s->iq_size); | |
2022 | /* Ok - report back to driver */ | |
2023 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES); | |
2024 | ||
2025 | if (s->iq_tail != 0) { | |
2026 | /* | |
2027 | * This is a spec violation but Windows guests are known to set up | |
2028 | * Queued Invalidation this way so we allow the write and process | |
2029 | * Invalidation Descriptors right away. | |
2030 | */ | |
2031 | trace_vtd_warn_invalid_qi_tail(s->iq_tail); | |
2032 | if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { | |
2033 | vtd_fetch_inv_desc(s); | |
2034 | } | |
ed7b8fbc LT |
2035 | } |
2036 | } else { | |
2037 | if (vtd_queued_inv_disable_check(s)) { | |
2038 | /* disable Queued Invalidation */ | |
2039 | vtd_set_quad_raw(s, DMAR_IQH_REG, 0); | |
2040 | s->iq_head = 0; | |
2041 | s->qi_enabled = false; | |
2042 | /* Ok - report back to driver */ | |
2043 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0); | |
2044 | } else { | |
4e4abd11 PX |
2045 | error_report_once("%s: detected improper state when disable QI " |
2046 | "(head=0x%x, tail=0x%x, last_type=%d)", | |
2047 | __func__, | |
2048 | s->iq_head, s->iq_tail, s->iq_last_desc_type); | |
ed7b8fbc LT |
2049 | } |
2050 | } | |
2051 | } | |
2052 | ||
1da12ec4 LT |
2053 | /* Set Root Table Pointer */ |
2054 | static void vtd_handle_gcmd_srtp(IntelIOMMUState *s) | |
2055 | { | |
1da12ec4 LT |
2056 | vtd_root_table_setup(s); |
2057 | /* Ok - report back to driver */ | |
2058 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS); | |
2cc9ddcc PX |
2059 | vtd_reset_caches(s); |
2060 | vtd_address_space_refresh_all(s); | |
1da12ec4 LT |
2061 | } |
2062 | ||
a5861439 PX |
2063 | /* Set Interrupt Remap Table Pointer */ |
2064 | static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s) | |
2065 | { | |
a5861439 PX |
2066 | vtd_interrupt_remap_table_setup(s); |
2067 | /* Ok - report back to driver */ | |
2068 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS); | |
2069 | } | |
2070 | ||
1da12ec4 LT |
2071 | /* Handle Translation Enable/Disable */ |
2072 | static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en) | |
2073 | { | |
558e0024 PX |
2074 | if (s->dmar_enabled == en) { |
2075 | return; | |
2076 | } | |
2077 | ||
7feb51b7 | 2078 | trace_vtd_dmar_enable(en); |
1da12ec4 LT |
2079 | |
2080 | if (en) { | |
2081 | s->dmar_enabled = true; | |
2082 | /* Ok - report back to driver */ | |
2083 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES); | |
2084 | } else { | |
2085 | s->dmar_enabled = false; | |
2086 | ||
2087 | /* Clear the index of Fault Recording Register */ | |
2088 | s->next_frcd_reg = 0; | |
2089 | /* Ok - report back to driver */ | |
2090 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0); | |
2091 | } | |
558e0024 | 2092 | |
2cc9ddcc PX |
2093 | vtd_reset_caches(s); |
2094 | vtd_address_space_refresh_all(s); | |
1da12ec4 LT |
2095 | } |
2096 | ||
80de52ba PX |
2097 | /* Handle Interrupt Remap Enable/Disable */ |
2098 | static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en) | |
2099 | { | |
7feb51b7 | 2100 | trace_vtd_ir_enable(en); |
80de52ba PX |
2101 | |
2102 | if (en) { | |
2103 | s->intr_enabled = true; | |
2104 | /* Ok - report back to driver */ | |
2105 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES); | |
2106 | } else { | |
2107 | s->intr_enabled = false; | |
2108 | /* Ok - report back to driver */ | |
2109 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0); | |
2110 | } | |
2111 | } | |
2112 | ||
1da12ec4 LT |
2113 | /* Handle write to Global Command Register */ |
2114 | static void vtd_handle_gcmd_write(IntelIOMMUState *s) | |
2115 | { | |
2116 | uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG); | |
2117 | uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG); | |
2118 | uint32_t changed = status ^ val; | |
2119 | ||
7feb51b7 | 2120 | trace_vtd_reg_write_gcmd(status, val); |
1da12ec4 LT |
2121 | if (changed & VTD_GCMD_TE) { |
2122 | /* Translation enable/disable */ | |
2123 | vtd_handle_gcmd_te(s, val & VTD_GCMD_TE); | |
2124 | } | |
2125 | if (val & VTD_GCMD_SRTP) { | |
2126 | /* Set/update the root-table pointer */ | |
2127 | vtd_handle_gcmd_srtp(s); | |
2128 | } | |
ed7b8fbc LT |
2129 | if (changed & VTD_GCMD_QIE) { |
2130 | /* Queued Invalidation Enable */ | |
2131 | vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE); | |
2132 | } | |
a5861439 PX |
2133 | if (val & VTD_GCMD_SIRTP) { |
2134 | /* Set/update the interrupt remapping root-table pointer */ | |
2135 | vtd_handle_gcmd_sirtp(s); | |
2136 | } | |
80de52ba PX |
2137 | if (changed & VTD_GCMD_IRE) { |
2138 | /* Interrupt remap enable/disable */ | |
2139 | vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE); | |
2140 | } | |
1da12ec4 LT |
2141 | } |
2142 | ||
2143 | /* Handle write to Context Command Register */ | |
2144 | static void vtd_handle_ccmd_write(IntelIOMMUState *s) | |
2145 | { | |
2146 | uint64_t ret; | |
2147 | uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG); | |
2148 | ||
2149 | /* Context-cache invalidation request */ | |
2150 | if (val & VTD_CCMD_ICC) { | |
ed7b8fbc | 2151 | if (s->qi_enabled) { |
1376211f PX |
2152 | error_report_once("Queued Invalidation enabled, " |
2153 | "should not use register-based invalidation"); | |
ed7b8fbc LT |
2154 | return; |
2155 | } | |
1da12ec4 LT |
2156 | ret = vtd_context_cache_invalidate(s, val); |
2157 | /* Invalidation completed. Clear the command bit and report the granularity */ |
2158 | vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL); | |
2159 | ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK, | |
2160 | ret); | |
1da12ec4 LT |
2161 | } |
2162 | } | |
2163 | ||
2164 | /* Handle write to IOTLB Invalidation Register */ | |
2165 | static void vtd_handle_iotlb_write(IntelIOMMUState *s) | |
2166 | { | |
2167 | uint64_t ret; | |
2168 | uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG); | |
2169 | ||
2170 | /* IOTLB invalidation request */ | |
2171 | if (val & VTD_TLB_IVT) { | |
ed7b8fbc | 2172 | if (s->qi_enabled) { |
1376211f PX |
2173 | error_report_once("Queued Invalidation enabled, " |
2174 | "should not use register-based invalidation"); | |
ed7b8fbc LT |
2175 | return; |
2176 | } | |
1da12ec4 LT |
2177 | ret = vtd_iotlb_flush(s, val); |
2178 | /* Invalidation completed. Clear the command bit and report the granularity */ |
2179 | vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL); | |
2180 | ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, | |
2181 | VTD_TLB_FLUSH_GRANU_MASK_A, ret); | |
1da12ec4 LT |
2182 | } |
2183 | } | |
2184 | ||
ed7b8fbc | 2185 | /* Fetch an Invalidation Descriptor from the Invalidation Queue */ |
c0c1d351 | 2186 | static bool vtd_get_inv_desc(IntelIOMMUState *s, |
ed7b8fbc LT |
2187 | VTDInvDesc *inv_desc) |
2188 | { | |
c0c1d351 LY |
2189 | dma_addr_t base_addr = s->iq; |
2190 | uint32_t offset = s->iq_head; | |
2191 | uint32_t dw = s->iq_dw ? 32 : 16; | |
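| /* descriptor width in bytes: 32 for 256-bit descriptors, 16 for 128-bit ones */ |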
2192 | dma_addr_t addr = base_addr + offset * dw; | |
2193 | ||
2194 | if (dma_memory_read(&address_space_memory, addr, inv_desc, dw)) { | |
2195 | error_report_once("Read INV DESC failed."); | |
ed7b8fbc LT |
2196 | return false; |
2197 | } | |
2198 | inv_desc->lo = le64_to_cpu(inv_desc->lo); | |
2199 | inv_desc->hi = le64_to_cpu(inv_desc->hi); | |
c0c1d351 LY |
2200 | if (dw == 32) { |
2201 | inv_desc->val[2] = le64_to_cpu(inv_desc->val[2]); | |
2202 | inv_desc->val[3] = le64_to_cpu(inv_desc->val[3]); | |
2203 | } | |
ed7b8fbc LT |
2204 | return true; |
2205 | } | |
2206 | ||
2207 | static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) | |
2208 | { | |
2209 | if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || | |
2210 | (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { | |
095955b2 PX |
2211 | error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 |
2212 | " (reserved nonzero)", __func__, inv_desc->hi, | |
2213 | inv_desc->lo); | |
ed7b8fbc LT |
2214 | return false; |
2215 | } | |
2216 | if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { | |
2217 | /* Status Write */ | |
2218 | uint32_t status_data = (uint32_t)(inv_desc->lo >> | |
2219 | VTD_INV_DESC_WAIT_DATA_SHIFT); | |
2220 | ||
2221 | assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); | |
2222 | ||
2223 | /* FIXME: need to be masked with HAW? */ | |
2224 | dma_addr_t status_addr = inv_desc->hi; | |
bc535e59 | 2225 | trace_vtd_inv_desc_wait_sw(status_addr, status_data); |
ed7b8fbc LT |
2226 | status_data = cpu_to_le32(status_data); |
2227 | if (dma_memory_write(&address_space_memory, status_addr, &status_data, | |
2228 | sizeof(status_data))) { | |
bc535e59 | 2229 | trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); |
ed7b8fbc LT |
2230 | return false; |
2231 | } | |
2232 | } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { | |
2233 | /* Interrupt flag */ | |
ed7b8fbc LT |
2234 | vtd_generate_completion_event(s); |
2235 | } else { | |
095955b2 PX |
2236 | error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 |
2237 | " (unknown type)", __func__, inv_desc->hi, | |
2238 | inv_desc->lo); | |
ed7b8fbc LT |
2239 | return false; |
2240 | } | |
2241 | return true; | |
2242 | } | |
2243 | ||
d92fa2dc LT |
2244 | static bool vtd_process_context_cache_desc(IntelIOMMUState *s, |
2245 | VTDInvDesc *inv_desc) | |
2246 | { | |
bc535e59 PX |
2247 | uint16_t sid, fmask; |
2248 | ||
d92fa2dc | 2249 | if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) { |
095955b2 PX |
2250 | error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 |
2251 | " (reserved nonzero)", __func__, inv_desc->hi, | |
2252 | inv_desc->lo); | |
d92fa2dc LT |
2253 | return false; |
2254 | } | |
2255 | switch (inv_desc->lo & VTD_INV_DESC_CC_G) { | |
2256 | case VTD_INV_DESC_CC_DOMAIN: | |
bc535e59 PX |
2257 | trace_vtd_inv_desc_cc_domain( |
2258 | (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo)); | |
d92fa2dc LT |
2259 | /* Fall through */ |
2260 | case VTD_INV_DESC_CC_GLOBAL: | |
d92fa2dc LT |
2261 | vtd_context_global_invalidate(s); |
2262 | break; | |
2263 | ||
2264 | case VTD_INV_DESC_CC_DEVICE: | |
bc535e59 PX |
2265 | sid = VTD_INV_DESC_CC_SID(inv_desc->lo); |
2266 | fmask = VTD_INV_DESC_CC_FM(inv_desc->lo); | |
2267 | vtd_context_device_invalidate(s, sid, fmask); | |
d92fa2dc LT |
2268 | break; |
2269 | ||
2270 | default: | |
095955b2 PX |
2271 | error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 |
2272 | " (invalid type)", __func__, inv_desc->hi, | |
2273 | inv_desc->lo); | |
d92fa2dc LT |
2274 | return false; |
2275 | } | |
2276 | return true; | |
2277 | } | |
2278 | ||
b5a280c0 LT |
2279 | static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) |
2280 | { | |
2281 | uint16_t domain_id; | |
2282 | uint8_t am; | |
2283 | hwaddr addr; | |
2284 | ||
2285 | if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) || | |
2286 | (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) { | |
095955b2 PX |
2287 | error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 |
2288 | ", lo=0x%"PRIx64" (reserved bits unzero)\n", | |
2289 | __func__, inv_desc->hi, inv_desc->lo); | |
b5a280c0 LT |
2290 | return false; |
2291 | } | |
2292 | ||
2293 | switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) { | |
2294 | case VTD_INV_DESC_IOTLB_GLOBAL: | |
b5a280c0 LT |
2295 | vtd_iotlb_global_invalidate(s); |
2296 | break; | |
2297 | ||
2298 | case VTD_INV_DESC_IOTLB_DOMAIN: | |
2299 | domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); | |
b5a280c0 LT |
2300 | vtd_iotlb_domain_invalidate(s, domain_id); |
2301 | break; | |
2302 | ||
2303 | case VTD_INV_DESC_IOTLB_PAGE: | |
2304 | domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); | |
2305 | addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi); | |
2306 | am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi); | |
b5a280c0 | 2307 | if (am > VTD_MAMV) { |
095955b2 PX |
2308 | error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 |
2309 | ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)\n", | |
2310 | __func__, inv_desc->hi, inv_desc->lo, | |
2311 | am, (unsigned)VTD_MAMV); | |
b5a280c0 LT |
2312 | return false; |
2313 | } | |
2314 | vtd_iotlb_page_invalidate(s, domain_id, addr, am); | |
2315 | break; | |
2316 | ||
2317 | default: | |
095955b2 PX |
2318 | error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 |
2319 | ", lo=0x%"PRIx64" (type mismatch: 0x%llx)\n", | |
2320 | __func__, inv_desc->hi, inv_desc->lo, | |
2321 | inv_desc->lo & VTD_INV_DESC_IOTLB_G); | |
b5a280c0 LT |
2322 | return false; |
2323 | } | |
2324 | return true; | |
2325 | } | |
2326 | ||
02a2cbc8 PX |
2327 | static bool vtd_process_inv_iec_desc(IntelIOMMUState *s, |
2328 | VTDInvDesc *inv_desc) | |
2329 | { | |
7feb51b7 PX |
2330 | trace_vtd_inv_desc_iec(inv_desc->iec.granularity, |
2331 | inv_desc->iec.index, | |
2332 | inv_desc->iec.index_mask); | |
02a2cbc8 PX |
2333 | |
2334 | vtd_iec_notify_all(s, !inv_desc->iec.granularity, | |
2335 | inv_desc->iec.index, | |
2336 | inv_desc->iec.index_mask); | |
554f5e16 JW |
2337 | return true; |
2338 | } | |
2339 | ||
2340 | static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, | |
2341 | VTDInvDesc *inv_desc) | |
2342 | { | |
2343 | VTDAddressSpace *vtd_dev_as; | |
2344 | IOMMUTLBEntry entry; | |
2345 | struct VTDBus *vtd_bus; | |
2346 | hwaddr addr; | |
2347 | uint64_t sz; | |
2348 | uint16_t sid; | |
2349 | uint8_t devfn; | |
2350 | bool size; | |
2351 | uint8_t bus_num; | |
2352 | ||
2353 | addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); | |
2354 | sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); | |
2355 | devfn = sid & 0xff; | |
2356 | bus_num = sid >> 8; | |
2357 | size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); | |
2358 | ||
2359 | if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || | |
2360 | (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) { | |
095955b2 PX |
2361 | error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64 |
2362 | ", lo=%"PRIx64" (reserved nonzero)", __func__, | |
2363 | inv_desc->hi, inv_desc->lo); | |
554f5e16 JW |
2364 | return false; |
2365 | } | |
2366 | ||
2367 | vtd_bus = vtd_find_as_from_bus_num(s, bus_num); | |
2368 | if (!vtd_bus) { | |
2369 | goto done; | |
2370 | } | |
2371 | ||
2372 | vtd_dev_as = vtd_bus->dev_as[devfn]; | |
2373 | if (!vtd_dev_as) { | |
2374 | goto done; | |
2375 | } | |
2376 | ||
04eb6247 JW |
2377 | /* According to ATS spec table 2.4: |
2378 | * S = 0, bits 15:12 = xxxx range size: 4K | |
2379 | * S = 1, bits 15:12 = xxx0 range size: 8K | |
2380 | * S = 1, bits 15:12 = xx01 range size: 16K | |
2381 | * S = 1, bits 15:12 = x011 range size: 32K | |
2382 | * S = 1, bits 15:12 = 0111 range size: 64K | |
2383 | * ... | |
2384 | */ | |
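| /* e.g. with S=1 and bits 15:12 = 0111b, cto64() sees three trailing |
|  * ones, so sz = 8K << 3 = 64K and addr is aligned down to 64K. */ |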
554f5e16 | 2385 | if (size) { |
04eb6247 | 2386 | sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT); |
554f5e16 JW |
2387 | addr &= ~(sz - 1); |
2388 | } else { | |
2389 | sz = VTD_PAGE_SIZE; | |
2390 | } | |
02a2cbc8 | 2391 | |
554f5e16 JW |
2392 | entry.target_as = &vtd_dev_as->as; |
2393 | entry.addr_mask = sz - 1; | |
2394 | entry.iova = addr; | |
2395 | entry.perm = IOMMU_NONE; | |
2396 | entry.translated_addr = 0; | |
cb1efcf4 | 2397 | memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry); |
554f5e16 JW |
2398 | |
2399 | done: | |
02a2cbc8 PX |
2400 | return true; |
2401 | } | |
2402 | ||
ed7b8fbc LT |
2403 | static bool vtd_process_inv_desc(IntelIOMMUState *s) |
2404 | { | |
2405 | VTDInvDesc inv_desc; | |
2406 | uint8_t desc_type; | |
2407 | ||
7feb51b7 | 2408 | trace_vtd_inv_qi_head(s->iq_head); |
c0c1d351 | 2409 | if (!vtd_get_inv_desc(s, &inv_desc)) { |
ed7b8fbc LT |
2410 | s->iq_last_desc_type = VTD_INV_DESC_NONE; |
2411 | return false; | |
2412 | } | |
c0c1d351 | 2413 | |
ed7b8fbc LT |
2414 | desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; |
2415 | /* FIXME: should this be updated first or last? */ |
2416 | s->iq_last_desc_type = desc_type; | |
2417 | ||
2418 | switch (desc_type) { | |
2419 | case VTD_INV_DESC_CC: | |
bc535e59 | 2420 | trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo); |
d92fa2dc LT |
2421 | if (!vtd_process_context_cache_desc(s, &inv_desc)) { |
2422 | return false; | |
2423 | } | |
ed7b8fbc LT |
2424 | break; |
2425 | ||
2426 | case VTD_INV_DESC_IOTLB: | |
bc535e59 | 2427 | trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo); |
b5a280c0 LT |
2428 | if (!vtd_process_iotlb_desc(s, &inv_desc)) { |
2429 | return false; | |
2430 | } | |
ed7b8fbc LT |
2431 | break; |
2432 | ||
4a4f219e YS |
2433 | /* |
2434 | * TODO: the bodies of the two cases below will be implemented in a future series. | |
2435 | * To make a guest (whose iommu driver integrates the scalable mode support | |
2436 | * patch set) work, simply returning true is enough for now. | |
2437 | */ | |
2438 | case VTD_INV_DESC_PC: | |
2439 | break; | |
2440 | ||
2441 | case VTD_INV_DESC_PIOTLB: | |
2442 | break; | |
2443 | ||
ed7b8fbc | 2444 | case VTD_INV_DESC_WAIT: |
bc535e59 | 2445 | trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo); |
ed7b8fbc LT |
2446 | if (!vtd_process_wait_desc(s, &inv_desc)) { |
2447 | return false; | |
2448 | } | |
2449 | break; | |
2450 | ||
b7910472 | 2451 | case VTD_INV_DESC_IEC: |
bc535e59 | 2452 | trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo); |
02a2cbc8 PX |
2453 | if (!vtd_process_inv_iec_desc(s, &inv_desc)) { |
2454 | return false; | |
2455 | } | |
b7910472 PX |
2456 | break; |
2457 | ||
554f5e16 | 2458 | case VTD_INV_DESC_DEVICE: |
7feb51b7 | 2459 | trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo); |
554f5e16 JW |
2460 | if (!vtd_process_device_iotlb_desc(s, &inv_desc)) { |
2461 | return false; | |
2462 | } | |
2463 | break; | |
2464 | ||
ed7b8fbc | 2465 | default: |
095955b2 PX |
2466 | error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64 |
2467 | " (unknown type)", __func__, inv_desc.hi, | |
2468 | inv_desc.lo); | |
ed7b8fbc LT |
2469 | return false; |
2470 | } | |
2471 | s->iq_head++; | |
2472 | if (s->iq_head == s->iq_size) { | |
2473 | s->iq_head = 0; | |
2474 | } | |
2475 | return true; | |
2476 | } | |
2477 | ||
2478 | /* Try to fetch and process more Invalidation Descriptors */ | |
2479 | static void vtd_fetch_inv_desc(IntelIOMMUState *s) | |
2480 | { | |
7feb51b7 PX |
2481 | trace_vtd_inv_qi_fetch(); |
2482 | ||
ed7b8fbc LT |
2483 | if (s->iq_tail >= s->iq_size) { |
2484 | /* Detects an invalid Tail pointer */ | |
4e4abd11 PX |
2485 | error_report_once("%s: detected invalid QI tail " |
2486 | "(tail=0x%x, size=0x%x)", | |
2487 | __func__, s->iq_tail, s->iq_size); | |
ed7b8fbc LT |
2488 | vtd_handle_inv_queue_error(s); |
2489 | return; | |
2490 | } | |
2491 | while (s->iq_head != s->iq_tail) { | |
2492 | if (!vtd_process_inv_desc(s)) { | |
2493 | /* Invalidation Queue Errors */ | |
2494 | vtd_handle_inv_queue_error(s); | |
2495 | break; | |
2496 | } | |
2497 | /* Must update the IQH_REG in time */ | |
2498 | vtd_set_quad_raw(s, DMAR_IQH_REG, | |
2499 | (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) & | |
2500 | VTD_IQH_QH_MASK); | |
2501 | } | |
2502 | } | |
2503 | ||
2504 | /* Handle write to Invalidation Queue Tail Register */ | |
2505 | static void vtd_handle_iqt_write(IntelIOMMUState *s) | |
2506 | { | |
2507 | uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG); | |
2508 | ||
c0c1d351 LY |
2509 | if (s->iq_dw && (val & VTD_IQT_QT_256_RSV_BIT)) { |
2510 | error_report_once("%s: RSV bit is set: val=0x%"PRIx64, | |
2511 | __func__, val); | |
2512 | return; | |
2513 | } | |
2514 | s->iq_tail = VTD_IQT_QT(s->iq_dw, val); | |
7feb51b7 PX |
2515 | trace_vtd_inv_qi_tail(s->iq_tail); |
2516 | ||
ed7b8fbc LT |
2517 | if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { |
2518 | /* Process Invalidation Queue here */ | |
2519 | vtd_fetch_inv_desc(s); | |
2520 | } | |
2521 | } | |
2522 | ||
1da12ec4 LT |
2523 | static void vtd_handle_fsts_write(IntelIOMMUState *s) |
2524 | { | |
2525 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); | |
2526 | uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); | |
2527 | uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE; | |
2528 | ||
2529 | if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) { | |
2530 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); | |
7feb51b7 | 2531 | trace_vtd_fsts_clear_ip(); |
1da12ec4 | 2532 | } |
ed7b8fbc LT |
2533 | /* FIXME: when IQE is Clear, should we try to fetch some Invalidation |
2534 | * Descriptors if there are any when Queued Invalidation is enabled? | |
2535 | */ | |
1da12ec4 LT |
2536 | } |
2537 | ||
2538 | static void vtd_handle_fectl_write(IntelIOMMUState *s) | |
2539 | { | |
2540 | uint32_t fectl_reg; | |
2541 | /* FIXME: when software clears the IM field, check the IP field. But do we | |
2542 | * need to compare the old value and the new value to conclude that | |
2543 | * software clears the IM field? Or just check if the IM field is zero? | |
2544 | */ | |
2545 | fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); | |
7feb51b7 PX |
2546 | |
2547 | trace_vtd_reg_write_fectl(fectl_reg); | |
2548 | ||
1da12ec4 LT |
2549 | if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) { |
2550 | vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); | |
2551 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); | |
1da12ec4 LT |
2552 | } |
2553 | } | |
2554 | ||
ed7b8fbc LT |
2555 | static void vtd_handle_ics_write(IntelIOMMUState *s) |
2556 | { | |
2557 | uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG); | |
2558 | uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); | |
2559 | ||
2560 | if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) { | |
7feb51b7 | 2561 | trace_vtd_reg_ics_clear_ip(); |
ed7b8fbc | 2562 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); |
ed7b8fbc LT |
2563 | } |
2564 | } | |
2565 | ||
2566 | static void vtd_handle_iectl_write(IntelIOMMUState *s) | |
2567 | { | |
2568 | uint32_t iectl_reg; | |
2569 | /* FIXME: when software clears the IM field, check the IP field. But do we | |
2570 | * need to compare the old value and the new value to conclude that | |
2571 | * software clears the IM field? Or just check if the IM field is zero? | |
2572 | */ | |
2573 | iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); | |
7feb51b7 PX |
2574 | |
2575 | trace_vtd_reg_write_iectl(iectl_reg); | |
2576 | ||
ed7b8fbc LT |
2577 | if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) { |
2578 | vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); | |
2579 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); | |
ed7b8fbc LT |
2580 | } |
2581 | } | |
2582 | ||
1da12ec4 LT |
2583 | static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size) |
2584 | { | |
2585 | IntelIOMMUState *s = opaque; | |
2586 | uint64_t val; | |
2587 | ||
7feb51b7 PX |
2588 | trace_vtd_reg_read(addr, size); |
2589 | ||
1da12ec4 | 2590 | if (addr + size > DMAR_REG_SIZE) { |
1376211f PX |
2591 | error_report_once("%s: MMIO over range: addr=0x%" PRIx64 |
2592 | " size=0x%u", __func__, addr, size); | |
1da12ec4 LT |
2593 | return (uint64_t)-1; |
2594 | } | |
2595 | ||
2596 | switch (addr) { | |
2597 | /* Root Table Address Register, 64-bit */ | |
2598 | case DMAR_RTADDR_REG: | |
2599 | if (size == 4) { | |
2600 | val = s->root & ((1ULL << 32) - 1); | |
2601 | } else { | |
2602 | val = s->root; | |
2603 | } | |
2604 | break; | |
2605 | ||
2606 | case DMAR_RTADDR_REG_HI: | |
2607 | assert(size == 4); | |
2608 | val = s->root >> 32; | |
2609 | break; | |
2610 | ||
ed7b8fbc LT |
2611 | /* Invalidation Queue Address Register, 64-bit */ |
2612 | case DMAR_IQA_REG: | |
2613 | val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS); | |
2614 | if (size == 4) { | |
2615 | val = val & ((1ULL << 32) - 1); | |
2616 | } | |
2617 | break; | |
2618 | ||
2619 | case DMAR_IQA_REG_HI: | |
2620 | assert(size == 4); | |
2621 | val = s->iq >> 32; | |
2622 | break; | |
2623 | ||
1da12ec4 LT |
2624 | default: |
2625 | if (size == 4) { | |
2626 | val = vtd_get_long(s, addr); | |
2627 | } else { | |
2628 | val = vtd_get_quad(s, addr); | |
2629 | } | |
2630 | } | |
7feb51b7 | 2631 | |
1da12ec4 LT |
2632 | return val; |
2633 | } | |
2634 | ||
2635 | static void vtd_mem_write(void *opaque, hwaddr addr, | |
2636 | uint64_t val, unsigned size) | |
2637 | { | |
2638 | IntelIOMMUState *s = opaque; | |
2639 | ||
7feb51b7 PX |
2640 | trace_vtd_reg_write(addr, size, val); |
2641 | ||
1da12ec4 | 2642 | if (addr + size > DMAR_REG_SIZE) { |
1376211f PX |
2643 | error_report_once("%s: MMIO over range: addr=0x%" PRIx64 |
2644 | " size=0x%u", __func__, addr, size); | |
1da12ec4 LT |
2645 | return; |
2646 | } | |
2647 | ||
2648 | switch (addr) { | |
2649 | /* Global Command Register, 32-bit */ | |
2650 | case DMAR_GCMD_REG: | |
1da12ec4 LT |
2651 | vtd_set_long(s, addr, val); |
2652 | vtd_handle_gcmd_write(s); | |
2653 | break; | |
2654 | ||
2655 | /* Context Command Register, 64-bit */ | |
2656 | case DMAR_CCMD_REG: | |
1da12ec4 LT |
2657 | if (size == 4) { |
2658 | vtd_set_long(s, addr, val); | |
2659 | } else { | |
2660 | vtd_set_quad(s, addr, val); | |
2661 | vtd_handle_ccmd_write(s); | |
2662 | } | |
2663 | break; | |
2664 | ||
2665 | case DMAR_CCMD_REG_HI: | |
1da12ec4 LT |
2666 | assert(size == 4); |
2667 | vtd_set_long(s, addr, val); | |
2668 | vtd_handle_ccmd_write(s); | |
2669 | break; | |
2670 | ||
2671 | /* IOTLB Invalidation Register, 64-bit */ | |
2672 | case DMAR_IOTLB_REG: | |
1da12ec4 LT |
2673 | if (size == 4) { |
2674 | vtd_set_long(s, addr, val); | |
2675 | } else { | |
2676 | vtd_set_quad(s, addr, val); | |
2677 | vtd_handle_iotlb_write(s); | |
2678 | } | |
2679 | break; | |
2680 | ||
2681 | case DMAR_IOTLB_REG_HI: | |
1da12ec4 LT |
2682 | assert(size == 4); |
2683 | vtd_set_long(s, addr, val); | |
2684 | vtd_handle_iotlb_write(s); | |
2685 | break; | |
2686 | ||
b5a280c0 LT |
2687 | /* Invalidate Address Register, 64-bit */ |
2688 | case DMAR_IVA_REG: | |
b5a280c0 LT |
2689 | if (size == 4) { |
2690 | vtd_set_long(s, addr, val); | |
2691 | } else { | |
2692 | vtd_set_quad(s, addr, val); | |
2693 | } | |
2694 | break; | |
2695 | ||
2696 | case DMAR_IVA_REG_HI: | |
b5a280c0 LT |
2697 | assert(size == 4); |
2698 | vtd_set_long(s, addr, val); | |
2699 | break; | |
2700 | ||
1da12ec4 LT |
2701 | /* Fault Status Register, 32-bit */ |
2702 | case DMAR_FSTS_REG: | |
1da12ec4 LT |
2703 | assert(size == 4); |
2704 | vtd_set_long(s, addr, val); | |
2705 | vtd_handle_fsts_write(s); | |
2706 | break; | |
2707 | ||
2708 | /* Fault Event Control Register, 32-bit */ | |
2709 | case DMAR_FECTL_REG: | |
1da12ec4 LT |
2710 | assert(size == 4); |
2711 | vtd_set_long(s, addr, val); | |
2712 | vtd_handle_fectl_write(s); | |
2713 | break; | |
2714 | ||
2715 | /* Fault Event Data Register, 32-bit */ | |
2716 | case DMAR_FEDATA_REG: | |
1da12ec4 LT |
2717 | assert(size == 4); |
2718 | vtd_set_long(s, addr, val); | |
2719 | break; | |
2720 | ||
2721 | /* Fault Event Address Register, 32-bit */ | |
2722 | case DMAR_FEADDR_REG: | |
b7a7bb35 JK |
2723 | if (size == 4) { |
2724 | vtd_set_long(s, addr, val); | |
2725 | } else { | |
2726 | /* | |
2727 | * While the register is 32-bit only, some guests (Xen...) write to | |
2728 | * it with 64-bit. | |
2729 | */ | |
2730 | vtd_set_quad(s, addr, val); | |
2731 | } | |
1da12ec4 LT |
2732 | break; |
2733 | ||
2734 | /* Fault Event Upper Address Register, 32-bit */ | |
2735 | case DMAR_FEUADDR_REG: | |
1da12ec4 LT |
2736 | assert(size == 4); |
2737 | vtd_set_long(s, addr, val); | |
2738 | break; | |
2739 | ||
2740 | /* Protected Memory Enable Register, 32-bit */ | |
2741 | case DMAR_PMEN_REG: | |
1da12ec4 LT |
2742 | assert(size == 4); |
2743 | vtd_set_long(s, addr, val); | |
2744 | break; | |
2745 | ||
2746 | /* Root Table Address Register, 64-bit */ | |
2747 | case DMAR_RTADDR_REG: | |
1da12ec4 LT |
2748 | if (size == 4) { |
2749 | vtd_set_long(s, addr, val); | |
2750 | } else { | |
2751 | vtd_set_quad(s, addr, val); | |
2752 | } | |
2753 | break; | |
2754 | ||
2755 | case DMAR_RTADDR_REG_HI: | |
1da12ec4 LT |
2756 | assert(size == 4); |
2757 | vtd_set_long(s, addr, val); | |
2758 | break; | |
2759 | ||
ed7b8fbc LT |
2760 | /* Invalidation Queue Tail Register, 64-bit */ |
2761 | case DMAR_IQT_REG: | |
ed7b8fbc LT |
2762 | if (size == 4) { |
2763 | vtd_set_long(s, addr, val); | |
2764 | } else { | |
2765 | vtd_set_quad(s, addr, val); | |
2766 | } | |
2767 | vtd_handle_iqt_write(s); | |
2768 | break; | |
2769 | ||
2770 | case DMAR_IQT_REG_HI: | |
ed7b8fbc LT |
2771 | assert(size == 4); |
2772 | vtd_set_long(s, addr, val); | |
2773 | /* 19:63 of IQT_REG is RsvdZ, do nothing here */ | |
2774 | break; | |
2775 | ||
2776 | /* Invalidation Queue Address Register, 64-bit */ | |
2777 | case DMAR_IQA_REG: | |
ed7b8fbc LT |
2778 | if (size == 4) { |
2779 | vtd_set_long(s, addr, val); | |
2780 | } else { | |
2781 | vtd_set_quad(s, addr, val); | |
2782 | } | |
c0c1d351 LY |
2783 | if (s->ecap & VTD_ECAP_SMTS && |
2784 | val & VTD_IQA_DW_MASK) { | |
2785 | s->iq_dw = true; | |
2786 | } else { | |
2787 | s->iq_dw = false; | |
2788 | } | |
ed7b8fbc LT |
2789 | break; |
2790 | ||
2791 | case DMAR_IQA_REG_HI: | |
ed7b8fbc LT |
2792 | assert(size == 4); |
2793 | vtd_set_long(s, addr, val); | |
2794 | break; | |
2795 | ||
2796 | /* Invalidation Completion Status Register, 32-bit */ | |
2797 | case DMAR_ICS_REG: | |
ed7b8fbc LT |
2798 | assert(size == 4); |
2799 | vtd_set_long(s, addr, val); | |
2800 | vtd_handle_ics_write(s); | |
2801 | break; | |
2802 | ||
2803 | /* Invalidation Event Control Register, 32-bit */ | |
2804 | case DMAR_IECTL_REG: | |
ed7b8fbc LT |
2805 | assert(size == 4); |
2806 | vtd_set_long(s, addr, val); | |
2807 | vtd_handle_iectl_write(s); | |
2808 | break; | |
2809 | ||
2810 | /* Invalidation Event Data Register, 32-bit */ | |
2811 | case DMAR_IEDATA_REG: | |
ed7b8fbc LT |
2812 | assert(size == 4); |
2813 | vtd_set_long(s, addr, val); | |
2814 | break; | |
2815 | ||
2816 | /* Invalidation Event Address Register, 32-bit */ | |
2817 | case DMAR_IEADDR_REG: | |
ed7b8fbc LT |
2818 | assert(size == 4); |
2819 | vtd_set_long(s, addr, val); | |
2820 | break; | |
2821 | ||
2822 | /* Invalidation Event Upper Address Register, 32-bit */ | |
2823 | case DMAR_IEUADDR_REG: | |
ed7b8fbc LT |
2824 | assert(size == 4); |
2825 | vtd_set_long(s, addr, val); | |
2826 | break; | |
2827 | ||
1da12ec4 LT |
2828 | /* Fault Recording Registers, 128-bit */ |
2829 | case DMAR_FRCD_REG_0_0: | |
1da12ec4 LT |
2830 | if (size == 4) { |
2831 | vtd_set_long(s, addr, val); | |
2832 | } else { | |
2833 | vtd_set_quad(s, addr, val); | |
2834 | } | |
2835 | break; | |
2836 | ||
2837 | case DMAR_FRCD_REG_0_1: | |
1da12ec4 LT |
2838 | assert(size == 4); |
2839 | vtd_set_long(s, addr, val); | |
2840 | break; | |
2841 | ||
2842 | case DMAR_FRCD_REG_0_2: | |
1da12ec4 LT |
2843 | if (size == 4) { |
2844 | vtd_set_long(s, addr, val); | |
2845 | } else { | |
2846 | vtd_set_quad(s, addr, val); | |
2847 | /* May clear bit 127 (Fault), update PPF */ | |
2848 | vtd_update_fsts_ppf(s); | |
2849 | } | |
2850 | break; | |
2851 | ||
2852 | case DMAR_FRCD_REG_0_3: | |
1da12ec4 LT |
2853 | assert(size == 4); |
2854 | vtd_set_long(s, addr, val); | |
2855 | /* May clear bit 127 (Fault), update PPF */ | |
2856 | vtd_update_fsts_ppf(s); | |
2857 | break; | |
2858 | ||
a5861439 | 2859 | case DMAR_IRTA_REG: |
a5861439 PX |
2860 | if (size == 4) { |
2861 | vtd_set_long(s, addr, val); | |
2862 | } else { | |
2863 | vtd_set_quad(s, addr, val); | |
2864 | } | |
2865 | break; | |
2866 | ||
2867 | case DMAR_IRTA_REG_HI: | |
a5861439 PX |
2868 | assert(size == 4); |
2869 | vtd_set_long(s, addr, val); | |
2870 | break; | |
2871 | ||
1da12ec4 | 2872 | default: |
1da12ec4 LT |
2873 | if (size == 4) { |
2874 | vtd_set_long(s, addr, val); | |
2875 | } else { | |
2876 | vtd_set_quad(s, addr, val); | |
2877 | } | |
2878 | } | |
2879 | } | |
2880 | ||
3df9d748 | 2881 | static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr, |
2c91bcf2 | 2882 | IOMMUAccessFlags flag, int iommu_idx) |
1da12ec4 LT |
2883 | { |
2884 | VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); | |
2885 | IntelIOMMUState *s = vtd_as->iommu_state; | |
b9313021 PX |
2886 | IOMMUTLBEntry iotlb = { |
2887 | /* We'll fill in the rest later. */ | |
1da12ec4 | 2888 | .target_as = &address_space_memory, |
1da12ec4 | 2889 | }; |
b9313021 | 2890 | bool success; |
1da12ec4 | 2891 | |
b9313021 PX |
2892 | if (likely(s->dmar_enabled)) { |
2893 | success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, | |
2894 | addr, flag & IOMMU_WO, &iotlb); | |
2895 | } else { | |
1da12ec4 | 2896 | /* DMAR disabled, passthrough, use 4k-page */ |
b9313021 PX |
2897 | iotlb.iova = addr & VTD_PAGE_MASK_4K; |
2898 | iotlb.translated_addr = addr & VTD_PAGE_MASK_4K; | |
2899 | iotlb.addr_mask = ~VTD_PAGE_MASK_4K; | |
2900 | iotlb.perm = IOMMU_RW; | |
2901 | success = true; | |
1da12ec4 LT |
2902 | } |
2903 | ||
b9313021 PX |
2904 | if (likely(success)) { |
2905 | trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus), | |
2906 | VTD_PCI_SLOT(vtd_as->devfn), | |
2907 | VTD_PCI_FUNC(vtd_as->devfn), | |
2908 | iotlb.iova, iotlb.translated_addr, | |
2909 | iotlb.addr_mask); | |
2910 | } else { | |
4e4abd11 PX |
2911 | error_report_once("%s: detected translation failure " |
2912 | "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")", | |
2913 | __func__, pci_bus_num(vtd_as->bus), | |
2914 | VTD_PCI_SLOT(vtd_as->devfn), | |
2915 | VTD_PCI_FUNC(vtd_as->devfn), | |
662b4b69 | 2916 | addr); |
b9313021 | 2917 | } |
7feb51b7 | 2918 | |
b9313021 | 2919 | return iotlb; |
1da12ec4 LT |
2920 | } |
2921 | ||
3df9d748 | 2922 | static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, |
5bf3d319 PX |
2923 | IOMMUNotifierFlag old, |
2924 | IOMMUNotifierFlag new) | |
3cb3b154 AW |
2925 | { |
2926 | VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); | |
dd4d607e | 2927 | IntelIOMMUState *s = vtd_as->iommu_state; |
3cb3b154 | 2928 | |
dd4d607e | 2929 | if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) { |
75c5626c | 2930 | error_report("We need to set caching-mode=on for intel-iommu to enable " |
dd4d607e | 2931 | "device assignment with IOMMU protection."); |
a3276f78 PX |
2932 | exit(1); |
2933 | } | |
dd4d607e | 2934 | |
4f8a62a9 PX |
2935 | /* Update per-address-space notifier flags */ |
2936 | vtd_as->notifier_flags = new; | |
2937 | ||
dd4d607e | 2938 | if (old == IOMMU_NOTIFIER_NONE) { |
b4a4ba0d PX |
2939 | QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next); |
2940 | } else if (new == IOMMU_NOTIFIER_NONE) { | |
2941 | QLIST_REMOVE(vtd_as, next); | |
dd4d607e | 2942 | } |
3cb3b154 AW |
2943 | } |
2944 | ||
552a1e01 PX |
2945 | static int vtd_post_load(void *opaque, int version_id) |
2946 | { | |
2947 | IntelIOMMUState *iommu = opaque; | |
2948 | ||
2949 | /* | |
2950 | * Memory regions are dynamically turned on/off depending on | |
2951 | * context entry configurations from the guest. After migration, | |
2952 | * we need to make sure the memory regions are still correct. | |
2953 | */ | |
2954 | vtd_switch_address_space_all(iommu); | |
2955 | ||
2811af3b PX |
2956 | /* |
2957 | * We don't need to migrate the root_scalable because we can | |
2958 | * simply do the calculation after the loading is complete. We | |
2959 | * can actually do similar things with root, dmar_enabled, etc.; | |
2960 | * however, since those are already part of the migration stream, | |
2961 | * we keep them for migration compatibility. | |
2962 | */ | |
2963 | vtd_update_scalable_state(iommu); | |
2964 | ||
552a1e01 PX |
2965 | return 0; |
2966 | } | |
2967 | ||
1da12ec4 LT |
2968 | static const VMStateDescription vtd_vmstate = { |
2969 | .name = "iommu-intel", | |
8cdcf3c1 PX |
2970 | .version_id = 1, |
2971 | .minimum_version_id = 1, | |
2972 | .priority = MIG_PRI_IOMMU, | |
552a1e01 | 2973 | .post_load = vtd_post_load, |
8cdcf3c1 PX |
2974 | .fields = (VMStateField[]) { |
2975 | VMSTATE_UINT64(root, IntelIOMMUState), | |
2976 | VMSTATE_UINT64(intr_root, IntelIOMMUState), | |
2977 | VMSTATE_UINT64(iq, IntelIOMMUState), | |
2978 | VMSTATE_UINT32(intr_size, IntelIOMMUState), | |
2979 | VMSTATE_UINT16(iq_head, IntelIOMMUState), | |
2980 | VMSTATE_UINT16(iq_tail, IntelIOMMUState), | |
2981 | VMSTATE_UINT16(iq_size, IntelIOMMUState), | |
2982 | VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState), | |
2983 | VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE), | |
2984 | VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState), | |
2985 | VMSTATE_BOOL(root_extended, IntelIOMMUState), | |
2986 | VMSTATE_BOOL(dmar_enabled, IntelIOMMUState), | |
2987 | VMSTATE_BOOL(qi_enabled, IntelIOMMUState), | |
2988 | VMSTATE_BOOL(intr_enabled, IntelIOMMUState), | |
2989 | VMSTATE_BOOL(intr_eime, IntelIOMMUState), | |
2990 | VMSTATE_END_OF_LIST() | |
2991 | } | |
1da12ec4 LT |
2992 | }; |
2993 | ||
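/*
 * MMIO ops for the DMAR register file (s->csrmem); vtd_realize() maps
 * it at Q35_HOST_BRIDGE_IOMMU_ADDR.  Only 4- and 8-byte accesses are
 * accepted.
 */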
2994 | static const MemoryRegionOps vtd_mem_ops = { | |
2995 | .read = vtd_mem_read, | |
2996 | .write = vtd_mem_write, | |
2997 | .endianness = DEVICE_LITTLE_ENDIAN, | |
2998 | .impl = { | |
2999 | .min_access_size = 4, | |
3000 | .max_access_size = 8, | |
3001 | }, | |
3002 | .valid = { | |
3003 | .min_access_size = 4, | |
3004 | .max_access_size = 8, | |
3005 | }, | |
3006 | }; | |
3007 | ||
3008 | static Property vtd_properties[] = { | |
3009 | DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0), | |
e6b6af05 RK |
3010 | DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim, |
3011 | ON_OFF_AUTO_AUTO), | |
fb506e70 | 3012 | DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false), |
4b49b586 | 3013 | DEFINE_PROP_UINT8("aw-bits", IntelIOMMUState, aw_bits, |
37f51384 | 3014 | VTD_HOST_ADDRESS_WIDTH), |
3b40f0e5 | 3015 | DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), |
4a4f219e | 3016 | DEFINE_PROP_BOOL("x-scalable-mode", IntelIOMMUState, scalable_mode, FALSE), |
ccc23bb0 | 3017 | DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), |
1da12ec4 LT |
3018 | DEFINE_PROP_END_OF_LIST(), |
3019 | }; | |
3020 | ||
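/*
 * Illustrative command line using these properties on a q35 machine
 * (assuming the usual "intremap" property from the x86-iommu base
 * class; the exact options depend on the setup):
 *
 *   qemu-system-x86_64 -machine q35,accel=kvm,kernel-irqchip=split \
 *       -device intel-iommu,intremap=on,caching-mode=on,aw-bits=48 ...
 */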
651e4cef PX |
3021 | /* Read the IRTE at the given index */
3022 | static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, | |
bc38ee10 | 3023 | VTD_IR_TableEntry *entry, uint16_t sid) |
651e4cef | 3024 | { |
ede9c94a PX |
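    /*
     * SID masks indexed by the SQ (source-id qualifier) field of the
     * IRTE: SQ=0 compares the full 16-bit source-id, while larger SQ
     * values ignore one or more of the low function-number bits.
     */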
3025 | static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \ |
3026 | {0xffff, 0xfffb, 0xfff9, 0xfff8}; | |
651e4cef | 3027 | dma_addr_t addr = 0x00; |
ede9c94a PX |
3028 | uint16_t mask, source_id; |
3029 | uint8_t bus, bus_max, bus_min; | |
651e4cef PX |
3030 | |
3031 | addr = iommu->intr_root + index * sizeof(*entry); | |
3032 | if (dma_memory_read(&address_space_memory, addr, entry, | |
3033 | sizeof(*entry))) { | |
1376211f PX |
3034 | error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64, |
3035 | __func__, index, addr); | |
651e4cef PX |
3036 | return -VTD_FR_IR_ROOT_INVAL; |
3037 | } | |
3038 | ||
7feb51b7 PX |
3039 | trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]), |
3040 | le64_to_cpu(entry->data[0])); | |
3041 | ||
bc38ee10 | 3042 | if (!entry->irte.present) { |
4e4abd11 PX |
3043 | error_report_once("%s: detected non-present IRTE " |
3044 | "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", | |
3045 | __func__, index, le64_to_cpu(entry->data[1]), | |
3046 | le64_to_cpu(entry->data[0])); | |
651e4cef PX |
3047 | return -VTD_FR_IR_ENTRY_P; |
3048 | } | |
3049 | ||
bc38ee10 MT |
3050 | if (entry->irte.__reserved_0 || entry->irte.__reserved_1 || |
3051 | entry->irte.__reserved_2) { | |
4e4abd11 PX |
3052 | error_report_once("%s: detected non-zero reserved IRTE " |
3053 | "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", | |
3054 | __func__, index, le64_to_cpu(entry->data[1]), | |
3055 | le64_to_cpu(entry->data[0])); | |
651e4cef PX |
3056 | return -VTD_FR_IR_IRTE_RSVD; |
3057 | } | |
3058 | ||
ede9c94a PX |
3059 | if (sid != X86_IOMMU_SID_INVALID) { |
3060 | /* Validate IRTE SID */ | |
bc38ee10 MT |
3061 | source_id = le32_to_cpu(entry->irte.source_id); |
3062 | switch (entry->irte.sid_vtype) { | |
ede9c94a | 3063 | case VTD_SVT_NONE: |
ede9c94a PX |
3064 | break; |
3065 | ||
3066 | case VTD_SVT_ALL: | |
bc38ee10 | 3067 | mask = vtd_svt_mask[entry->irte.sid_q]; |
ede9c94a | 3068 | if ((source_id & mask) != (sid & mask)) { |
4e4abd11 PX |
3069 | error_report_once("%s: invalid IRTE SID " |
3070 | "(index=%u, sid=%u, source_id=%u)", | |
3071 | __func__, index, sid, source_id); | |
ede9c94a PX |
3072 | return -VTD_FR_IR_SID_ERR; |
3073 | } | |
3074 | break; | |
3075 | ||
3076 | case VTD_SVT_BUS: | |
3077 | bus_max = source_id >> 8; | |
3078 | bus_min = source_id & 0xff; | |
3079 | bus = sid >> 8; | |
3080 | if (bus > bus_max || bus < bus_min) { | |
4e4abd11 PX |
3081 | error_report_once("%s: invalid SVT_BUS " |
3082 | "(index=%u, bus=%u, min=%u, max=%u)", | |
3083 | __func__, index, bus, bus_min, bus_max); | |
ede9c94a PX |
3084 | return -VTD_FR_IR_SID_ERR; |
3085 | } | |
3086 | break; | |
3087 | ||
3088 | default: | |
4e4abd11 PX |
3089 | error_report_once("%s: detected invalid IRTE SVT " |
3090 | "(index=%u, type=%d)", __func__, | |
3091 | index, entry->irte.sid_vtype); | |
ede9c94a PX |
3092 | /* Take this as verification failure. */ |
3093 | return -VTD_FR_IR_SID_ERR; | |
3094 | break; | |
3095 | } | |
3096 | } | |
651e4cef PX |
3097 | |
3098 | return 0; | |
3099 | } | |
3100 | ||
3101 | /* Fetch IRQ information for the given IR index */
ede9c94a | 3102 | static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index, |
35c24501 | 3103 | X86IOMMUIrq *irq, uint16_t sid) |
651e4cef | 3104 | { |
bc38ee10 | 3105 | VTD_IR_TableEntry irte = {}; |
651e4cef PX |
3106 | int ret = 0; |
3107 | ||
ede9c94a | 3108 | ret = vtd_irte_get(iommu, index, &irte, sid); |
651e4cef PX |
3109 | if (ret) { |
3110 | return ret; | |
3111 | } | |
3112 | ||
bc38ee10 MT |
3113 | irq->trigger_mode = irte.irte.trigger_mode; |
3114 | irq->vector = irte.irte.vector; | |
3115 | irq->delivery_mode = irte.irte.delivery_mode; | |
3116 | irq->dest = le32_to_cpu(irte.irte.dest_id); | |
28589311 | 3117 | if (!iommu->intr_eime) { |
651e4cef PX |
3118 | #define VTD_IR_APIC_DEST_MASK (0xff00ULL) |
3119 | #define VTD_IR_APIC_DEST_SHIFT (8) | |
28589311 JK |
3120 | irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >> |
3121 | VTD_IR_APIC_DEST_SHIFT; | |
3122 | } | |
bc38ee10 MT |
3123 | irq->dest_mode = irte.irte.dest_mode; |
3124 | irq->redir_hint = irte.irte.redir_hint; | |
651e4cef | 3125 | |
7feb51b7 PX |
3126 | trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector, |
3127 | irq->delivery_mode, irq->dest, irq->dest_mode); | |
651e4cef PX |
3128 | |
3129 | return 0; | |
3130 | } | |
3131 | ||
651e4cef PX |
3132 | /* Interrupt remapping for MSI/MSI-X entry */ |
3133 | static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu, | |
3134 | MSIMessage *origin, | |
ede9c94a PX |
3135 | MSIMessage *translated, |
3136 | uint16_t sid) | |
651e4cef PX |
3137 | { |
3138 | int ret = 0; | |
3139 | VTD_IR_MSIAddress addr; | |
3140 | uint16_t index; | |
35c24501 | 3141 | X86IOMMUIrq irq = {}; |
651e4cef PX |
3142 | |
3143 | assert(origin && translated); | |
3144 | ||
7feb51b7 PX |
3145 | trace_vtd_ir_remap_msi_req(origin->address, origin->data); |
3146 | ||
651e4cef | 3147 | if (!iommu || !iommu->intr_enabled) { |
e7a3b91f PX |
3148 | memcpy(translated, origin, sizeof(*origin)); |
3149 | goto out; | |
651e4cef PX |
3150 | } |
3151 | ||
3152 | if (origin->address & VTD_MSI_ADDR_HI_MASK) { | |
1376211f PX |
3153 | error_report_once("%s: MSI address high 32 bits non-zero detected: " |
3154 | "address=0x%" PRIx64, __func__, origin->address); | |
651e4cef PX |
3155 | return -VTD_FR_IR_REQ_RSVD; |
3156 | } | |
3157 | ||
3158 | addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; | |
1a43713b | 3159 | if (addr.addr.__head != 0xfee) { |
1376211f PX |
3160 | error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32, |
3161 | __func__, addr.data); | |
651e4cef PX |
3162 | return -VTD_FR_IR_REQ_RSVD; |
3163 | } | |
3164 | ||
3165 | /* This is compatibility format; pass the MSI through unchanged. */
bc38ee10 | 3166 | if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) { |
e7a3b91f PX |
3167 | memcpy(translated, origin, sizeof(*origin)); |
3168 | goto out; | |
651e4cef PX |
3169 | } |
3170 | ||
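    /*
     * In remappable format the 16-bit interrupt index is split across
     * the MSI address: index_l holds bits [14:0] and index_h holds
     * bit [15] (hence the << 15 below).
     */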
bc38ee10 | 3171 | index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l); |
651e4cef PX |
3172 | |
3173 | #define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff) | |
3174 | #define VTD_IR_MSI_DATA_RESERVED (0xffff0000) | |
3175 | ||
bc38ee10 | 3176 | if (addr.addr.sub_valid) { |
651e4cef PX |
3177 | /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ |
3178 | index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; | |
3179 | } | |
3180 | ||
ede9c94a | 3181 | ret = vtd_remap_irq_get(iommu, index, &irq, sid); |
651e4cef PX |
3182 | if (ret) { |
3183 | return ret; | |
3184 | } | |
3185 | ||
bc38ee10 | 3186 | if (addr.addr.sub_valid) { |
7feb51b7 | 3187 | trace_vtd_ir_remap_type("MSI"); |
651e4cef | 3188 | if (origin->data & VTD_IR_MSI_DATA_RESERVED) { |
4e4abd11 PX |
3189 | error_report_once("%s: invalid IR MSI " |
3190 | "(sid=%u, address=0x%" PRIx64 | |
3191 | ", data=0x%" PRIx32 ")", | |
3192 | __func__, sid, origin->address, origin->data); | |
651e4cef PX |
3193 | return -VTD_FR_IR_REQ_RSVD; |
3194 | } | |
3195 | } else { | |
3196 | uint8_t vector = origin->data & 0xff; | |
dea651a9 FW |
3197 | uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; |
3198 | ||
7feb51b7 | 3199 | trace_vtd_ir_remap_type("IOAPIC"); |
651e4cef PX |
3200 | /* IOAPIC entry vector should be aligned with IRTE vector |
3201 | * (see vt-d spec 5.1.5.1). */ | |
3202 | if (vector != irq.vector) { | |
7feb51b7 | 3203 | trace_vtd_warn_ir_vector(sid, index, vector, irq.vector); |
651e4cef | 3204 | } |
dea651a9 FW |
3205 | |
3206 | /* The Trigger Mode field must match the Trigger Mode in the IRTE. | |
3207 | * (see vt-d spec 5.1.5.1). */ | |
3208 | if (trigger_mode != irq.trigger_mode) { | |
7feb51b7 PX |
3209 | trace_vtd_warn_ir_trigger(sid, index, trigger_mode, |
3210 | irq.trigger_mode); | |
dea651a9 | 3211 | } |
651e4cef PX |
3212 | } |
3213 | ||
3214 | /* | |
3215 | * Keep the last two bits, since the guest OS might modify
3216 | * them. Keeping them does no harm in any case.
3217 | */ | |
bc38ee10 | 3218 | irq.msi_addr_last_bits = addr.addr.__not_care; |
651e4cef | 3219 | |
35c24501 BS |
3220 | /* Translate X86IOMMUIrq to MSI message */ |
3221 | x86_iommu_irq_to_msi_message(&irq, translated); | |
651e4cef | 3222 | |
e7a3b91f | 3223 | out: |
7feb51b7 PX |
3224 | trace_vtd_ir_remap_msi(origin->address, origin->data, |
3225 | translated->address, translated->data); | |
651e4cef PX |
3226 | return 0; |
3227 | } | |
3228 | ||
8b5ed7df PX |
3229 | static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src, |
3230 | MSIMessage *dst, uint16_t sid) | |
3231 | { | |
ede9c94a PX |
3232 | return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu), |
3233 | src, dst, sid); | |
8b5ed7df PX |
3234 | } |
3235 | ||
651e4cef PX |
3236 | static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr, |
3237 | uint64_t *data, unsigned size, | |
3238 | MemTxAttrs attrs) | |
3239 | { | |
3240 | return MEMTX_OK; | |
3241 | } | |
3242 | ||
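/*
 * A device MSI write that lands in the 0xFEExxxxx interrupt window is
 * reconstructed here (addr is the offset into the vtd-ir region),
 * remapped via vtd_interrupt_remap_msi(), and injected through the
 * APIC.  A failed remap drops the interrupt with MEMTX_ERROR.
 */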
3243 | static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr, | |
3244 | uint64_t value, unsigned size, | |
3245 | MemTxAttrs attrs) | |
3246 | { | |
3247 | int ret = 0; | |
09cd058a | 3248 | MSIMessage from = {}, to = {}; |
ede9c94a | 3249 | uint16_t sid = X86_IOMMU_SID_INVALID; |
651e4cef PX |
3250 | |
3251 | from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST; | |
3252 | from.data = (uint32_t) value; | |
3253 | ||
ede9c94a PX |
3254 | if (!attrs.unspecified) { |
3255 | /* We have explicit Source ID */ | |
3256 | sid = attrs.requester_id; | |
3257 | } | |
3258 | ||
3259 | ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid); | |
651e4cef PX |
3260 | if (ret) { |
3261 | /* TODO: report error */ | |
651e4cef PX |
3262 | /* Drop this interrupt */ |
3263 | return MEMTX_ERROR; | |
3264 | } | |
3265 | ||
32946019 | 3266 | apic_get_class()->send_msi(&to); |
651e4cef PX |
3267 | |
3268 | return MEMTX_OK; | |
3269 | } | |
3270 | ||
3271 | static const MemoryRegionOps vtd_mem_ir_ops = { | |
3272 | .read_with_attrs = vtd_mem_ir_read, | |
3273 | .write_with_attrs = vtd_mem_ir_write, | |
3274 | .endianness = DEVICE_LITTLE_ENDIAN, | |
3275 | .impl = { | |
3276 | .min_access_size = 4, | |
3277 | .max_access_size = 4, | |
3278 | }, | |
3279 | .valid = { | |
3280 | .min_access_size = 4, | |
3281 | .max_access_size = 4, | |
3282 | }, | |
3283 | }; | |
7df953bd KO |
3284 | |
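/*
 * Look up, or lazily create, the VTDAddressSpace for a device.  The
 * resulting per-device layout is roughly:
 *
 *   vtd_dev_as->root (container)
 *     +-- "vtd-nodmar": alias of the shared s->mr_nodmar region
 *     +-- "vtd-XX.X-dmar": per-device IOMMU region with an IR alias
 *
 * vtd_switch_address_space() enables exactly one of the two
 * sub-regions depending on whether DMAR is active for the device.
 */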
3285 | VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) | |
3286 | { | |
3287 | uintptr_t key = (uintptr_t)bus; | |
3288 | VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key); | |
3289 | VTDAddressSpace *vtd_dev_as; | |
e0a3c8cc | 3290 | char name[128]; |
7df953bd KO |
3291 | |
3292 | if (!vtd_bus) { | |
2d3fc581 JW |
3293 | uintptr_t *new_key = g_malloc(sizeof(*new_key)); |
3294 | *new_key = (uintptr_t)bus; | |
7df953bd | 3295 | /* No corresponding free() */ |
04af0e18 | 3296 | vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \ |
bf33cc75 | 3297 | PCI_DEVFN_MAX); |
7df953bd | 3298 | vtd_bus->bus = bus; |
2d3fc581 | 3299 | g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus); |
7df953bd KO |
3300 | } |
3301 | ||
3302 | vtd_dev_as = vtd_bus->dev_as[devfn]; | |
3303 | ||
3304 | if (!vtd_dev_as) { | |
4b519ef1 PX |
3305 | snprintf(name, sizeof(name), "vtd-%02x.%x", PCI_SLOT(devfn), |
3306 | PCI_FUNC(devfn)); | |
7df953bd KO |
3307 | vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); |
3308 | ||
3309 | vtd_dev_as->bus = bus; | |
3310 | vtd_dev_as->devfn = (uint8_t)devfn; | |
3311 | vtd_dev_as->iommu_state = s; | |
3312 | vtd_dev_as->context_cache_entry.context_cache_gen = 0; | |
63b88968 | 3313 | vtd_dev_as->iova_tree = iova_tree_new(); |
558e0024 | 3314 | |
4b519ef1 PX |
3315 | memory_region_init(&vtd_dev_as->root, OBJECT(s), name, UINT64_MAX); |
3316 | address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, "vtd-root"); | |
3317 | ||
558e0024 | 3318 | /* |
4b519ef1 PX |
3319 | * Build the DMAR-disabled container with aliases to the |
3320 | * shared MRs. Aliasing to a shared memory region lets the
3321 | * memory API detect identical FlatViews, so devices can share
3322 | * the same FlatView when DMAR is disabled (either by not
3323 | * providing "intel_iommu=on" or with "iommu=pt"). This greatly
3324 | * reduces the total number of FlatViews in the system and
3325 | * hence makes the VM run faster.
3326 | */ | |
3327 | memory_region_init_alias(&vtd_dev_as->nodmar, OBJECT(s), | |
3328 | "vtd-nodmar", &s->mr_nodmar, 0, | |
3329 | memory_region_size(&s->mr_nodmar)); | |
3330 | ||
3331 | /* | |
3332 | * Build the per-device DMAR-enabled container. | |
558e0024 | 3333 | * |
4b519ef1 PX |
3334 | * TODO: currently we have a per-device IOMMU memory region
3335 | * only because the IOMMU notifiers are kept per device. If
3336 | * one day the notifiers can be abstracted out of the memory
3337 | * regions, we could share the same memory region here as
3338 | * well, just like what we've done above with the nodmar
3339 | * region.
558e0024 | 3340 | */ |
4b519ef1 | 3341 | strcat(name, "-dmar"); |
1221a474 AK |
3342 | memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), |
3343 | TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s), | |
4b519ef1 PX |
3344 | name, UINT64_MAX); |
3345 | memory_region_init_alias(&vtd_dev_as->iommu_ir, OBJECT(s), "vtd-ir", | |
3346 | &s->mr_ir, 0, memory_region_size(&s->mr_ir)); | |
3347 | memory_region_add_subregion_overlap(MEMORY_REGION(&vtd_dev_as->iommu), | |
558e0024 | 3348 | VTD_INTERRUPT_ADDR_FIRST, |
4b519ef1 PX |
3349 | &vtd_dev_as->iommu_ir, 1); |
3350 | ||
3351 | /* | |
3352 | * Hook both containers under the root container; we switch
3353 | * between DMAR and noDMAR by enabling/disabling the
3354 | * corresponding sub-container.
3355 | */ | |
558e0024 | 3356 | memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, |
3df9d748 | 3357 | MEMORY_REGION(&vtd_dev_as->iommu), |
4b519ef1 PX |
3358 | 0); |
3359 | memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, | |
3360 | &vtd_dev_as->nodmar, 0); | |
3361 | ||
558e0024 | 3362 | vtd_switch_address_space(vtd_dev_as); |
7df953bd KO |
3363 | } |
3364 | return vtd_dev_as; | |
3365 | } | |
3366 | ||
dd4d607e PX |
3367 | /* Unmap the whole range in the notifier's scope. */ |
3368 | static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n) | |
3369 | { | |
3370 | IOMMUTLBEntry entry; | |
3371 | hwaddr size; | |
3372 | hwaddr start = n->start; | |
3373 | hwaddr end = n->end; | |
37f51384 | 3374 | IntelIOMMUState *s = as->iommu_state; |
63b88968 | 3375 | DMAMap map; |
dd4d607e PX |
3376 | |
3377 | /* | |
3378 | * Note: all the code in this function assumes that the IOVA
3379 | * width is no more than VTD_MGAW bits (as restricted by the
3380 | * VT-d spec); otherwise we would need to consider 64-bit overflow.
3381 | */ | |
3382 | ||
37f51384 | 3383 | if (end > VTD_ADDRESS_SIZE(s->aw_bits)) { |
dd4d607e PX |
3384 | /* |
3385 | * We don't need to unmap regions that are bigger than the
3386 | * whole VT-d supported address space size.
3387 | */ | |
37f51384 | 3388 | end = VTD_ADDRESS_SIZE(s->aw_bits); |
dd4d607e PX |
3389 | } |
3390 | ||
3391 | assert(start <= end); | |
3392 | size = end - start; | |
3393 | ||
3394 | if (ctpop64(size) != 1) { | |
3395 | /* | |
3396 | * This size cannot form a correct mask. Enlarge it to the
3397 | * smallest power-of-two mask that covers the range.
3398 | */ | |
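        /*
         * For example, a 0x3000-byte range is rounded up to 0x4000
         * (n = 14), so the resulting addr_mask is 0x3fff.
         */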
3399 | int n = 64 - clz64(size); | |
37f51384 | 3400 | if (n > s->aw_bits) { |
dd4d607e | 3401 | /* should not happen, but limit it just in case */
37f51384 | 3402 | n = s->aw_bits; |
dd4d607e PX |
3403 | } |
3404 | size = 1ULL << n; | |
3405 | } | |
3406 | ||
3407 | entry.target_as = &address_space_memory; | |
3408 | /* Adjust iova for the size */ | |
3409 | entry.iova = n->start & ~(size - 1); | |
3410 | /* This field is meaningless for unmap */ | |
3411 | entry.translated_addr = 0; | |
3412 | entry.perm = IOMMU_NONE; | |
3413 | entry.addr_mask = size - 1; | |
3414 | ||
3415 | trace_vtd_as_unmap_whole(pci_bus_num(as->bus), | |
3416 | VTD_PCI_SLOT(as->devfn), | |
3417 | VTD_PCI_FUNC(as->devfn), | |
3418 | entry.iova, size); | |
3419 | ||
63b88968 PX |
3420 | map.iova = entry.iova; |
3421 | map.size = entry.addr_mask; | |
3422 | iova_tree_remove(as->iova_tree, &map); | |
3423 | ||
dd4d607e PX |
3424 | memory_region_notify_one(n, &entry); |
3425 | } | |
3426 | ||
3427 | static void vtd_address_space_unmap_all(IntelIOMMUState *s) | |
3428 | { | |
dd4d607e PX |
3429 | VTDAddressSpace *vtd_as; |
3430 | IOMMUNotifier *n; | |
3431 | ||
b4a4ba0d | 3432 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
dd4d607e PX |
3433 | IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { |
3434 | vtd_address_space_unmap(vtd_as, n); | |
3435 | } | |
3436 | } | |
3437 | } | |
3438 | ||
2cc9ddcc PX |
3439 | static void vtd_address_space_refresh_all(IntelIOMMUState *s) |
3440 | { | |
3441 | vtd_address_space_unmap_all(s); | |
3442 | vtd_switch_address_space_all(s); | |
3443 | } | |
3444 | ||
f06a696d PX |
3445 | static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private) |
3446 | { | |
3447 | memory_region_notify_one((IOMMUNotifier *)private, entry); | |
3448 | return 0; | |
3449 | } | |
3450 | ||
3df9d748 | 3451 | static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) |
f06a696d | 3452 | { |
3df9d748 | 3453 | VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu); |
f06a696d PX |
3454 | IntelIOMMUState *s = vtd_as->iommu_state; |
3455 | uint8_t bus_n = pci_bus_num(vtd_as->bus); | |
3456 | VTDContextEntry ce; | |
3457 | ||
dd4d607e PX |
3458 | /* |
3459 | * The replay can be triggered either by an invalidation or by a
3460 | * newly created entry. In either case, we release the existing
3461 | * mappings (which means flushing the caches for UNMAP-only notifiers).
3462 | */ | |
3463 | vtd_address_space_unmap(vtd_as, n); | |
3464 | ||
f06a696d | 3465 | if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { |
fb43cf73 LY |
3466 | trace_vtd_replay_ce_valid(s->root_scalable ? "scalable mode" : |
3467 | "legacy mode", | |
3468 | bus_n, PCI_SLOT(vtd_as->devfn), | |
f06a696d | 3469 | PCI_FUNC(vtd_as->devfn), |
fb43cf73 | 3470 | vtd_get_domain_id(s, &ce), |
f06a696d | 3471 | ce.hi, ce.lo); |
4f8a62a9 PX |
3472 | if (vtd_as_has_map_notifier(vtd_as)) { |
3473 | /* This is required only for MAP-typed notifiers */
fe215b0c PX |
3474 | vtd_page_walk_info info = { |
3475 | .hook_fn = vtd_replay_hook, | |
3476 | .private = (void *)n, | |
3477 | .notify_unmap = false, | |
3478 | .aw = s->aw_bits, | |
2f764fa8 | 3479 | .as = vtd_as, |
fb43cf73 | 3480 | .domain_id = vtd_get_domain_id(s, &ce), |
fe215b0c PX |
3481 | }; |
3482 | ||
fb43cf73 | 3483 | vtd_page_walk(s, &ce, 0, ~0ULL, &info); |
4f8a62a9 | 3484 | } |
f06a696d PX |
3485 | } else { |
3486 | trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), | |
3487 | PCI_FUNC(vtd_as->devfn)); | |
3488 | } | |
3489 | ||
3490 | return; | |
3491 | } | |
3492 | ||
1da12ec4 LT |
3493 | /* Do the initialization. It will also be called on reset, so pay
3494 | * attention when adding new initialization code.
3495 | */ | |
3496 | static void vtd_init(IntelIOMMUState *s) | |
3497 | { | |
d54bd7f8 PX |
3498 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
3499 | ||
1da12ec4 LT |
3500 | memset(s->csr, 0, DMAR_REG_SIZE); |
3501 | memset(s->wmask, 0, DMAR_REG_SIZE); | |
3502 | memset(s->w1cmask, 0, DMAR_REG_SIZE); | |
3503 | memset(s->womask, 0, DMAR_REG_SIZE); | |
3504 | ||
1da12ec4 LT |
3505 | s->root = 0; |
3506 | s->root_extended = false; | |
fb43cf73 | 3507 | s->root_scalable = false; |
1da12ec4 | 3508 | s->dmar_enabled = false; |
d7bb469a | 3509 | s->intr_enabled = false; |
1da12ec4 LT |
3510 | s->iq_head = 0; |
3511 | s->iq_tail = 0; | |
3512 | s->iq = 0; | |
3513 | s->iq_size = 0; | |
3514 | s->qi_enabled = false; | |
3515 | s->iq_last_desc_type = VTD_INV_DESC_NONE; | |
c0c1d351 | 3516 | s->iq_dw = false; |
1da12ec4 | 3517 | s->next_frcd_reg = 0; |
92e5d85e PS |
3518 | s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | |
3519 | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | | |
37f51384 | 3520 | VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits); |
ccc23bb0 PX |
3521 | if (s->dma_drain) { |
3522 | s->cap |= VTD_CAP_DRAIN; | |
3523 | } | |
37f51384 PS |
3524 | if (s->aw_bits == VTD_HOST_AW_48BIT) { |
3525 | s->cap |= VTD_CAP_SAGAW_48bit; | |
3526 | } | |
ed7b8fbc | 3527 | s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; |
1da12ec4 | 3528 | |
92e5d85e PS |
3529 | /* |
3530 | * Rsvd field masks for spte | |
3531 | */ | |
3532 | vtd_paging_entry_rsvd_field[0] = ~0ULL; | |
37f51384 PS |
3533 | vtd_paging_entry_rsvd_field[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits); |
3534 | vtd_paging_entry_rsvd_field[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); | |
3535 | vtd_paging_entry_rsvd_field[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); | |
3536 | vtd_paging_entry_rsvd_field[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); | |
3537 | vtd_paging_entry_rsvd_field[5] = VTD_SPTE_LPAGE_L1_RSVD_MASK(s->aw_bits); | |
3538 | vtd_paging_entry_rsvd_field[6] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits); | |
3539 | vtd_paging_entry_rsvd_field[7] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits); | |
3540 | vtd_paging_entry_rsvd_field[8] = VTD_SPTE_LPAGE_L4_RSVD_MASK(s->aw_bits); | |
92e5d85e | 3541 | |
a924b3d8 | 3542 | if (x86_iommu_ir_supported(x86_iommu)) { |
e6b6af05 RK |
3543 | s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; |
3544 | if (s->intr_eim == ON_OFF_AUTO_ON) { | |
3545 | s->ecap |= VTD_ECAP_EIM; | |
3546 | } | |
3547 | assert(s->intr_eim != ON_OFF_AUTO_AUTO); | |
d54bd7f8 PX |
3548 | } |
3549 | ||
554f5e16 JW |
3550 | if (x86_iommu->dt_supported) { |
3551 | s->ecap |= VTD_ECAP_DT; | |
3552 | } | |
3553 | ||
dbaabb25 PX |
3554 | if (x86_iommu->pt_supported) { |
3555 | s->ecap |= VTD_ECAP_PT; | |
3556 | } | |
3557 | ||
3b40f0e5 ABD |
3558 | if (s->caching_mode) { |
3559 | s->cap |= VTD_CAP_CM; | |
3560 | } | |
3561 | ||
4a4f219e YS |
3562 | /* TODO: read cap/ecap from host to decide which cap to be exposed. */ |
3563 | if (s->scalable_mode) { | |
3564 | s->ecap |= VTD_ECAP_SMTS | VTD_ECAP_SRS | VTD_ECAP_SLTS; | |
3565 | } | |
3566 | ||
06aba4ca | 3567 | vtd_reset_caches(s); |
d92fa2dc | 3568 | |
1da12ec4 LT |
3569 | /* Define registers with default values and bit semantics */ |
3570 | vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0); | |
3571 | vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); | |
3572 | vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); | |
3573 | vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); | |
3574 | vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); | |
3575 | vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); | |
fb43cf73 | 3576 | vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffffc00ULL, 0); |
1da12ec4 LT |
3577 | vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); |
3578 | vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); | |
3579 | ||
3580 | /* Advanced Fault Logging not supported */ | |
3581 | vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL); | |
3582 | vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0); | |
3583 | vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0); | |
3584 | vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0); | |
3585 | ||
3586 | /* Treated as RsvdZ when EIM in ECAP_REG is not supported | |
3587 | * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0); | |
3588 | */ | |
3589 | vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0); | |
3590 | ||
3591 | /* Treated as RO for implementations in which the PLMR and PHMR fields
3592 | * are reported as Clear in CAP_REG.
3593 | * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0); | |
3594 | */ | |
3595 | vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0); | |
3596 | ||
ed7b8fbc LT |
3597 | vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); |
3598 | vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); | |
c0c1d351 | 3599 | vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff807ULL, 0); |
ed7b8fbc LT |
3600 | vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); |
3601 | vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); | |
3602 | vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); | |
3603 | vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0); | |
3604 | /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
3605 | vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0); | |
3606 | ||
1da12ec4 LT |
3607 | /* IOTLB registers */ |
3608 | vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
3609 | vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0); | |
3610 | vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL); | |
3611 | ||
3612 | /* Fault Recording Registers, 128-bit */ | |
3613 | vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0); | |
3614 | vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL); | |
a5861439 PX |
3615 | |
3616 | /* | |
28589311 | 3617 | * Interrupt remapping registers. |
a5861439 | 3618 | */ |
28589311 | 3619 | vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0); |
1da12ec4 LT |
3620 | } |
3621 | ||
3622 | /* Do not reset address_spaces on reset, because devices will still use
3623 | * the address space they got at first (they won't ask the bus again).
3624 | */ | |
3625 | static void vtd_reset(DeviceState *dev) | |
3626 | { | |
3627 | IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); | |
3628 | ||
1da12ec4 | 3629 | vtd_init(s); |
2cc9ddcc | 3630 | vtd_address_space_refresh_all(s); |
1da12ec4 LT |
3631 | } |
3632 | ||
621d983a MA |
3633 | static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) |
3634 | { | |
3635 | IntelIOMMUState *s = opaque; | |
3636 | VTDAddressSpace *vtd_as; | |
3637 | ||
bf33cc75 | 3638 | assert(0 <= devfn && devfn < PCI_DEVFN_MAX); |
621d983a MA |
3639 | |
3640 | vtd_as = vtd_find_add_as(s, bus, devfn); | |
3641 | return &vtd_as->as; | |
3642 | } | |
3643 | ||
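/*
 * Validate and resolve the user-visible configuration before realize:
 * eim=on needs intremap plus a split kernel irqchip with the KVM
 * X2APIC API, only 39-bit and 48-bit address widths are accepted,
 * and scalable mode requires dma-drain.
 */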
e6b6af05 | 3644 | static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) |
6333e93c | 3645 | { |
e6b6af05 RK |
3646 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
3647 | ||
a924b3d8 | 3648 | if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu_ir_supported(x86_iommu)) { |
e6b6af05 RK |
3649 | error_setg(errp, "eim=on cannot be selected without intremap=on"); |
3650 | return false; | |
3651 | } | |
3652 | ||
3653 | if (s->intr_eim == ON_OFF_AUTO_AUTO) { | |
fb506e70 | 3654 | s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) |
a924b3d8 | 3655 | && x86_iommu_ir_supported(x86_iommu) ? |
e6b6af05 RK |
3656 | ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; |
3657 | } | |
fb506e70 RK |
3658 | if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { |
3659 | if (!kvm_irqchip_in_kernel()) { | |
3660 | error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split"); | |
3661 | return false; | |
3662 | } | |
3663 | if (!kvm_enable_x2apic()) { | |
3664 | error_setg(errp, "eim=on requires support on the KVM side "
3665 | "(X2APIC_API, first shipped in v4.7)");
3666 | return false; | |
3667 | } | |
3668 | } | |
e6b6af05 | 3669 | |
37f51384 PS |
3670 | /* Currently the only supported address widths are 39 and 48 bits */
3671 | if ((s->aw_bits != VTD_HOST_AW_39BIT) && | |
3672 | (s->aw_bits != VTD_HOST_AW_48BIT)) { | |
3673 | error_setg(errp, "Supported values for aw-bits are: %d, %d",
3674 | VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT); | |
3675 | return false; | |
3676 | } | |
3677 | ||
4a4f219e YS |
3678 | if (s->scalable_mode && !s->dma_drain) { |
3679 | error_setg(errp, "Need to set dma-drain for scalable mode");
3680 | return false; | |
3681 | } | |
3682 | ||
6333e93c RK |
3683 | return true; |
3684 | } | |
3685 | ||
1da12ec4 LT |
3686 | static void vtd_realize(DeviceState *dev, Error **errp) |
3687 | { | |
ef0e8fc7 | 3688 | MachineState *ms = MACHINE(qdev_get_machine()); |
29396ed9 MG |
3689 | PCMachineState *pcms = PC_MACHINE(ms); |
3690 | PCIBus *bus = pcms->bus; | |
1da12ec4 | 3691 | IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); |
4684a204 | 3692 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); |
1da12ec4 | 3693 | |
fb9f5926 | 3694 | x86_iommu->type = TYPE_INTEL; |
6333e93c | 3695 | |
e6b6af05 | 3696 | if (!vtd_decide_config(s, errp)) { |
6333e93c RK |
3697 | return; |
3698 | } | |
3699 | ||
b4a4ba0d | 3700 | QLIST_INIT(&s->vtd_as_with_notifiers); |
1d9efa73 | 3701 | qemu_mutex_init(&s->iommu_lock); |
7df953bd | 3702 | memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num)); |
1da12ec4 LT |
3703 | memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, |
3704 | "intel_iommu", DMAR_REG_SIZE); | |
4b519ef1 PX |
3705 | |
3706 | /* Create the shared memory regions by all devices */ | |
3707 | memory_region_init(&s->mr_nodmar, OBJECT(s), "vtd-nodmar", | |
3708 | UINT64_MAX); | |
3709 | memory_region_init_io(&s->mr_ir, OBJECT(s), &vtd_mem_ir_ops, | |
3710 | s, "vtd-ir", VTD_INTERRUPT_ADDR_SIZE); | |
3711 | memory_region_init_alias(&s->mr_sys_alias, OBJECT(s), | |
3712 | "vtd-sys-alias", get_system_memory(), 0, | |
3713 | memory_region_size(get_system_memory())); | |
3714 | memory_region_add_subregion_overlap(&s->mr_nodmar, 0, | |
3715 | &s->mr_sys_alias, 0); | |
3716 | memory_region_add_subregion_overlap(&s->mr_nodmar, | |
3717 | VTD_INTERRUPT_ADDR_FIRST, | |
3718 | &s->mr_ir, 1); | |
3719 | ||
1da12ec4 | 3720 | sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem); |
b5a280c0 LT |
3721 | /* No corresponding destroy */ |
3722 | s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, | |
3723 | g_free, g_free); | |
7df953bd KO |
3724 | s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, |
3725 | g_free, g_free); | |
1da12ec4 | 3726 | vtd_init(s); |
621d983a MA |
3727 | sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); |
3728 | pci_setup_iommu(bus, vtd_host_dma_iommu, dev); | |
cb135f59 PX |
3729 | /* Pseudo address space under root PCI bus. */ |
3730 | pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); | |
1da12ec4 LT |
3731 | } |
3732 | ||
3733 | static void vtd_class_init(ObjectClass *klass, void *data) | |
3734 | { | |
3735 | DeviceClass *dc = DEVICE_CLASS(klass); | |
1c7955c4 | 3736 | X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass); |
1da12ec4 LT |
3737 | |
3738 | dc->reset = vtd_reset; | |
1da12ec4 LT |
3739 | dc->vmsd = &vtd_vmstate; |
3740 | dc->props = vtd_properties; | |
621d983a | 3741 | dc->hotpluggable = false; |
1c7955c4 | 3742 | x86_class->realize = vtd_realize; |
8b5ed7df | 3743 | x86_class->int_remap = vtd_int_remap; |
8ab5700c | 3744 | /* Supported by the pc-q35-* machine types */ |
e4f4fb1e | 3745 | dc->user_creatable = true; |
1da12ec4 LT |
3746 | } |
3747 | ||
3748 | static const TypeInfo vtd_info = { | |
3749 | .name = TYPE_INTEL_IOMMU_DEVICE, | |
1c7955c4 | 3750 | .parent = TYPE_X86_IOMMU_DEVICE, |
1da12ec4 LT |
3751 | .instance_size = sizeof(IntelIOMMUState), |
3752 | .class_init = vtd_class_init, | |
3753 | }; | |
3754 | ||
1221a474 AK |
3755 | static void vtd_iommu_memory_region_class_init(ObjectClass *klass, |
3756 | void *data) | |
3757 | { | |
3758 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); | |
3759 | ||
3760 | imrc->translate = vtd_iommu_translate; | |
3761 | imrc->notify_flag_changed = vtd_iommu_notify_flag_changed; | |
3762 | imrc->replay = vtd_iommu_replay; | |
3763 | } | |
3764 | ||
3765 | static const TypeInfo vtd_iommu_memory_region_info = { | |
3766 | .parent = TYPE_IOMMU_MEMORY_REGION, | |
3767 | .name = TYPE_INTEL_IOMMU_MEMORY_REGION, | |
3768 | .class_init = vtd_iommu_memory_region_class_init, | |
3769 | }; | |
3770 | ||
1da12ec4 LT |
3771 | static void vtd_register_types(void) |
3772 | { | |
1da12ec4 | 3773 | type_register_static(&vtd_info); |
1221a474 | 3774 | type_register_static(&vtd_iommu_memory_region_info); |
1da12ec4 LT |
3775 | } |
3776 | ||
3777 | type_init(vtd_register_types) |