/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 * (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.

 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.

 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"

static void vtd_address_space_refresh_all(IntelIOMMUState *s);
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n);

static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}

/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
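/*
 * For illustration (hypothetical values, not from the original source):
 * writable bits (wmask) take the written value, read-only bits keep their
 * old value, and write-1-to-clear bits (w1cmask) are cleared when the
 * guest writes 1 to them:
 *
 *   oldval = 0x130, wmask = 0x00f, w1cmask = 0x100, guest writes 0x10a
 *   result = ((0x130 & ~0x00f) | (0x10a & 0x00f)) & ~(0x100 & 0x10a)
 *          = 0x13a & ~0x100 = 0x03a
 *
 * i.e. bits 0-3 took the new value, bits 4-5 were preserved, and the
 * write-1-to-clear bit 8 was cleared because 1 was written to it.
 */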
static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}

/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}

static inline void vtd_iommu_lock(IntelIOMMUState *s)
{
    qemu_mutex_lock(&s->iommu_lock);
}

static inline void vtd_iommu_unlock(IntelIOMMUState *s)
{
    qemu_mutex_unlock(&s->iommu_lock);
}

/* Whether the address space needs to notify new mappings */
static inline gboolean vtd_as_has_map_notifier(VTDAddressSpace *as)
{
    return as->notifier_flags & IOMMU_NOTIFIER_MAP;
}

/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}

/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    assert(level != 0);
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
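/*
 * For illustration (derived from the two helpers above, assuming
 * VTD_PAGE_SHIFT_4K == 12 and VTD_SL_LEVEL_BITS == 9):
 *
 *   level 1 (4K PTE)  -> shift 12, page mask ~0xfff
 *   level 2 (2M PDE)  -> shift 21
 *   level 3 (1G PDPE) -> shift 30
 *   level 4 (PML4E)   -> shift 39
 */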
static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
           (((entry->gfn & info->mask) == gfn) ||
            (entry->gfn == gfn_tlb));
}

/* Reset all the gen of VTDAddressSpace to zero and set the gen of
 * IntelIOMMUState to 1. Must be called with IOMMU lock held.
 */
static void vtd_reset_context_cache_locked(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

/* Must be called with IOMMU lock held. */
static void vtd_reset_iotlb_locked(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    vtd_iommu_lock(s);
    vtd_reset_iotlb_locked(s);
    vtd_iommu_unlock(s);
}

static void vtd_reset_caches(IntelIOMMUState *s)
{
    vtd_iommu_lock(s);
    vtd_reset_iotlb_locked(s);
    vtd_reset_context_cache_locked(s);
    vtd_iommu_unlock(s);
}

static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
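/*
 * For illustration: the IOTLB key packs the GFN in the low bits and
 * stacks the source-id and the paging level above it (the exact shift
 * constants live in intel_iommu_internal.h).  With a hypothetical IOVA
 * of 0x12345000, a 4K mapping (level 1) yields gfn 0x12345 while the
 * same IOVA cached as a 2M mapping (level 2) yields gfn 0x12200, so the
 * two cache entries never collide.
 */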
/* Must be called with IOMMU lock held */
static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}

/* Must be called with IOMMU lock held */
static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             uint8_t access_flags, uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb_locked(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->access_flags = access_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}

/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class()->send_msi(&msi);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Notice that the value of FSTS_REG being passed to it should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        error_report_once("There are previous interrupt conditions "
                          "to be serviced by software, fault event "
                          "is not generated");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        error_report_once("Interrupt Mask set, irq is not generated");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}
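/*
 * For illustration: since each fault recording register is 128 bits wide,
 * register @index lives at DMAR_FRCD_REG_OFFSET + index * 16.  E.g. index 2
 * starts at DMAR_FRCD_REG_OFFSET + 0x20, and its high 64-bit half (which
 * holds the F bit, SID and fault reason) at + 0x28.
 */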
/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording register
 * is changed.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}

/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi = 0, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}
/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}

/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    if (fsts_reg & VTD_FSTS_PFO) {
        error_report_once("New fault is not recorded due to "
                          "Primary Fault Overflow");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        error_report_once("New fault is not recorded due to "
                          "compression of faults");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        error_report_once("Next Fault Recording Reg is used, "
                          "new fault is not recorded, set PFO field");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        error_report_once("There are pending faults already, "
                          "fault event is not generated");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set.
         * So generate fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}
/* Handle Invalidation Queue Errors (IQE) of the queued invalidation
 * interface.
 */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}
static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}

static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                            VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* We have checked that the root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}

static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte, uint8_t aw)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK(aw);
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of an slpte located at @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}
/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}
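/*
 * For illustration, with a made-up IOVA built from 9-bit indexes:
 *   iova = (3 << 39) | (5 << 30) | (7 << 21) | (9 << 12)
 * vtd_iova_level_offset(iova, 4) == 3, (iova, 3) == 5,
 * (iova, 2) == 7 and (iova, 1) == 9.
 */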
/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}
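/*
 * For illustration: the AW field of the context entry selects both the
 * second-level page-table depth and the adjusted guest address width,
 * matching the two helpers above:
 *   AW = 1 -> 3-level table, 39-bit AGAW
 *   AW = 2 -> 4-level table, 48-bit AGAW
 */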
static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}

/* Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            error_report_once("%s: DT specified but not supported", __func__);
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            error_report_once("%s: PT specified but not supported", __func__);
            return false;
        }
        break;
    default:
        /* Unknown type */
        error_report_once("%s: unknown ce type: %"PRIu32, __func__,
                          vtd_ce_get_type(ce));
        return false;
    }
    return true;
}

static inline uint64_t vtd_iova_limit(VTDContextEntry *ce, uint8_t aw)
{
    uint32_t ce_agaw = vtd_ce_get_agaw(ce);
    return 1ULL << MIN(ce_agaw, aw);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce,
                                        uint8_t aw)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce, aw) - 1));
}

/*
 * Rsvd field masks for spte:
 *     Index [1] to [4] 4k pages
 *     Index [5] to [8] large pages
 */
static uint64_t vtd_paging_entry_rsvd_field[9];

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
/* Find the VTD address space associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /*
         * Iterate over the registered buses to find the one which
         * currently holds this bus number, and update the bus_num
         * lookup table:
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }
    return vtd_bus;
}
/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, which can be used for deciding the size of a large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes, uint8_t aw_bits)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce, aw_bits)) {
        error_report_once("%s: detected IOVA overflow (iova=0x%" PRIx64 ")",
                          __func__, iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            error_report_once("%s: detected read error on DMAR slpte "
                              "(iova=0x%" PRIx64 ")", __func__, iova);
            if (level == vtd_ce_get_level(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            error_report_once("%s: detected slpte permission error "
                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
                              "slpte=0x%" PRIx64 ", write=%d)", __func__,
                              iova, level, slpte, is_write);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            error_report_once("%s: detected slpte reserved non-zero "
                              "(iova=0x%" PRIx64 ", level=0x%" PRIx32 ", "
                              "slpte=0x%" PRIx64 ")", __func__, iova,
                              level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte, aw_bits);
        level--;
    }
}
typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);

/**
 * Constant information used during page walking
 *
 * @hook_fn: hook func to be called when a page is detected
 * @private: private data to be passed into hook func
 * @notify_unmap: whether we should notify invalid entries
 * @as: VT-d address space of the device
 * @aw: maximum address width
 * @domain_id: domain ID of the page walk
 */
typedef struct {
    VTDAddressSpace *as;
    vtd_page_walk_hook hook_fn;
    void *private;
    bool notify_unmap;
    uint8_t aw;
    uint16_t domain_id;
} vtd_page_walk_info;

static int vtd_page_walk_one(IOMMUTLBEntry *entry, vtd_page_walk_info *info)
{
    VTDAddressSpace *as = info->as;
    vtd_page_walk_hook hook_fn = info->hook_fn;
    void *private = info->private;
    DMAMap target = {
        .iova = entry->iova,
        .size = entry->addr_mask,
        .translated_addr = entry->translated_addr,
        .perm = entry->perm,
    };
    DMAMap *mapped = iova_tree_find(as->iova_tree, &target);

    if (entry->perm == IOMMU_NONE && !info->notify_unmap) {
        trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
        return 0;
    }

    assert(hook_fn);

    /* Update local IOVA mapped ranges */
    if (entry->perm) {
        if (mapped) {
            /* If it's exactly the same translation, skip */
            if (!memcmp(mapped, &target, sizeof(target))) {
                trace_vtd_page_walk_one_skip_map(entry->iova, entry->addr_mask,
                                                 entry->translated_addr);
                return 0;
            } else {
                /*
                 * Translation changed. Normally this should not
                 * happen, but it can happen with buggy guest OSes.
                 * Note that there will be a small window where we
                 * don't have a mapping at all, but that's the best
                 * effort we can do. The ideal way to emulate this is
                 * to atomically modify the PTE to follow what has
                 * changed, but we can't. One example is that the vfio
                 * driver only has VFIO_IOMMU_[UN]MAP_DMA but no
                 * interface to modify a mapping (meanwhile it seems
                 * meaningless to even provide one). Anyway, let's
                 * mark this as a TODO in case one day we'll have
                 * a better solution.
                 */
                IOMMUAccessFlags cache_perm = entry->perm;
                int ret;

                /* Emulate an UNMAP */
                entry->perm = IOMMU_NONE;
                trace_vtd_page_walk_one(info->domain_id,
                                        entry->iova,
                                        entry->translated_addr,
                                        entry->addr_mask,
                                        entry->perm);
                ret = hook_fn(entry, private);
                if (ret) {
                    return ret;
                }
                /* Drop any existing mapping */
                iova_tree_remove(as->iova_tree, &target);
                /* Recover the correct permission */
                entry->perm = cache_perm;
            }
        }
        iova_tree_insert(as->iova_tree, &target);
    } else {
        if (!mapped) {
            /* Skip since we didn't map this range at all */
            trace_vtd_page_walk_one_skip_unmap(entry->iova, entry->addr_mask);
            return 0;
        }
        iova_tree_remove(as->iova_tree, &target);
    }

    trace_vtd_page_walk_one(info->domain_id, entry->iova,
                            entry->translated_addr, entry->addr_mask,
                            entry->perm);
    return hook_fn(entry, private);
}
/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @info: constant information for the page walk
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, uint32_t level, bool read,
                               bool write, vtd_page_walk_info *info)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * table entries.
         */
        entry_valid = read_cur | write_cur;

        if (!vtd_is_last_slpte(slpte, level) && entry_valid) {
            /*
             * This is a valid PDE (or even bigger than PDE). We need
             * to walk one further level.
             */
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte, info->aw),
                                      iova, MIN(iova_next, end), level - 1,
                                      read_cur, write_cur, info);
        } else {
            /*
             * This means we are either:
             *
             * (1) the real page entry (either 4K page, or huge page)
             * (2) the whole range is invalid
             *
             * In either case, we send an IOTLB notification down.
             */
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            entry.addr_mask = ~subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte, info->aw);
            ret = vtd_page_walk_one(&entry, info);
        }

        if (ret < 0) {
            return ret;
        }

next:
        iova = iova_next;
    }

    return 0;
}

/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @info: page walking information struct
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_info *info)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);

    if (!vtd_iova_range_check(start, ce, info->aw)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce, info->aw)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce, info->aw);
    }

    return vtd_page_walk_level(addr, start, end, level, true, true, info);
}
/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not an error - it's okay if we don't have a root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD(s->aw_bits))) {
        error_report_once("%s: invalid root entry: rsvd=0x%"PRIx64
                          ", val=0x%"PRIx64" (reserved nonzero)",
                          __func__, re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not an error - it's okay if we don't have a context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
        (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO(s->aw_bits))) {
        error_report_once("%s: invalid context entry: hi=%"PRIx64
                          ", lo=%"PRIx64" (reserved nonzero)",
                          __func__, ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        error_report_once("%s: invalid context entry: hi=%"PRIx64
                          ", lo=%"PRIx64" (level %d not supported)",
                          __func__, ce->hi, ce->lo, vtd_ce_get_level(ce));
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    /* Do translation type check */
    if (!vtd_ce_type_check(x86_iommu, ce)) {
        /* Errors dumped in vtd_ce_type_check() */
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    return 0;
}
static int vtd_sync_shadow_page_hook(IOMMUTLBEntry *entry,
                                     void *private)
{
    memory_region_notify_iommu((IOMMUMemoryRegion *)private, 0, *entry);
    return 0;
}

static int vtd_sync_shadow_page_table_range(VTDAddressSpace *vtd_as,
                                            VTDContextEntry *ce,
                                            hwaddr addr, hwaddr size)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    vtd_page_walk_info info = {
        .hook_fn = vtd_sync_shadow_page_hook,
        .private = (void *)&vtd_as->iommu,
        .notify_unmap = true,
        .aw = s->aw_bits,
        .as = vtd_as,
        .domain_id = VTD_CONTEXT_ENTRY_DID(ce->hi),
    };

    return vtd_page_walk(ce, addr, addr + size, &info);
}

static int vtd_sync_shadow_page_table(VTDAddressSpace *vtd_as)
{
    int ret;
    VTDContextEntry ce;
    IOMMUNotifier *n;

    ret = vtd_dev_to_context_entry(vtd_as->iommu_state,
                                   pci_bus_num(vtd_as->bus),
                                   vtd_as->devfn, &ce);
    if (ret) {
        if (ret == -VTD_FR_CONTEXT_ENTRY_P) {
            /*
             * It's a valid scenario to have a context entry that is
             * not present. For example, when a device is removed
             * from an existing domain then the context entry will be
             * zeroed by the guest before it was put into another
             * domain. When this happens, instead of synchronizing
             * the shadow pages we should invalidate all existing
             * mappings and notify the backends.
             */
            IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
                vtd_address_space_unmap(vtd_as, n);
            }
            ret = 0;
        }
        return ret;
    }

    return vtd_sync_shadow_page_table_range(vtd_as, &ce, 0, UINT64_MAX);
}
dbaabb25 PX |
1105 | /* |
1106 | * Fetch translation type for specific device. Returns <0 if error | |
1107 | * happens, otherwise return the shifted type to check against | |
1108 | * VTD_CONTEXT_TT_*. | |
1109 | */ | |
1110 | static int vtd_dev_get_trans_type(VTDAddressSpace *as) | |
1111 | { | |
1112 | IntelIOMMUState *s; | |
1113 | VTDContextEntry ce; | |
1114 | int ret; | |
1115 | ||
1116 | s = as->iommu_state; | |
1117 | ||
1118 | ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus), | |
1119 | as->devfn, &ce); | |
1120 | if (ret) { | |
1121 | return ret; | |
1122 | } | |
1123 | ||
1124 | return vtd_ce_get_type(&ce); | |
1125 | } | |
1126 | ||
1127 | static bool vtd_dev_pt_enabled(VTDAddressSpace *as) | |
1128 | { | |
1129 | int ret; | |
1130 | ||
1131 | assert(as); | |
1132 | ||
1133 | ret = vtd_dev_get_trans_type(as); | |
1134 | if (ret < 0) { | |
1135 | /* | |
1136 | * Possibly failed to parse the context entry for some reason | |
1137 | * (e.g., during init, or any guest configuration errors on | |
1138 | * context entries). We should assume PT not enabled for | |
1139 | * safety. | |
1140 | */ | |
1141 | return false; | |
1142 | } | |
1143 | ||
1144 | return ret == VTD_CONTEXT_TT_PASS_THROUGH; | |
1145 | } | |
/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu;
    /* Whether we need to take the BQL on our own */
    bool take_bql = !qemu_mutex_iothread_locked();

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled & !vtd_dev_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /*
     * It's possible that we reach here without BQL, e.g., when called
     * from vtd_pt_enable_fast_path(). However the memory APIs need
     * it. We'd better make sure we have had it already, or, take it.
     */
    if (take_bql) {
        qemu_mutex_lock_iothread();
    }

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), true);
    } else {
        memory_region_set_enabled(MEMORY_REGION(&as->iommu), false);
        memory_region_set_enabled(&as->sys_alias, true);
    }

    if (take_bql) {
        qemu_mutex_unlock_iothread();
    }

    return use_iommu;
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}
static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
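/*
 * For illustration: a device at bus 0x05, slot 3, function 0 has
 * devfn = (3 << 3) | 0 = 0x18, so its source-id (BDF) is
 * (0x05 << 8) | 0x18 = 0x0518.
 */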
static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}

static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}

static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
{
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    bool success = false;

    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
    if (!vtd_bus) {
        goto out;
    }

    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
    if (!vtd_as) {
        goto out;
    }

    if (vtd_switch_address_space(vtd_as) == false) {
        /* We switched off IOMMU region successfully. */
        success = true;
    }

out:
    trace_vtd_pt_enable_fast_path(source_id, success);
}
/* Map dev to context-entry then do a paging-structures walk to do an iommu
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function numbers
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    uint8_t access_flags;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have a standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    vtd_iommu_lock(s);

    cc_entry = &vtd_as->context_cache_entry;

    /* Try to fetch slpte from the IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        access_flags = iotlb_entry->access_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            goto error;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
        entry->iova = addr & VTD_PAGE_MASK_4K;
        entry->translated_addr = entry->iova;
        entry->addr_mask = ~VTD_PAGE_MASK_4K;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);
        vtd_iommu_unlock(s);
        return true;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes, s->aw_bits);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        goto error;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    access_flags = IOMMU_ACCESS_FLAG(reads, writes);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     access_flags, level);
out:
    vtd_iommu_unlock(s);
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte, s->aw_bits) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = access_flags;
    return true;

error:
    vtd_iommu_unlock(s);
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}
static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK(s->aw_bits);

    trace_vtd_reg_dmar_root(s->root, s->root_extended);
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}

static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value = 0;
    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK(s->aw_bits);
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}

static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;

    QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) {
        vtd_sync_shadow_page_table(vtd_as);
    }
}
d92fa2dc LT |
1441 | static void vtd_context_global_invalidate(IntelIOMMUState *s) |
1442 | { | |
bc535e59 | 1443 | trace_vtd_inv_desc_cc_global(); |
1d9efa73 PX |
1444 | /* Protects context cache */ |
1445 | vtd_iommu_lock(s); | |
d92fa2dc LT |
1446 | s->context_cache_gen++; |
1447 | if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) { | |
1d9efa73 | 1448 | vtd_reset_context_cache_locked(s); |
d92fa2dc | 1449 | } |
1d9efa73 | 1450 | vtd_iommu_unlock(s); |
2cc9ddcc | 1451 | vtd_address_space_refresh_all(s); |
dd4d607e PX |
1452 | /* |
1453 | * From VT-d spec 6.5.2.1, a global context entry invalidation | |
1454 | * should be followed by an IOTLB global invalidation, so we should | |
1455 | * be safe even without this. However, let's replay the region as | |
1456 | * well to be safer, and revisit this when we need finer-grained | |
1457 | * tuning of the VT-d emulation code. | |
1458 | */ | |
1459 | vtd_iommu_replay_all(s); | |
d92fa2dc LT |
1460 | } |
1461 | ||
1462 | /* Do a context-cache device-selective invalidation. | |
1463 | * @func_mask: FM field after shifting | |
1464 | */ | |
1465 | static void vtd_context_device_invalidate(IntelIOMMUState *s, | |
1466 | uint16_t source_id, | |
1467 | uint16_t func_mask) | |
1468 | { | |
1469 | uint16_t mask; | |
7df953bd | 1470 | VTDBus *vtd_bus; |
d92fa2dc | 1471 | VTDAddressSpace *vtd_as; |
bc535e59 | 1472 | uint8_t bus_n, devfn; |
d92fa2dc LT |
1473 | uint16_t devfn_it; |
1474 | ||
bc535e59 PX |
1475 | trace_vtd_inv_desc_cc_devices(source_id, func_mask); |
1476 | ||
d92fa2dc LT |
1477 | switch (func_mask & 3) { |
1478 | case 0: | |
1479 | mask = 0; /* No bits in the SID field masked */ | |
1480 | break; | |
1481 | case 1: | |
1482 | mask = 4; /* Mask bit 2 in the SID field */ | |
1483 | break; | |
1484 | case 2: | |
1485 | mask = 6; /* Mask bits 2:1 in the SID field */ | |
1486 | break; | |
1487 | case 3: | |
1488 | mask = 7; /* Mask bits 2:0 in the SID field */ | |
1489 | break; | |
1490 | } | |
6cb99acc | 1491 | mask = ~mask; |
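/*
 * Worked example (illustrative): func_mask=2 selects mask=6, and after
 * the inversion above only devfn bits outside function bits 2:1 must
 * match, so a source_id with devfn 0x08 also matches 0x0a, 0x0c and
 * 0x0e in the loop below.
 */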
bc535e59 PX |
1492 | |
1493 | bus_n = VTD_SID_TO_BUS(source_id); | |
1494 | vtd_bus = vtd_find_as_from_bus_num(s, bus_n); | |
7df953bd | 1495 | if (vtd_bus) { |
d92fa2dc | 1496 | devfn = VTD_SID_TO_DEVFN(source_id); |
bf33cc75 | 1497 | for (devfn_it = 0; devfn_it < PCI_DEVFN_MAX; ++devfn_it) { |
7df953bd | 1498 | vtd_as = vtd_bus->dev_as[devfn_it]; |
d92fa2dc | 1499 | if (vtd_as && ((devfn_it & mask) == (devfn & mask))) { |
bc535e59 PX |
1500 | trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it), |
1501 | VTD_PCI_FUNC(devfn_it)); | |
1d9efa73 | 1502 | vtd_iommu_lock(s); |
d92fa2dc | 1503 | vtd_as->context_cache_entry.context_cache_gen = 0; |
1d9efa73 | 1504 | vtd_iommu_unlock(s); |
dbaabb25 PX |
1505 | /* |
1506 | * Switch the address space when needed, in case the | |
1507 | * device passthrough bit has been toggled. | |
1508 | */ | |
1509 | vtd_switch_address_space(vtd_as); | |
dd4d607e PX |
1510 | /* |
1511 | * A device is moving out of (or into) a | |
63b88968 | 1512 | * domain; resync the shadow page table. |
dd4d607e PX |
1513 | * This is harmless even if we have no such | |
1514 | * notifier registered - the IOMMU notification | |
1515 | * framework will skip MAP notifications if that | |
1516 | * happened. | |
1517 | */ | |
63b88968 | 1518 | vtd_sync_shadow_page_table(vtd_as); |
d92fa2dc LT |
1519 | } |
1520 | } | |
1521 | } | |
1522 | } | |
1523 | ||
1da12ec4 LT |
1524 | /* Context-cache invalidation |
1525 | * Returns the Context Actual Invalidation Granularity. | |
1526 | * @val: the content of the CCMD_REG | |
1527 | */ | |
1528 | static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val) | |
1529 | { | |
1530 | uint64_t caig; | |
1531 | uint64_t type = val & VTD_CCMD_CIRG_MASK; | |
1532 | ||
1533 | switch (type) { | |
d92fa2dc | 1534 | case VTD_CCMD_DOMAIN_INVL: |
d92fa2dc | 1535 | /* Fall through */ |
1da12ec4 | 1536 | case VTD_CCMD_GLOBAL_INVL: |
1da12ec4 | 1537 | caig = VTD_CCMD_GLOBAL_INVL_A; |
d92fa2dc | 1538 | vtd_context_global_invalidate(s); |
1da12ec4 LT |
1539 | break; |
1540 | ||
1541 | case VTD_CCMD_DEVICE_INVL: | |
1da12ec4 | 1542 | caig = VTD_CCMD_DEVICE_INVL_A; |
d92fa2dc | 1543 | vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val)); |
1da12ec4 LT |
1544 | break; |
1545 | ||
1546 | default: | |
1376211f PX |
1547 | error_report_once("%s: invalid context: 0x%" PRIx64, |
1548 | __func__, val); | |
1da12ec4 LT |
1549 | caig = 0; |
1550 | } | |
1551 | return caig; | |
1552 | } | |
1553 | ||
b5a280c0 LT |
1554 | static void vtd_iotlb_global_invalidate(IntelIOMMUState *s) |
1555 | { | |
7feb51b7 | 1556 | trace_vtd_inv_desc_iotlb_global(); |
b5a280c0 | 1557 | vtd_reset_iotlb(s); |
dd4d607e | 1558 | vtd_iommu_replay_all(s); |
b5a280c0 LT |
1559 | } |
1560 | ||
1561 | static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id) | |
1562 | { | |
dd4d607e PX |
1563 | VTDContextEntry ce; |
1564 | VTDAddressSpace *vtd_as; | |
1565 | ||
7feb51b7 PX |
1566 | trace_vtd_inv_desc_iotlb_domain(domain_id); |
1567 | ||
1d9efa73 | 1568 | vtd_iommu_lock(s); |
b5a280c0 LT |
1569 | g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain, |
1570 | &domain_id); | |
1d9efa73 | 1571 | vtd_iommu_unlock(s); |
dd4d607e | 1572 | |
b4a4ba0d | 1573 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
dd4d607e PX |
1574 | if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), |
1575 | vtd_as->devfn, &ce) && | |
1576 | domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { | |
63b88968 | 1577 | vtd_sync_shadow_page_table(vtd_as); |
dd4d607e PX |
1578 | } |
1579 | } | |
1580 | } | |
1581 | ||
dd4d607e PX |
1582 | static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s, |
1583 | uint16_t domain_id, hwaddr addr, | |
1584 | uint8_t am) | |
1585 | { | |
b4a4ba0d | 1586 | VTDAddressSpace *vtd_as; |
dd4d607e PX |
1587 | VTDContextEntry ce; |
1588 | int ret; | |
4f8a62a9 | 1589 | hwaddr size = (1 << am) * VTD_PAGE_SIZE; |
dd4d607e | 1590 | |
b4a4ba0d | 1591 | QLIST_FOREACH(vtd_as, &(s->vtd_as_with_notifiers), next) { |
dd4d607e PX |
1592 | ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus), |
1593 | vtd_as->devfn, &ce); | |
1594 | if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) { | |
4f8a62a9 PX |
1595 | if (vtd_as_has_map_notifier(vtd_as)) { |
1596 | /* | |
1597 | * As long as we have MAP notifications registered in | |
1598 | * any of our IOMMU notifiers, we need to sync the | |
1599 | * shadow page table. | |
1600 | */ | |
63b88968 | 1601 | vtd_sync_shadow_page_table_range(vtd_as, &ce, addr, size); |
4f8a62a9 PX |
1602 | } else { |
1603 | /* | |
1604 | * For UNMAP-only notifiers, we don't need to walk the | |
1605 | * page tables. We just deliver the PSI down to | |
1606 | * invalidate caches. | |
1607 | */ | |
1608 | IOMMUTLBEntry entry = { | |
1609 | .target_as = &address_space_memory, | |
1610 | .iova = addr, | |
1611 | .translated_addr = 0, | |
1612 | .addr_mask = size - 1, | |
1613 | .perm = IOMMU_NONE, | |
1614 | }; | |
cb1efcf4 | 1615 | memory_region_notify_iommu(&vtd_as->iommu, 0, entry); |
4f8a62a9 | 1616 | } |
dd4d607e PX |
1617 | } |
1618 | } | |
b5a280c0 LT |
1619 | } |
1620 | ||
1621 | static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id, | |
1622 | hwaddr addr, uint8_t am) | |
1623 | { | |
1624 | VTDIOTLBPageInvInfo info; | |
1625 | ||
7feb51b7 PX |
1626 | trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am); |
1627 | ||
b5a280c0 LT |
1628 | assert(am <= VTD_MAMV); |
1629 | info.domain_id = domain_id; | |
d66b969b | 1630 | info.addr = addr; |
b5a280c0 | 1631 | info.mask = ~((1 << am) - 1); |
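/*
 * Illustration: am is the number of low page-frame-number bits that are
 * wildcarded, e.g. am=2 makes the hash walk below drop the 4 consecutive
 * 4K pages starting at addr from the IOTLB.
 */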
1d9efa73 | 1632 | vtd_iommu_lock(s); |
b5a280c0 | 1633 | g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info); |
1d9efa73 | 1634 | vtd_iommu_unlock(s); |
dd4d607e | 1635 | vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am); |
b5a280c0 LT |
1636 | } |
1637 | ||
1da12ec4 LT |
1638 | /* Flush IOTLB |
1639 | * Returns the IOTLB Actual Invalidation Granularity. | |
1640 | * @val: the content of the IOTLB_REG | |
1641 | */ | |
1642 | static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val) | |
1643 | { | |
1644 | uint64_t iaig; | |
1645 | uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK; | |
b5a280c0 LT |
1646 | uint16_t domain_id; |
1647 | hwaddr addr; | |
1648 | uint8_t am; | |
1da12ec4 LT |
1649 | |
1650 | switch (type) { | |
1651 | case VTD_TLB_GLOBAL_FLUSH: | |
1da12ec4 | 1652 | iaig = VTD_TLB_GLOBAL_FLUSH_A; |
b5a280c0 | 1653 | vtd_iotlb_global_invalidate(s); |
1da12ec4 LT |
1654 | break; |
1655 | ||
1656 | case VTD_TLB_DSI_FLUSH: | |
b5a280c0 | 1657 | domain_id = VTD_TLB_DID(val); |
1da12ec4 | 1658 | iaig = VTD_TLB_DSI_FLUSH_A; |
b5a280c0 | 1659 | vtd_iotlb_domain_invalidate(s, domain_id); |
1da12ec4 LT |
1660 | break; |
1661 | ||
1662 | case VTD_TLB_PSI_FLUSH: | |
b5a280c0 LT |
1663 | domain_id = VTD_TLB_DID(val); |
1664 | addr = vtd_get_quad_raw(s, DMAR_IVA_REG); | |
1665 | am = VTD_IVA_AM(addr); | |
1666 | addr = VTD_IVA_ADDR(addr); | |
b5a280c0 | 1667 | if (am > VTD_MAMV) { |
1376211f PX |
1668 | error_report_once("%s: address mask overflow: 0x%" PRIx64, |
1669 | __func__, vtd_get_quad_raw(s, DMAR_IVA_REG)); | |
b5a280c0 LT |
1670 | iaig = 0; |
1671 | break; | |
1672 | } | |
1da12ec4 | 1673 | iaig = VTD_TLB_PSI_FLUSH_A; |
b5a280c0 | 1674 | vtd_iotlb_page_invalidate(s, domain_id, addr, am); |
1da12ec4 LT |
1675 | break; |
1676 | ||
1677 | default: | |
1376211f PX |
1678 | error_report_once("%s: invalid granularity: 0x%" PRIx64, |
1679 | __func__, val); | |
1da12ec4 LT |
1680 | iaig = 0; |
1681 | } | |
1682 | return iaig; | |
1683 | } | |
1684 | ||
8991c460 | 1685 | static void vtd_fetch_inv_desc(IntelIOMMUState *s); |
ed7b8fbc LT |
1686 | |
1687 | static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s) | |
1688 | { | |
1689 | return s->qi_enabled && (s->iq_tail == s->iq_head) && | |
1690 | (s->iq_last_desc_type == VTD_INV_DESC_WAIT); | |
1691 | } | |
1692 | ||
1693 | static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en) | |
1694 | { | |
1695 | uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG); | |
1696 | ||
7feb51b7 PX |
1697 | trace_vtd_inv_qi_enable(en); |
1698 | ||
ed7b8fbc | 1699 | if (en) { |
37f51384 | 1700 | s->iq = iqa_val & VTD_IQA_IQA_MASK(s->aw_bits); |
8991c460 LP |
1701 | /* 2^(x+8) entries */ |
1702 | s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8); | |
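/*
 * Illustrative example: QS=3 in DMAR_IQA_REG yields 1UL << (3 + 8) = 2048
 * descriptors; at 16 bytes per descriptor the queue occupies 32KiB
 * starting at s->iq.
 */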
1703 | s->qi_enabled = true; | |
1704 | trace_vtd_inv_qi_setup(s->iq, s->iq_size); | |
1705 | /* Ok - report back to driver */ | |
1706 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES); | |
1707 | ||
1708 | if (s->iq_tail != 0) { | |
1709 | /* | |
1710 | * This is a spec violation but Windows guests are known to set up | |
1711 | * Queued Invalidation this way so we allow the write and process | |
1712 | * Invalidation Descriptors right away. | |
1713 | */ | |
1714 | trace_vtd_warn_invalid_qi_tail(s->iq_tail); | |
1715 | if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { | |
1716 | vtd_fetch_inv_desc(s); | |
1717 | } | |
ed7b8fbc LT |
1718 | } |
1719 | } else { | |
1720 | if (vtd_queued_inv_disable_check(s)) { | |
1721 | /* disable Queued Invalidation */ | |
1722 | vtd_set_quad_raw(s, DMAR_IQH_REG, 0); | |
1723 | s->iq_head = 0; | |
1724 | s->qi_enabled = false; | |
1725 | /* Ok - report back to driver */ | |
1726 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0); | |
1727 | } else { | |
4e4abd11 PX |
1728 | error_report_once("%s: detected improper state when disabling QI " | |
1729 | "(head=0x%x, tail=0x%x, last_type=%d)", | |
1730 | __func__, | |
1731 | s->iq_head, s->iq_tail, s->iq_last_desc_type); | |
ed7b8fbc LT |
1732 | } |
1733 | } | |
1734 | } | |
1735 | ||
1da12ec4 LT |
1736 | /* Set Root Table Pointer */ |
1737 | static void vtd_handle_gcmd_srtp(IntelIOMMUState *s) | |
1738 | { | |
1da12ec4 LT |
1739 | vtd_root_table_setup(s); |
1740 | /* Ok - report back to driver */ | |
1741 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS); | |
2cc9ddcc PX |
1742 | vtd_reset_caches(s); |
1743 | vtd_address_space_refresh_all(s); | |
1da12ec4 LT |
1744 | } |
1745 | ||
a5861439 PX |
1746 | /* Set Interrupt Remap Table Pointer */ |
1747 | static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s) | |
1748 | { | |
a5861439 PX |
1749 | vtd_interrupt_remap_table_setup(s); |
1750 | /* Ok - report back to driver */ | |
1751 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS); | |
1752 | } | |
1753 | ||
1da12ec4 LT |
1754 | /* Handle Translation Enable/Disable */ |
1755 | static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en) | |
1756 | { | |
558e0024 PX |
1757 | if (s->dmar_enabled == en) { |
1758 | return; | |
1759 | } | |
1760 | ||
7feb51b7 | 1761 | trace_vtd_dmar_enable(en); |
1da12ec4 LT |
1762 | |
1763 | if (en) { | |
1764 | s->dmar_enabled = true; | |
1765 | /* Ok - report back to driver */ | |
1766 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES); | |
1767 | } else { | |
1768 | s->dmar_enabled = false; | |
1769 | ||
1770 | /* Clear the index of Fault Recording Register */ | |
1771 | s->next_frcd_reg = 0; | |
1772 | /* Ok - report back to driver */ | |
1773 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0); | |
1774 | } | |
558e0024 | 1775 | |
2cc9ddcc PX |
1776 | vtd_reset_caches(s); |
1777 | vtd_address_space_refresh_all(s); | |
1da12ec4 LT |
1778 | } |
1779 | ||
80de52ba PX |
1780 | /* Handle Interrupt Remap Enable/Disable */ |
1781 | static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en) | |
1782 | { | |
7feb51b7 | 1783 | trace_vtd_ir_enable(en); |
80de52ba PX |
1784 | |
1785 | if (en) { | |
1786 | s->intr_enabled = true; | |
1787 | /* Ok - report back to driver */ | |
1788 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES); | |
1789 | } else { | |
1790 | s->intr_enabled = false; | |
1791 | /* Ok - report back to driver */ | |
1792 | vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0); | |
1793 | } | |
1794 | } | |
1795 | ||
1da12ec4 LT |
1796 | /* Handle write to Global Command Register */ |
1797 | static void vtd_handle_gcmd_write(IntelIOMMUState *s) | |
1798 | { | |
1799 | uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG); | |
1800 | uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG); | |
1801 | uint32_t changed = status ^ val; | |
1802 | ||
7feb51b7 | 1803 | trace_vtd_reg_write_gcmd(status, val); |
1da12ec4 LT |
1804 | if (changed & VTD_GCMD_TE) { |
1805 | /* Translation enable/disable */ | |
1806 | vtd_handle_gcmd_te(s, val & VTD_GCMD_TE); | |
1807 | } | |
1808 | if (val & VTD_GCMD_SRTP) { | |
1809 | /* Set/update the root-table pointer */ | |
1810 | vtd_handle_gcmd_srtp(s); | |
1811 | } | |
ed7b8fbc LT |
1812 | if (changed & VTD_GCMD_QIE) { |
1813 | /* Queued Invalidation Enable */ | |
1814 | vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE); | |
1815 | } | |
a5861439 PX |
1816 | if (val & VTD_GCMD_SIRTP) { |
1817 | /* Set/update the interrupt remapping root-table pointer */ | |
1818 | vtd_handle_gcmd_sirtp(s); | |
1819 | } | |
80de52ba PX |
1820 | if (changed & VTD_GCMD_IRE) { |
1821 | /* Interrupt remap enable/disable */ | |
1822 | vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE); | |
1823 | } | |
1da12ec4 LT |
1824 | } |
1825 | ||
1826 | /* Handle write to Context Command Register */ | |
1827 | static void vtd_handle_ccmd_write(IntelIOMMUState *s) | |
1828 | { | |
1829 | uint64_t ret; | |
1830 | uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG); | |
1831 | ||
1832 | /* Context-cache invalidation request */ | |
1833 | if (val & VTD_CCMD_ICC) { | |
ed7b8fbc | 1834 | if (s->qi_enabled) { |
1376211f PX |
1835 | error_report_once("Queued Invalidation enabled, " |
1836 | "should not use register-based invalidation"); | |
ed7b8fbc LT |
1837 | return; |
1838 | } | |
1da12ec4 LT |
1839 | ret = vtd_context_cache_invalidate(s, val); |
1840 | /* Invalidation completed. Clear the ICC bit and report the actual granularity */ | |
1841 | vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL); | |
1842 | ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK, | |
1843 | ret); | |
1da12ec4 LT |
1844 | } |
1845 | } | |
1846 | ||
1847 | /* Handle write to IOTLB Invalidation Register */ | |
1848 | static void vtd_handle_iotlb_write(IntelIOMMUState *s) | |
1849 | { | |
1850 | uint64_t ret; | |
1851 | uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG); | |
1852 | ||
1853 | /* IOTLB invalidation request */ | |
1854 | if (val & VTD_TLB_IVT) { | |
ed7b8fbc | 1855 | if (s->qi_enabled) { |
1376211f PX |
1856 | error_report_once("Queued Invalidation enabled, " |
1857 | "should not use register-based invalidation"); | |
ed7b8fbc LT |
1858 | return; |
1859 | } | |
1da12ec4 LT |
1860 | ret = vtd_iotlb_flush(s, val); |
1861 | /* Invalidation completed. Clear the IVT bit and report the actual granularity */ | |
1862 | vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL); | |
1863 | ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, | |
1864 | VTD_TLB_FLUSH_GRANU_MASK_A, ret); | |
1da12ec4 LT |
1865 | } |
1866 | } | |
1867 | ||
ed7b8fbc LT |
1868 | /* Fetch an Invalidation Descriptor from the Invalidation Queue */ |
1869 | static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset, | |
1870 | VTDInvDesc *inv_desc) | |
1871 | { | |
1872 | dma_addr_t addr = base_addr + offset * sizeof(*inv_desc); | |
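/* offset counts 128-bit descriptors, so descriptor N sits at base_addr + 16 * N */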
1873 | if (dma_memory_read(&address_space_memory, addr, inv_desc, | |
1874 | sizeof(*inv_desc))) { | |
1376211f | 1875 | error_report_once("Read INV DESC failed"); |
ed7b8fbc LT |
1876 | inv_desc->lo = 0; |
1877 | inv_desc->hi = 0; | |
ed7b8fbc LT |
1878 | return false; |
1879 | } | |
1880 | inv_desc->lo = le64_to_cpu(inv_desc->lo); | |
1881 | inv_desc->hi = le64_to_cpu(inv_desc->hi); | |
1882 | return true; | |
1883 | } | |
1884 | ||
1885 | static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) | |
1886 | { | |
1887 | if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) || | |
1888 | (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) { | |
095955b2 PX |
1889 | error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 |
1890 | " (reserved nonzero)", __func__, inv_desc->hi, | |
1891 | inv_desc->lo); | |
ed7b8fbc LT |
1892 | return false; |
1893 | } | |
1894 | if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) { | |
1895 | /* Status Write */ | |
1896 | uint32_t status_data = (uint32_t)(inv_desc->lo >> | |
1897 | VTD_INV_DESC_WAIT_DATA_SHIFT); | |
1898 | ||
1899 | assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF)); | |
1900 | ||
1901 | /* FIXME: need to be masked with HAW? */ | |
1902 | dma_addr_t status_addr = inv_desc->hi; | |
bc535e59 | 1903 | trace_vtd_inv_desc_wait_sw(status_addr, status_data); |
ed7b8fbc LT |
1904 | status_data = cpu_to_le32(status_data); |
1905 | if (dma_memory_write(&address_space_memory, status_addr, &status_data, | |
1906 | sizeof(status_data))) { | |
bc535e59 | 1907 | trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo); |
ed7b8fbc LT |
1908 | return false; |
1909 | } | |
1910 | } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) { | |
1911 | /* Interrupt flag */ | |
ed7b8fbc LT |
1912 | vtd_generate_completion_event(s); |
1913 | } else { | |
095955b2 PX |
1914 | error_report_once("%s: invalid wait desc: hi=%"PRIx64", lo=%"PRIx64 |
1915 | " (unknown type)", __func__, inv_desc->hi, | |
1916 | inv_desc->lo); | |
ed7b8fbc LT |
1917 | return false; |
1918 | } | |
1919 | return true; | |
1920 | } | |
1921 | ||
d92fa2dc LT |
1922 | static bool vtd_process_context_cache_desc(IntelIOMMUState *s, |
1923 | VTDInvDesc *inv_desc) | |
1924 | { | |
bc535e59 PX |
1925 | uint16_t sid, fmask; |
1926 | ||
d92fa2dc | 1927 | if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) { |
095955b2 PX |
1928 | error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 |
1929 | " (reserved nonzero)", __func__, inv_desc->hi, | |
1930 | inv_desc->lo); | |
d92fa2dc LT |
1931 | return false; |
1932 | } | |
1933 | switch (inv_desc->lo & VTD_INV_DESC_CC_G) { | |
1934 | case VTD_INV_DESC_CC_DOMAIN: | |
bc535e59 PX |
1935 | trace_vtd_inv_desc_cc_domain( |
1936 | (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo)); | |
d92fa2dc LT |
1937 | /* Fall through */ |
1938 | case VTD_INV_DESC_CC_GLOBAL: | |
d92fa2dc LT |
1939 | vtd_context_global_invalidate(s); |
1940 | break; | |
1941 | ||
1942 | case VTD_INV_DESC_CC_DEVICE: | |
bc535e59 PX |
1943 | sid = VTD_INV_DESC_CC_SID(inv_desc->lo); |
1944 | fmask = VTD_INV_DESC_CC_FM(inv_desc->lo); | |
1945 | vtd_context_device_invalidate(s, sid, fmask); | |
d92fa2dc LT |
1946 | break; |
1947 | ||
1948 | default: | |
095955b2 PX |
1949 | error_report_once("%s: invalid cc inv desc: hi=%"PRIx64", lo=%"PRIx64 |
1950 | " (invalid type)", __func__, inv_desc->hi, | |
1951 | inv_desc->lo); | |
d92fa2dc LT |
1952 | return false; |
1953 | } | |
1954 | return true; | |
1955 | } | |
1956 | ||
b5a280c0 LT |
1957 | static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc) |
1958 | { | |
1959 | uint16_t domain_id; | |
1960 | uint8_t am; | |
1961 | hwaddr addr; | |
1962 | ||
1963 | if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) || | |
1964 | (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) { | |
095955b2 PX |
1965 | error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 |
1966 | ", lo=0x%"PRIx64" (reserved bits nonzero)\n", | |
1967 | __func__, inv_desc->hi, inv_desc->lo); | |
b5a280c0 LT |
1968 | return false; |
1969 | } | |
1970 | ||
1971 | switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) { | |
1972 | case VTD_INV_DESC_IOTLB_GLOBAL: | |
b5a280c0 LT |
1973 | vtd_iotlb_global_invalidate(s); |
1974 | break; | |
1975 | ||
1976 | case VTD_INV_DESC_IOTLB_DOMAIN: | |
1977 | domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); | |
b5a280c0 LT |
1978 | vtd_iotlb_domain_invalidate(s, domain_id); |
1979 | break; | |
1980 | ||
1981 | case VTD_INV_DESC_IOTLB_PAGE: | |
1982 | domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo); | |
1983 | addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi); | |
1984 | am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi); | |
b5a280c0 | 1985 | if (am > VTD_MAMV) { |
095955b2 PX |
1986 | error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 |
1987 | ", lo=0x%"PRIx64" (am=%u > VTD_MAMV=%u)\n", | |
1988 | __func__, inv_desc->hi, inv_desc->lo, | |
1989 | am, (unsigned)VTD_MAMV); | |
b5a280c0 LT |
1990 | return false; |
1991 | } | |
1992 | vtd_iotlb_page_invalidate(s, domain_id, addr, am); | |
1993 | break; | |
1994 | ||
1995 | default: | |
095955b2 PX |
1996 | error_report_once("%s: invalid iotlb inv desc: hi=0x%"PRIx64 |
1997 | ", lo=0x%"PRIx64" (type mismatch: 0x%"PRIx64")\n", | |
1998 | __func__, inv_desc->hi, inv_desc->lo, | |
1999 | inv_desc->lo & VTD_INV_DESC_IOTLB_G); | |
b5a280c0 LT |
2000 | return false; |
2001 | } | |
2002 | return true; | |
2003 | } | |
2004 | ||
02a2cbc8 PX |
2005 | static bool vtd_process_inv_iec_desc(IntelIOMMUState *s, |
2006 | VTDInvDesc *inv_desc) | |
2007 | { | |
7feb51b7 PX |
2008 | trace_vtd_inv_desc_iec(inv_desc->iec.granularity, |
2009 | inv_desc->iec.index, | |
2010 | inv_desc->iec.index_mask); | |
02a2cbc8 PX |
2011 | |
2012 | vtd_iec_notify_all(s, !inv_desc->iec.granularity, | |
2013 | inv_desc->iec.index, | |
2014 | inv_desc->iec.index_mask); | |
554f5e16 JW |
2015 | return true; |
2016 | } | |
2017 | ||
2018 | static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s, | |
2019 | VTDInvDesc *inv_desc) | |
2020 | { | |
2021 | VTDAddressSpace *vtd_dev_as; | |
2022 | IOMMUTLBEntry entry; | |
2023 | struct VTDBus *vtd_bus; | |
2024 | hwaddr addr; | |
2025 | uint64_t sz; | |
2026 | uint16_t sid; | |
2027 | uint8_t devfn; | |
2028 | bool size; | |
2029 | uint8_t bus_num; | |
2030 | ||
2031 | addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi); | |
2032 | sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo); | |
2033 | devfn = sid & 0xff; | |
2034 | bus_num = sid >> 8; | |
2035 | size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi); | |
2036 | ||
2037 | if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) || | |
2038 | (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) { | |
095955b2 PX |
2039 | error_report_once("%s: invalid dev-iotlb inv desc: hi=%"PRIx64 |
2040 | ", lo=%"PRIx64" (reserved nonzero)", __func__, | |
2041 | inv_desc->hi, inv_desc->lo); | |
554f5e16 JW |
2042 | return false; |
2043 | } | |
2044 | ||
2045 | vtd_bus = vtd_find_as_from_bus_num(s, bus_num); | |
2046 | if (!vtd_bus) { | |
2047 | goto done; | |
2048 | } | |
2049 | ||
2050 | vtd_dev_as = vtd_bus->dev_as[devfn]; | |
2051 | if (!vtd_dev_as) { | |
2052 | goto done; | |
2053 | } | |
2054 | ||
04eb6247 JW |
2055 | /* According to ATS spec table 2.4: |
2056 | * S = 0, bits 15:12 = xxxx range size: 4K | |
2057 | * S = 1, bits 15:12 = xxx0 range size: 8K | |
2058 | * S = 1, bits 15:12 = xx01 range size: 16K | |
2059 | * S = 1, bits 15:12 = x011 range size: 32K | |
2060 | * S = 1, bits 15:12 = 0111 range size: 64K | |
2061 | * ... | |
2062 | */ | |
554f5e16 | 2063 | if (size) { |
04eb6247 | 2064 | sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT); |
554f5e16 JW |
2065 | addr &= ~(sz - 1); |
2066 | } else { | |
2067 | sz = VTD_PAGE_SIZE; | |
2068 | } | |
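/*
 * Worked example (illustrative): with S=1 and address bits 15:12 = 0111,
 * cto64() counts three trailing ones in (addr >> 12), so
 * sz = 8K << 3 = 64K and addr is rounded down to a 64K boundary,
 * matching the last row of the table above.
 */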
02a2cbc8 | 2069 | |
554f5e16 JW |
2070 | entry.target_as = &vtd_dev_as->as; |
2071 | entry.addr_mask = sz - 1; | |
2072 | entry.iova = addr; | |
2073 | entry.perm = IOMMU_NONE; | |
2074 | entry.translated_addr = 0; | |
cb1efcf4 | 2075 | memory_region_notify_iommu(&vtd_dev_as->iommu, 0, entry); |
554f5e16 JW |
2076 | |
2077 | done: | |
02a2cbc8 PX |
2078 | return true; |
2079 | } | |
2080 | ||
ed7b8fbc LT |
2081 | static bool vtd_process_inv_desc(IntelIOMMUState *s) |
2082 | { | |
2083 | VTDInvDesc inv_desc; | |
2084 | uint8_t desc_type; | |
2085 | ||
7feb51b7 | 2086 | trace_vtd_inv_qi_head(s->iq_head); |
ed7b8fbc LT |
2087 | if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) { |
2088 | s->iq_last_desc_type = VTD_INV_DESC_NONE; | |
2089 | return false; | |
2090 | } | |
2091 | desc_type = inv_desc.lo & VTD_INV_DESC_TYPE; | |
2092 | /* FIXME: should update at first or at last? */ | |
2093 | s->iq_last_desc_type = desc_type; | |
2094 | ||
2095 | switch (desc_type) { | |
2096 | case VTD_INV_DESC_CC: | |
bc535e59 | 2097 | trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo); |
d92fa2dc LT |
2098 | if (!vtd_process_context_cache_desc(s, &inv_desc)) { |
2099 | return false; | |
2100 | } | |
ed7b8fbc LT |
2101 | break; |
2102 | ||
2103 | case VTD_INV_DESC_IOTLB: | |
bc535e59 | 2104 | trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo); |
b5a280c0 LT |
2105 | if (!vtd_process_iotlb_desc(s, &inv_desc)) { |
2106 | return false; | |
2107 | } | |
ed7b8fbc LT |
2108 | break; |
2109 | ||
2110 | case VTD_INV_DESC_WAIT: | |
bc535e59 | 2111 | trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo); |
ed7b8fbc LT |
2112 | if (!vtd_process_wait_desc(s, &inv_desc)) { |
2113 | return false; | |
2114 | } | |
2115 | break; | |
2116 | ||
b7910472 | 2117 | case VTD_INV_DESC_IEC: |
bc535e59 | 2118 | trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo); |
02a2cbc8 PX |
2119 | if (!vtd_process_inv_iec_desc(s, &inv_desc)) { |
2120 | return false; | |
2121 | } | |
b7910472 PX |
2122 | break; |
2123 | ||
554f5e16 | 2124 | case VTD_INV_DESC_DEVICE: |
7feb51b7 | 2125 | trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo); |
554f5e16 JW |
2126 | if (!vtd_process_device_iotlb_desc(s, &inv_desc)) { |
2127 | return false; | |
2128 | } | |
2129 | break; | |
2130 | ||
ed7b8fbc | 2131 | default: |
095955b2 PX |
2132 | error_report_once("%s: invalid inv desc: hi=%"PRIx64", lo=%"PRIx64 |
2133 | " (unknown type)", __func__, inv_desc.hi, | |
2134 | inv_desc.lo); | |
ed7b8fbc LT |
2135 | return false; |
2136 | } | |
2137 | s->iq_head++; | |
2138 | if (s->iq_head == s->iq_size) { | |
2139 | s->iq_head = 0; | |
2140 | } | |
2141 | return true; | |
2142 | } | |
2143 | ||
2144 | /* Try to fetch and process more Invalidation Descriptors */ | |
2145 | static void vtd_fetch_inv_desc(IntelIOMMUState *s) | |
2146 | { | |
7feb51b7 PX |
2147 | trace_vtd_inv_qi_fetch(); |
2148 | ||
ed7b8fbc LT |
2149 | if (s->iq_tail >= s->iq_size) { |
2150 | /* Detects an invalid Tail pointer */ | |
4e4abd11 PX |
2151 | error_report_once("%s: detected invalid QI tail " |
2152 | "(tail=0x%x, size=0x%x)", | |
2153 | __func__, s->iq_tail, s->iq_size); | |
ed7b8fbc LT |
2154 | vtd_handle_inv_queue_error(s); |
2155 | return; | |
2156 | } | |
2157 | while (s->iq_head != s->iq_tail) { | |
2158 | if (!vtd_process_inv_desc(s)) { | |
2159 | /* Invalidation Queue Errors */ | |
2160 | vtd_handle_inv_queue_error(s); | |
2161 | break; | |
2162 | } | |
2163 | /* Must update the IQH_REG in time */ | |
2164 | vtd_set_quad_raw(s, DMAR_IQH_REG, | |
2165 | (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) & | |
2166 | VTD_IQH_QH_MASK); | |
2167 | } | |
2168 | } | |
2169 | ||
2170 | /* Handle write to Invalidation Queue Tail Register */ | |
2171 | static void vtd_handle_iqt_write(IntelIOMMUState *s) | |
2172 | { | |
2173 | uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG); | |
2174 | ||
2175 | s->iq_tail = VTD_IQT_QT(val); | |
7feb51b7 PX |
2176 | trace_vtd_inv_qi_tail(s->iq_tail); |
2177 | ||
ed7b8fbc LT |
2178 | if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) { |
2179 | /* Process Invalidation Queue here */ | |
2180 | vtd_fetch_inv_desc(s); | |
2181 | } | |
2182 | } | |
2183 | ||
1da12ec4 LT |
2184 | static void vtd_handle_fsts_write(IntelIOMMUState *s) |
2185 | { | |
2186 | uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG); | |
2187 | uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); | |
2188 | uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE; | |
2189 | ||
2190 | if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) { | |
2191 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); | |
7feb51b7 | 2192 | trace_vtd_fsts_clear_ip(); |
1da12ec4 | 2193 | } |
ed7b8fbc LT |
2194 | /* FIXME: when IQE is Clear, should we try to fetch some Invalidation |
2195 | * Descriptors if there are any when Queued Invalidation is enabled? | |
2196 | */ | |
1da12ec4 LT |
2197 | } |
2198 | ||
2199 | static void vtd_handle_fectl_write(IntelIOMMUState *s) | |
2200 | { | |
2201 | uint32_t fectl_reg; | |
2202 | /* FIXME: when software clears the IM field, check the IP field. But do we | |
2203 | * need to compare the old value and the new value to conclude that | |
2204 | * software clears the IM field? Or just check if the IM field is zero? | |
2205 | */ | |
2206 | fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG); | |
7feb51b7 PX |
2207 | |
2208 | trace_vtd_reg_write_fectl(fectl_reg); | |
2209 | ||
1da12ec4 LT |
2210 | if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) { |
2211 | vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG); | |
2212 | vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0); | |
1da12ec4 LT |
2213 | } |
2214 | } | |
2215 | ||
ed7b8fbc LT |
2216 | static void vtd_handle_ics_write(IntelIOMMUState *s) |
2217 | { | |
2218 | uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG); | |
2219 | uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); | |
2220 | ||
2221 | if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) { | |
7feb51b7 | 2222 | trace_vtd_reg_ics_clear_ip(); |
ed7b8fbc | 2223 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); |
ed7b8fbc LT |
2224 | } |
2225 | } | |
2226 | ||
2227 | static void vtd_handle_iectl_write(IntelIOMMUState *s) | |
2228 | { | |
2229 | uint32_t iectl_reg; | |
2230 | /* FIXME: when software clears the IM field, check the IP field. But do we | |
2231 | * need to compare the old value and the new value to conclude that | |
2232 | * software clears the IM field? Or just check if the IM field is zero? | |
2233 | */ | |
2234 | iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG); | |
7feb51b7 PX |
2235 | |
2236 | trace_vtd_reg_write_iectl(iectl_reg); | |
2237 | ||
ed7b8fbc LT |
2238 | if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) { |
2239 | vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG); | |
2240 | vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0); | |
ed7b8fbc LT |
2241 | } |
2242 | } | |
2243 | ||
1da12ec4 LT |
2244 | static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size) |
2245 | { | |
2246 | IntelIOMMUState *s = opaque; | |
2247 | uint64_t val; | |
2248 | ||
7feb51b7 PX |
2249 | trace_vtd_reg_read(addr, size); |
2250 | ||
1da12ec4 | 2251 | if (addr + size > DMAR_REG_SIZE) { |
1376211f PX |
2252 | error_report_once("%s: MMIO over range: addr=0x%" PRIx64 |
2253 | " size=0x%u", __func__, addr, size); | |
1da12ec4 LT |
2254 | return (uint64_t)-1; |
2255 | } | |
2256 | ||
2257 | switch (addr) { | |
2258 | /* Root Table Address Register, 64-bit */ | |
2259 | case DMAR_RTADDR_REG: | |
2260 | if (size == 4) { | |
2261 | val = s->root & ((1ULL << 32) - 1); | |
2262 | } else { | |
2263 | val = s->root; | |
2264 | } | |
2265 | break; | |
2266 | ||
2267 | case DMAR_RTADDR_REG_HI: | |
2268 | assert(size == 4); | |
2269 | val = s->root >> 32; | |
2270 | break; | |
2271 | ||
ed7b8fbc LT |
2272 | /* Invalidation Queue Address Register, 64-bit */ |
2273 | case DMAR_IQA_REG: | |
2274 | val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS); | |
2275 | if (size == 4) { | |
2276 | val = val & ((1ULL << 32) - 1); | |
2277 | } | |
2278 | break; | |
2279 | ||
2280 | case DMAR_IQA_REG_HI: | |
2281 | assert(size == 4); | |
2282 | val = s->iq >> 32; | |
2283 | break; | |
2284 | ||
1da12ec4 LT |
2285 | default: |
2286 | if (size == 4) { | |
2287 | val = vtd_get_long(s, addr); | |
2288 | } else { | |
2289 | val = vtd_get_quad(s, addr); | |
2290 | } | |
2291 | } | |
7feb51b7 | 2292 | |
1da12ec4 LT |
2293 | return val; |
2294 | } | |
2295 | ||
2296 | static void vtd_mem_write(void *opaque, hwaddr addr, | |
2297 | uint64_t val, unsigned size) | |
2298 | { | |
2299 | IntelIOMMUState *s = opaque; | |
2300 | ||
7feb51b7 PX |
2301 | trace_vtd_reg_write(addr, size, val); |
2302 | ||
1da12ec4 | 2303 | if (addr + size > DMAR_REG_SIZE) { |
1376211f PX |
2304 | error_report_once("%s: MMIO over range: addr=0x%" PRIx64 |
2305 | " size=0x%u", __func__, addr, size); | |
1da12ec4 LT |
2306 | return; |
2307 | } | |
2308 | ||
2309 | switch (addr) { | |
2310 | /* Global Command Register, 32-bit */ | |
2311 | case DMAR_GCMD_REG: | |
1da12ec4 LT |
2312 | vtd_set_long(s, addr, val); |
2313 | vtd_handle_gcmd_write(s); | |
2314 | break; | |
2315 | ||
2316 | /* Context Command Register, 64-bit */ | |
2317 | case DMAR_CCMD_REG: | |
1da12ec4 LT |
2318 | if (size == 4) { |
2319 | vtd_set_long(s, addr, val); | |
2320 | } else { | |
2321 | vtd_set_quad(s, addr, val); | |
2322 | vtd_handle_ccmd_write(s); | |
2323 | } | |
2324 | break; | |
2325 | ||
2326 | case DMAR_CCMD_REG_HI: | |
1da12ec4 LT |
2327 | assert(size == 4); |
2328 | vtd_set_long(s, addr, val); | |
2329 | vtd_handle_ccmd_write(s); | |
2330 | break; | |
2331 | ||
2332 | /* IOTLB Invalidation Register, 64-bit */ | |
2333 | case DMAR_IOTLB_REG: | |
1da12ec4 LT |
2334 | if (size == 4) { |
2335 | vtd_set_long(s, addr, val); | |
2336 | } else { | |
2337 | vtd_set_quad(s, addr, val); | |
2338 | vtd_handle_iotlb_write(s); | |
2339 | } | |
2340 | break; | |
2341 | ||
2342 | case DMAR_IOTLB_REG_HI: | |
1da12ec4 LT |
2343 | assert(size == 4); |
2344 | vtd_set_long(s, addr, val); | |
2345 | vtd_handle_iotlb_write(s); | |
2346 | break; | |
2347 | ||
b5a280c0 LT |
2348 | /* Invalidate Address Register, 64-bit */ |
2349 | case DMAR_IVA_REG: | |
b5a280c0 LT |
2350 | if (size == 4) { |
2351 | vtd_set_long(s, addr, val); | |
2352 | } else { | |
2353 | vtd_set_quad(s, addr, val); | |
2354 | } | |
2355 | break; | |
2356 | ||
2357 | case DMAR_IVA_REG_HI: | |
b5a280c0 LT |
2358 | assert(size == 4); |
2359 | vtd_set_long(s, addr, val); | |
2360 | break; | |
2361 | ||
1da12ec4 LT |
2362 | /* Fault Status Register, 32-bit */ |
2363 | case DMAR_FSTS_REG: | |
1da12ec4 LT |
2364 | assert(size == 4); |
2365 | vtd_set_long(s, addr, val); | |
2366 | vtd_handle_fsts_write(s); | |
2367 | break; | |
2368 | ||
2369 | /* Fault Event Control Register, 32-bit */ | |
2370 | case DMAR_FECTL_REG: | |
1da12ec4 LT |
2371 | assert(size == 4); |
2372 | vtd_set_long(s, addr, val); | |
2373 | vtd_handle_fectl_write(s); | |
2374 | break; | |
2375 | ||
2376 | /* Fault Event Data Register, 32-bit */ | |
2377 | case DMAR_FEDATA_REG: | |
1da12ec4 LT |
2378 | assert(size == 4); |
2379 | vtd_set_long(s, addr, val); | |
2380 | break; | |
2381 | ||
2382 | /* Fault Event Address Register, 32-bit */ | |
2383 | case DMAR_FEADDR_REG: | |
b7a7bb35 JK |
2384 | if (size == 4) { |
2385 | vtd_set_long(s, addr, val); | |
2386 | } else { | |
2387 | /* | |
2388 | * While the register is 32-bit only, some guests (Xen...) write to | |
2389 | * it with 64-bit. | |
2390 | */ | |
2391 | vtd_set_quad(s, addr, val); | |
2392 | } | |
1da12ec4 LT |
2393 | break; |
2394 | ||
2395 | /* Fault Event Upper Address Register, 32-bit */ | |
2396 | case DMAR_FEUADDR_REG: | |
1da12ec4 LT |
2397 | assert(size == 4); |
2398 | vtd_set_long(s, addr, val); | |
2399 | break; | |
2400 | ||
2401 | /* Protected Memory Enable Register, 32-bit */ | |
2402 | case DMAR_PMEN_REG: | |
1da12ec4 LT |
2403 | assert(size == 4); |
2404 | vtd_set_long(s, addr, val); | |
2405 | break; | |
2406 | ||
2407 | /* Root Table Address Register, 64-bit */ | |
2408 | case DMAR_RTADDR_REG: | |
1da12ec4 LT |
2409 | if (size == 4) { |
2410 | vtd_set_long(s, addr, val); | |
2411 | } else { | |
2412 | vtd_set_quad(s, addr, val); | |
2413 | } | |
2414 | break; | |
2415 | ||
2416 | case DMAR_RTADDR_REG_HI: | |
1da12ec4 LT |
2417 | assert(size == 4); |
2418 | vtd_set_long(s, addr, val); | |
2419 | break; | |
2420 | ||
ed7b8fbc LT |
2421 | /* Invalidation Queue Tail Register, 64-bit */ |
2422 | case DMAR_IQT_REG: | |
ed7b8fbc LT |
2423 | if (size == 4) { |
2424 | vtd_set_long(s, addr, val); | |
2425 | } else { | |
2426 | vtd_set_quad(s, addr, val); | |
2427 | } | |
2428 | vtd_handle_iqt_write(s); | |
2429 | break; | |
2430 | ||
2431 | case DMAR_IQT_REG_HI: | |
ed7b8fbc LT |
2432 | assert(size == 4); |
2433 | vtd_set_long(s, addr, val); | |
2434 | /* 19:63 of IQT_REG is RsvdZ, do nothing here */ | |
2435 | break; | |
2436 | ||
2437 | /* Invalidation Queue Address Register, 64-bit */ | |
2438 | case DMAR_IQA_REG: | |
ed7b8fbc LT |
2439 | if (size == 4) { |
2440 | vtd_set_long(s, addr, val); | |
2441 | } else { | |
2442 | vtd_set_quad(s, addr, val); | |
2443 | } | |
2444 | break; | |
2445 | ||
2446 | case DMAR_IQA_REG_HI: | |
ed7b8fbc LT |
2447 | assert(size == 4); |
2448 | vtd_set_long(s, addr, val); | |
2449 | break; | |
2450 | ||
2451 | /* Invalidation Completion Status Register, 32-bit */ | |
2452 | case DMAR_ICS_REG: | |
ed7b8fbc LT |
2453 | assert(size == 4); |
2454 | vtd_set_long(s, addr, val); | |
2455 | vtd_handle_ics_write(s); | |
2456 | break; | |
2457 | ||
2458 | /* Invalidation Event Control Register, 32-bit */ | |
2459 | case DMAR_IECTL_REG: | |
ed7b8fbc LT |
2460 | assert(size == 4); |
2461 | vtd_set_long(s, addr, val); | |
2462 | vtd_handle_iectl_write(s); | |
2463 | break; | |
2464 | ||
2465 | /* Invalidation Event Data Register, 32-bit */ | |
2466 | case DMAR_IEDATA_REG: | |
ed7b8fbc LT |
2467 | assert(size == 4); |
2468 | vtd_set_long(s, addr, val); | |
2469 | break; | |
2470 | ||
2471 | /* Invalidation Event Address Register, 32-bit */ | |
2472 | case DMAR_IEADDR_REG: | |
ed7b8fbc LT |
2473 | assert(size == 4); |
2474 | vtd_set_long(s, addr, val); | |
2475 | break; | |
2476 | ||
2477 | /* Invalidation Event Upper Address Register, 32-bit */ | |
2478 | case DMAR_IEUADDR_REG: | |
ed7b8fbc LT |
2479 | assert(size == 4); |
2480 | vtd_set_long(s, addr, val); | |
2481 | break; | |
2482 | ||
1da12ec4 LT |
2483 | /* Fault Recording Registers, 128-bit */ |
2484 | case DMAR_FRCD_REG_0_0: | |
1da12ec4 LT |
2485 | if (size == 4) { |
2486 | vtd_set_long(s, addr, val); | |
2487 | } else { | |
2488 | vtd_set_quad(s, addr, val); | |
2489 | } | |
2490 | break; | |
2491 | ||
2492 | case DMAR_FRCD_REG_0_1: | |
1da12ec4 LT |
2493 | assert(size == 4); |
2494 | vtd_set_long(s, addr, val); | |
2495 | break; | |
2496 | ||
2497 | case DMAR_FRCD_REG_0_2: | |
1da12ec4 LT |
2498 | if (size == 4) { |
2499 | vtd_set_long(s, addr, val); | |
2500 | } else { | |
2501 | vtd_set_quad(s, addr, val); | |
2502 | /* May clear bit 127 (Fault), update PPF */ | |
2503 | vtd_update_fsts_ppf(s); | |
2504 | } | |
2505 | break; | |
2506 | ||
2507 | case DMAR_FRCD_REG_0_3: | |
1da12ec4 LT |
2508 | assert(size == 4); |
2509 | vtd_set_long(s, addr, val); | |
2510 | /* May clear bit 127 (Fault), update PPF */ | |
2511 | vtd_update_fsts_ppf(s); | |
2512 | break; | |
2513 | ||
a5861439 | 2514 | case DMAR_IRTA_REG: |
a5861439 PX |
2515 | if (size == 4) { |
2516 | vtd_set_long(s, addr, val); | |
2517 | } else { | |
2518 | vtd_set_quad(s, addr, val); | |
2519 | } | |
2520 | break; | |
2521 | ||
2522 | case DMAR_IRTA_REG_HI: | |
a5861439 PX |
2523 | assert(size == 4); |
2524 | vtd_set_long(s, addr, val); | |
2525 | break; | |
2526 | ||
1da12ec4 | 2527 | default: |
1da12ec4 LT |
2528 | if (size == 4) { |
2529 | vtd_set_long(s, addr, val); | |
2530 | } else { | |
2531 | vtd_set_quad(s, addr, val); | |
2532 | } | |
2533 | } | |
2534 | } | |
2535 | ||
3df9d748 | 2536 | static IOMMUTLBEntry vtd_iommu_translate(IOMMUMemoryRegion *iommu, hwaddr addr, |
2c91bcf2 | 2537 | IOMMUAccessFlags flag, int iommu_idx) |
1da12ec4 LT |
2538 | { |
2539 | VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); | |
2540 | IntelIOMMUState *s = vtd_as->iommu_state; | |
b9313021 PX |
2541 | IOMMUTLBEntry iotlb = { |
2542 | /* We'll fill in the rest later. */ | |
1da12ec4 | 2543 | .target_as = &address_space_memory, |
1da12ec4 | 2544 | }; |
b9313021 | 2545 | bool success; |
1da12ec4 | 2546 | |
b9313021 PX |
2547 | if (likely(s->dmar_enabled)) { |
2548 | success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, | |
2549 | addr, flag & IOMMU_WO, &iotlb); | |
2550 | } else { | |
1da12ec4 | 2551 | /* DMAR disabled, passthrough, use 4k-page */ |
b9313021 PX |
2552 | iotlb.iova = addr & VTD_PAGE_MASK_4K; |
2553 | iotlb.translated_addr = addr & VTD_PAGE_MASK_4K; | |
2554 | iotlb.addr_mask = ~VTD_PAGE_MASK_4K; | |
2555 | iotlb.perm = IOMMU_RW; | |
2556 | success = true; | |
1da12ec4 LT |
2557 | } |
2558 | ||
b9313021 PX |
2559 | if (likely(success)) { |
2560 | trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus), | |
2561 | VTD_PCI_SLOT(vtd_as->devfn), | |
2562 | VTD_PCI_FUNC(vtd_as->devfn), | |
2563 | iotlb.iova, iotlb.translated_addr, | |
2564 | iotlb.addr_mask); | |
2565 | } else { | |
4e4abd11 PX |
2566 | error_report_once("%s: detected translation failure " |
2567 | "(dev=%02x:%02x:%02x, iova=0x%" PRIx64 ")", | |
2568 | __func__, pci_bus_num(vtd_as->bus), | |
2569 | VTD_PCI_SLOT(vtd_as->devfn), | |
2570 | VTD_PCI_FUNC(vtd_as->devfn), | |
662b4b69 | 2571 | addr); |
b9313021 | 2572 | } |
7feb51b7 | 2573 | |
b9313021 | 2574 | return iotlb; |
1da12ec4 LT |
2575 | } |
2576 | ||
3df9d748 | 2577 | static void vtd_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu, |
5bf3d319 PX |
2578 | IOMMUNotifierFlag old, |
2579 | IOMMUNotifierFlag new) | |
3cb3b154 AW |
2580 | { |
2581 | VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu); | |
dd4d607e | 2582 | IntelIOMMUState *s = vtd_as->iommu_state; |
3cb3b154 | 2583 | |
dd4d607e | 2584 | if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) { |
4c427a4c | 2585 | error_report("We need to set caching-mode=1 for intel-iommu to enable " |
dd4d607e | 2586 | "device assignment with IOMMU protection."); |
a3276f78 PX |
2587 | exit(1); |
2588 | } | |
dd4d607e | 2589 | |
4f8a62a9 PX |
2590 | /* Update per-address-space notifier flags */ |
2591 | vtd_as->notifier_flags = new; | |
2592 | ||
dd4d607e | 2593 | if (old == IOMMU_NOTIFIER_NONE) { |
b4a4ba0d PX |
2594 | QLIST_INSERT_HEAD(&s->vtd_as_with_notifiers, vtd_as, next); |
2595 | } else if (new == IOMMU_NOTIFIER_NONE) { | |
2596 | QLIST_REMOVE(vtd_as, next); | |
dd4d607e | 2597 | } |
3cb3b154 AW |
2598 | } |
2599 | ||
552a1e01 PX |
2600 | static int vtd_post_load(void *opaque, int version_id) |
2601 | { | |
2602 | IntelIOMMUState *iommu = opaque; | |
2603 | ||
2604 | /* | |
2605 | * Memory regions are dynamically turned on/off depending on | |
2606 | * context entry configurations from the guest. After migration, | |
2607 | * we need to make sure the memory regions are still correct. | |
2608 | */ | |
2609 | vtd_switch_address_space_all(iommu); | |
2610 | ||
2611 | return 0; | |
2612 | } | |
2613 | ||
1da12ec4 LT |
2614 | static const VMStateDescription vtd_vmstate = { |
2615 | .name = "iommu-intel", | |
8cdcf3c1 PX |
2616 | .version_id = 1, |
2617 | .minimum_version_id = 1, | |
2618 | .priority = MIG_PRI_IOMMU, | |
552a1e01 | 2619 | .post_load = vtd_post_load, |
8cdcf3c1 PX |
2620 | .fields = (VMStateField[]) { |
2621 | VMSTATE_UINT64(root, IntelIOMMUState), | |
2622 | VMSTATE_UINT64(intr_root, IntelIOMMUState), | |
2623 | VMSTATE_UINT64(iq, IntelIOMMUState), | |
2624 | VMSTATE_UINT32(intr_size, IntelIOMMUState), | |
2625 | VMSTATE_UINT16(iq_head, IntelIOMMUState), | |
2626 | VMSTATE_UINT16(iq_tail, IntelIOMMUState), | |
2627 | VMSTATE_UINT16(iq_size, IntelIOMMUState), | |
2628 | VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState), | |
2629 | VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE), | |
2630 | VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState), | |
2631 | VMSTATE_BOOL(root_extended, IntelIOMMUState), | |
2632 | VMSTATE_BOOL(dmar_enabled, IntelIOMMUState), | |
2633 | VMSTATE_BOOL(qi_enabled, IntelIOMMUState), | |
2634 | VMSTATE_BOOL(intr_enabled, IntelIOMMUState), | |
2635 | VMSTATE_BOOL(intr_eime, IntelIOMMUState), | |
2636 | VMSTATE_END_OF_LIST() | |
2637 | } | |
1da12ec4 LT |
2638 | }; |
2639 | ||
2640 | static const MemoryRegionOps vtd_mem_ops = { | |
2641 | .read = vtd_mem_read, | |
2642 | .write = vtd_mem_write, | |
2643 | .endianness = DEVICE_LITTLE_ENDIAN, | |
2644 | .impl = { | |
2645 | .min_access_size = 4, | |
2646 | .max_access_size = 8, | |
2647 | }, | |
2648 | .valid = { | |
2649 | .min_access_size = 4, | |
2650 | .max_access_size = 8, | |
2651 | }, | |
2652 | }; | |
2653 | ||
2654 | static Property vtd_properties[] = { | |
2655 | DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0), | |
e6b6af05 RK |
2656 | DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim, |
2657 | ON_OFF_AUTO_AUTO), | |
fb506e70 | 2658 | DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false), |
37f51384 PS |
2659 | DEFINE_PROP_UINT8("x-aw-bits", IntelIOMMUState, aw_bits, |
2660 | VTD_HOST_ADDRESS_WIDTH), | |
3b40f0e5 | 2661 | DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, FALSE), |
ccc23bb0 | 2662 | DEFINE_PROP_BOOL("dma-drain", IntelIOMMUState, dma_drain, true), |
1da12ec4 LT |
2663 | DEFINE_PROP_END_OF_LIST(), |
2664 | }; | |
2665 | ||
651e4cef PX |
2666 | /* Read IRTE entry with specific index */ |
2667 | static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index, | |
bc38ee10 | 2668 | VTD_IR_TableEntry *entry, uint16_t sid) |
651e4cef | 2669 | { |
ede9c94a PX |
2670 | static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \ |
2671 | {0xffff, 0xfffb, 0xfff9, 0xfff8}; | |
651e4cef | 2672 | dma_addr_t addr = 0x00; |
ede9c94a PX |
2673 | uint16_t mask, source_id; |
2674 | uint8_t bus, bus_max, bus_min; | |
651e4cef PX |
2675 | |
2676 | addr = iommu->intr_root + index * sizeof(*entry); | |
2677 | if (dma_memory_read(&address_space_memory, addr, entry, | |
2678 | sizeof(*entry))) { | |
1376211f PX |
2679 | error_report_once("%s: read failed: ind=0x%x addr=0x%" PRIx64, |
2680 | __func__, index, addr); | |
651e4cef PX |
2681 | return -VTD_FR_IR_ROOT_INVAL; |
2682 | } | |
2683 | ||
7feb51b7 PX |
2684 | trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]), |
2685 | le64_to_cpu(entry->data[0])); | |
2686 | ||
bc38ee10 | 2687 | if (!entry->irte.present) { |
4e4abd11 PX |
2688 | error_report_once("%s: detected non-present IRTE " |
2689 | "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", | |
2690 | __func__, index, le64_to_cpu(entry->data[1]), | |
2691 | le64_to_cpu(entry->data[0])); | |
651e4cef PX |
2692 | return -VTD_FR_IR_ENTRY_P; |
2693 | } | |
2694 | ||
bc38ee10 MT |
2695 | if (entry->irte.__reserved_0 || entry->irte.__reserved_1 || |
2696 | entry->irte.__reserved_2) { | |
4e4abd11 PX |
2697 | error_report_once("%s: detected non-zero reserved IRTE " |
2698 | "(index=%u, high=0x%" PRIx64 ", low=0x%" PRIx64 ")", | |
2699 | __func__, index, le64_to_cpu(entry->data[1]), | |
2700 | le64_to_cpu(entry->data[0])); | |
651e4cef PX |
2701 | return -VTD_FR_IR_IRTE_RSVD; |
2702 | } | |
2703 | ||
ede9c94a PX |
2704 | if (sid != X86_IOMMU_SID_INVALID) { |
2705 | /* Validate IRTE SID */ | |
bc38ee10 MT |
2706 | source_id = le32_to_cpu(entry->irte.source_id); |
2707 | switch (entry->irte.sid_vtype) { | |
ede9c94a | 2708 | case VTD_SVT_NONE: |
ede9c94a PX |
2709 | break; |
2710 | ||
2711 | case VTD_SVT_ALL: | |
bc38ee10 | 2712 | mask = vtd_svt_mask[entry->irte.sid_q]; |
ede9c94a | 2713 | if ((source_id & mask) != (sid & mask)) { |
4e4abd11 PX |
2714 | error_report_once("%s: invalid IRTE SID " |
2715 | "(index=%u, sid=%u, source_id=%u)", | |
2716 | __func__, index, sid, source_id); | |
ede9c94a PX |
2717 | return -VTD_FR_IR_SID_ERR; |
2718 | } | |
2719 | break; | |
2720 | ||
2721 | case VTD_SVT_BUS: | |
2722 | bus_max = source_id >> 8; | |
2723 | bus_min = source_id & 0xff; | |
2724 | bus = sid >> 8; | |
2725 | if (bus > bus_max || bus < bus_min) { | |
4e4abd11 PX |
2726 | error_report_once("%s: invalid SVT_BUS " |
2727 | "(index=%u, bus=%u, min=%u, max=%u)", | |
2728 | __func__, index, bus, bus_min, bus_max); | |
ede9c94a PX |
2729 | return -VTD_FR_IR_SID_ERR; |
2730 | } | |
2731 | break; | |
2732 | ||
2733 | default: | |
4e4abd11 PX |
2734 | error_report_once("%s: detected invalid IRTE SVT " |
2735 | "(index=%u, type=%d)", __func__, | |
2736 | index, entry->irte.sid_vtype); | |
ede9c94a PX |
2737 | /* Take this as verification failure. */ |
2738 | return -VTD_FR_IR_SID_ERR; | |
2739 | break; | |
2740 | } | |
2741 | } | |
651e4cef PX |
2742 | |
2743 | return 0; | |
2744 | } | |
2745 | ||
2746 | /* Fetch IRQ information of specific IR index */ | |
ede9c94a | 2747 | static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index, |
35c24501 | 2748 | X86IOMMUIrq *irq, uint16_t sid) |
651e4cef | 2749 | { |
bc38ee10 | 2750 | VTD_IR_TableEntry irte = {}; |
651e4cef PX |
2751 | int ret = 0; |
2752 | ||
ede9c94a | 2753 | ret = vtd_irte_get(iommu, index, &irte, sid); |
651e4cef PX |
2754 | if (ret) { |
2755 | return ret; | |
2756 | } | |
2757 | ||
bc38ee10 MT |
2758 | irq->trigger_mode = irte.irte.trigger_mode; |
2759 | irq->vector = irte.irte.vector; | |
2760 | irq->delivery_mode = irte.irte.delivery_mode; | |
2761 | irq->dest = le32_to_cpu(irte.irte.dest_id); | |
28589311 | 2762 | if (!iommu->intr_eime) { |
651e4cef PX |
2763 | #define VTD_IR_APIC_DEST_MASK (0xff00ULL) |
2764 | #define VTD_IR_APIC_DEST_SHIFT (8) | |
28589311 JK |
2765 | irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >> |
2766 | VTD_IR_APIC_DEST_SHIFT; | |
2767 | } | |
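/*
 * Note (added): without EIM the IRTE carries an 8-bit xAPIC destination
 * ID in bits 15:8 of the destination field, hence the mask-and-shift
 * above.
 */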
bc38ee10 MT |
2768 | irq->dest_mode = irte.irte.dest_mode; |
2769 | irq->redir_hint = irte.irte.redir_hint; | |
651e4cef | 2770 | |
7feb51b7 PX |
2771 | trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector, |
2772 | irq->delivery_mode, irq->dest, irq->dest_mode); | |
651e4cef PX |
2773 | |
2774 | return 0; | |
2775 | } | |
2776 | ||
651e4cef PX |
2777 | /* Interrupt remapping for MSI/MSI-X entry */ |
2778 | static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu, | |
2779 | MSIMessage *origin, | |
ede9c94a PX |
2780 | MSIMessage *translated, |
2781 | uint16_t sid) | |
651e4cef PX |
2782 | { |
2783 | int ret = 0; | |
2784 | VTD_IR_MSIAddress addr; | |
2785 | uint16_t index; | |
35c24501 | 2786 | X86IOMMUIrq irq = {}; |
651e4cef PX |
2787 | |
2788 | assert(origin && translated); | |
2789 | ||
7feb51b7 PX |
2790 | trace_vtd_ir_remap_msi_req(origin->address, origin->data); |
2791 | ||
651e4cef | 2792 | if (!iommu || !iommu->intr_enabled) { |
e7a3b91f PX |
2793 | memcpy(translated, origin, sizeof(*origin)); |
2794 | goto out; | |
651e4cef PX |
2795 | } |
2796 | ||
2797 | if (origin->address & VTD_MSI_ADDR_HI_MASK) { | |
1376211f PX |
2798 | error_report_once("%s: MSI address high 32 bits non-zero detected: " |
2799 | "address=0x%" PRIx64, __func__, origin->address); | |
651e4cef PX |
2800 | return -VTD_FR_IR_REQ_RSVD; |
2801 | } | |
2802 | ||
2803 | addr.data = origin->address & VTD_MSI_ADDR_LO_MASK; | |
1a43713b | 2804 | if (addr.addr.__head != 0xfee) { |
1376211f PX |
2805 | error_report_once("%s: MSI address low 32 bit invalid: 0x%" PRIx32, |
2806 | __func__, addr.data); | |
651e4cef PX |
2807 | return -VTD_FR_IR_REQ_RSVD; |
2808 | } | |
2809 | ||
2810 | /* This is compatibility mode. */ | |
bc38ee10 | 2811 | if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) { |
e7a3b91f PX |
2812 | memcpy(translated, origin, sizeof(*origin)); |
2813 | goto out; | |
651e4cef PX |
2814 | } |
2815 | ||
bc38ee10 | 2816 | index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l); |
651e4cef PX |
2817 | |
2818 | #define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff) | |
2819 | #define VTD_IR_MSI_DATA_RESERVED (0xffff0000) | |
2820 | ||
bc38ee10 | 2821 | if (addr.addr.sub_valid) { |
651e4cef PX |
2822 | /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */ |
2823 | index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE; | |
2824 | } | |
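/*
 * Illustration: the 16-bit IRTE index is reassembled from the split MSI
 * address fields (index_l supplies bits 14:0, index_h bit 15); with SHV
 * set, the subhandle in the low 16 bits of the MSI data is added on top.
 */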
2825 | ||
ede9c94a | 2826 | ret = vtd_remap_irq_get(iommu, index, &irq, sid); |
651e4cef PX |
2827 | if (ret) { |
2828 | return ret; | |
2829 | } | |
2830 | ||
bc38ee10 | 2831 | if (addr.addr.sub_valid) { |
7feb51b7 | 2832 | trace_vtd_ir_remap_type("MSI"); |
651e4cef | 2833 | if (origin->data & VTD_IR_MSI_DATA_RESERVED) { |
4e4abd11 PX |
2834 | error_report_once("%s: invalid IR MSI " |
2835 | "(sid=%u, address=0x%" PRIx64 | |
2836 | ", data=0x%" PRIx32 ")", | |
2837 | __func__, sid, origin->address, origin->data); | |
651e4cef PX |
2838 | return -VTD_FR_IR_REQ_RSVD; |
2839 | } | |
2840 | } else { | |
2841 | uint8_t vector = origin->data & 0xff; | |
dea651a9 FW |
2842 | uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1; |
2843 | ||
7feb51b7 | 2844 | trace_vtd_ir_remap_type("IOAPIC"); |
651e4cef PX |
2845 | /* IOAPIC entry vector should be aligned with IRTE vector |
2846 | * (see vt-d spec 5.1.5.1). */ | |
2847 | if (vector != irq.vector) { | |
7feb51b7 | 2848 | trace_vtd_warn_ir_vector(sid, index, vector, irq.vector); |
651e4cef | 2849 | } |
dea651a9 FW |
2850 | |
2851 | /* The Trigger Mode field must match the Trigger Mode in the IRTE. | |
2852 | * (see vt-d spec 5.1.5.1). */ | |
2853 | if (trigger_mode != irq.trigger_mode) { | |
7feb51b7 PX |
2854 | trace_vtd_warn_ir_trigger(sid, index, trigger_mode, |
2855 | irq.trigger_mode); | |
dea651a9 | 2856 | } |
651e4cef PX |
2857 | } |
2858 | ||
2859 | /* | |
2860 | * We'd better keep the last two bits, assuming that the guest OS | |
2861 | * might modify them. Keeping them does not hurt after all. | |
2862 | */ | |
bc38ee10 | 2863 | irq.msi_addr_last_bits = addr.addr.__not_care; |
651e4cef | 2864 | |
35c24501 BS |
2865 | /* Translate X86IOMMUIrq to MSI message */ |
2866 | x86_iommu_irq_to_msi_message(&irq, translated); | |
651e4cef | 2867 | |
e7a3b91f | 2868 | out: |
7feb51b7 PX |
2869 | trace_vtd_ir_remap_msi(origin->address, origin->data, |
2870 | translated->address, translated->data); | |
651e4cef PX |
2871 | return 0; |
2872 | } | |
2873 | ||
8b5ed7df PX |
2874 | static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src, |
2875 | MSIMessage *dst, uint16_t sid) | |
2876 | { | |
ede9c94a PX |
2877 | return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu), |
2878 | src, dst, sid); | |
8b5ed7df PX |
2879 | } |
2880 | ||
651e4cef PX |
2881 | static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr, |
2882 | uint64_t *data, unsigned size, | |
2883 | MemTxAttrs attrs) | |
2884 | { | |
2885 | return MEMTX_OK; | |
2886 | } | |
2887 | ||
2888 | static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr, | |
2889 | uint64_t value, unsigned size, | |
2890 | MemTxAttrs attrs) | |
2891 | { | |
2892 | int ret = 0; | |
09cd058a | 2893 | MSIMessage from = {}, to = {}; |
ede9c94a | 2894 | uint16_t sid = X86_IOMMU_SID_INVALID; |
651e4cef PX |
2895 | |
2896 | from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST; | |
2897 | from.data = (uint32_t) value; | |
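/*
 * Note (added): addr is the offset into the interrupt address window
 * (0xfee00000-0xfeefffff), so adding VTD_INTERRUPT_ADDR_FIRST
 * reconstructs the MSI address originally issued by the device.
 */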
2898 | ||
ede9c94a PX |
2899 | if (!attrs.unspecified) { |
2900 | /* We have explicit Source ID */ | |
2901 | sid = attrs.requester_id; | |
2902 | } | |
2903 | ||
2904 | ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid); | |
651e4cef PX |
2905 | if (ret) { |
2906 | /* TODO: report error */ | |
651e4cef PX |
2907 | /* Drop this interrupt */ |
2908 | return MEMTX_ERROR; | |
2909 | } | |
2910 | ||
32946019 | 2911 | apic_get_class()->send_msi(&to); |
651e4cef PX |
2912 | |
2913 | return MEMTX_OK; | |
2914 | } | |
2915 | ||
2916 | static const MemoryRegionOps vtd_mem_ir_ops = { | |
2917 | .read_with_attrs = vtd_mem_ir_read, | |
2918 | .write_with_attrs = vtd_mem_ir_write, | |
2919 | .endianness = DEVICE_LITTLE_ENDIAN, | |
2920 | .impl = { | |
2921 | .min_access_size = 4, | |
2922 | .max_access_size = 4, | |
2923 | }, | |
2924 | .valid = { | |
2925 | .min_access_size = 4, | |
2926 | .max_access_size = 4, | |
2927 | }, | |
2928 | }; | |
7df953bd KO |
2929 | |
2930 | VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn) | |
2931 | { | |
2932 | uintptr_t key = (uintptr_t)bus; | |
2933 | VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key); | |
2934 | VTDAddressSpace *vtd_dev_as; | |
e0a3c8cc | 2935 | char name[128]; |
7df953bd KO |
2936 | |
2937 | if (!vtd_bus) { | |
2d3fc581 JW |
2938 | uintptr_t *new_key = g_malloc(sizeof(*new_key)); |
2939 | *new_key = (uintptr_t)bus; | |
7df953bd | 2940 | /* No corresponding free() */ |
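| /* VTDBus is allocated together with its trailing per-devfn array of
|  * VTDAddressSpace pointers, hence the PCI_DEVFN_MAX-sized tail. */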
04af0e18 | 2941 | vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \ |
bf33cc75 | 2942 | PCI_DEVFN_MAX); |
7df953bd | 2943 | vtd_bus->bus = bus; |
2d3fc581 | 2944 | g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus); |
7df953bd KO |
2945 | } |
2946 | ||
2947 | vtd_dev_as = vtd_bus->dev_as[devfn]; | |
2948 | ||
2949 | if (!vtd_dev_as) { | |
e0a3c8cc | 2950 | snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn); |
7df953bd KO |
2951 | vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace)); |
2952 | ||
2953 | vtd_dev_as->bus = bus; | |
2954 | vtd_dev_as->devfn = (uint8_t)devfn; | |
2955 | vtd_dev_as->iommu_state = s; | |
2956 | vtd_dev_as->context_cache_entry.context_cache_gen = 0; | |
63b88968 | 2957 | vtd_dev_as->iova_tree = iova_tree_new(); |
558e0024 PX |
2958 | |
2959 | /* | |
2960 | * The memory region relationships look like this (address ranges | |
2961 | * show only the lower 32 bits for brevity): | |
2962 | * | |
2963 | * |-----------------+-------------------+----------| | |
2964 | * | Name | Address range | Priority | | |
2965 | * |-----------------+-------------------+----------+ | |
2966 | * | vtd_root | 00000000-ffffffff | 0 | | |
2967 | * | intel_iommu | 00000000-ffffffff | 1 | | |
2968 | * | vtd_sys_alias | 00000000-ffffffff | 1 | | |
2969 | * | intel_iommu_ir | fee00000-feefffff | 64 | | |
2970 | * |-----------------+-------------------+----------| | |
2971 | * | |
2972 | * DMAR is enabled/disabled by toggling the enablement of the | |
2973 | * vtd_sys_alias and intel_iommu regions. The IR region is always | |
2974 | * enabled. | |
2975 | */ | |
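| /*
|  * A sketch of how the toggle works (see vtd_switch_address_space()):
|  * when overlapping subregions are enabled, the higher priority wins,
|  * so with DMAR on the intel_iommu region is enabled and vtd_sys_alias
|  * disabled (and vice versa when DMAR is off), while intel_iommu_ir at
|  * priority 64 always shadows both within the 0xFEExxxxx window.
|  */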
1221a474 AK |
2976 | memory_region_init_iommu(&vtd_dev_as->iommu, sizeof(vtd_dev_as->iommu), |
2977 | TYPE_INTEL_IOMMU_MEMORY_REGION, OBJECT(s), | |
2978 | "intel_iommu_dmar", | |
558e0024 PX |
2979 | UINT64_MAX); |
2980 | memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s), | |
2981 | "vtd_sys_alias", get_system_memory(), | |
2982 | 0, memory_region_size(get_system_memory())); | |
651e4cef PX |
2983 | memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s), |
2984 | &vtd_mem_ir_ops, s, "intel_iommu_ir", | |
2985 | VTD_INTERRUPT_ADDR_SIZE); | |
558e0024 PX |
2986 | memory_region_init(&vtd_dev_as->root, OBJECT(s), |
2987 | "vtd_root", UINT64_MAX); | |
2988 | memory_region_add_subregion_overlap(&vtd_dev_as->root, | |
2989 | VTD_INTERRUPT_ADDR_FIRST, | |
2990 | &vtd_dev_as->iommu_ir, 64); | |
2991 | address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name); | |
2992 | memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, | |
2993 | &vtd_dev_as->sys_alias, 1); | |
2994 | memory_region_add_subregion_overlap(&vtd_dev_as->root, 0, | |
3df9d748 AK |
2995 | MEMORY_REGION(&vtd_dev_as->iommu), |
2996 | 1); | |
558e0024 | 2997 | vtd_switch_address_space(vtd_dev_as); |
7df953bd KO |
2998 | } |
2999 | return vtd_dev_as; | |
3000 | } | |
3001 | ||
dd4d607e PX |
3002 | /* Unmap the whole range in the notifier's scope. */ |
3003 | static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n) | |
3004 | { | |
3005 | IOMMUTLBEntry entry; | |
3006 | hwaddr size; | |
3007 | hwaddr start = n->start; | |
3008 | hwaddr end = n->end; | |
37f51384 | 3009 | IntelIOMMUState *s = as->iommu_state; |
63b88968 | 3010 | DMAMap map; |
dd4d607e PX |
3011 | |
3012 | /* | |
3013 | * Note: all the code in this function assumes that the IOVA width | |
3014 | * is no more than VTD_MGAW bits (as restricted by the VT-d spec); | |
3015 | * otherwise we would need to consider 64-bit overflow. | |
3016 | */ | |
3017 | ||
37f51384 | 3018 | if (end > VTD_ADDRESS_SIZE(s->aw_bits)) { |
dd4d607e PX |
3019 | /* |
3020 | * No need to unmap anything beyond the address space size | |
3021 | * supported by VT-d. | |
3022 | */ | |
37f51384 | 3023 | end = VTD_ADDRESS_SIZE(s->aw_bits); |
dd4d607e PX |
3024 | } |
3025 | ||
3026 | assert(start <= end); | |
3027 | size = end - start; | |
3028 | ||
3029 | if (ctpop64(size) != 1) { | |
3030 | /* | |
3031 | * This size cannot be expressed as a power-of-two mask. Enlarge | |
3032 | * it to the smallest power of two that covers the range. | |
3033 | */ | |
3034 | int n = 64 - clz64(size); | |
37f51384 | 3035 | if (n > s->aw_bits) { |
dd4d607e | 3036 | /* should not happen, but in case it happens, limit it */ |
37f51384 | 3037 | n = s->aw_bits; |
dd4d607e PX |
3038 | } |
3039 | size = 1ULL << n; | |
3040 | } | |
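| /*
|  * Worked example (illustrative, not from the source): start=0x1000,
|  * end=0x4000 gives size=0x3000, which is not a power of two;
|  * 64 - clz64(0x3000) = 14, so size becomes 0x4000 and the resulting
|  * addr_mask below is 0x3fff, with iova rounded down to 0x0.
|  */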
3041 | ||
3042 | entry.target_as = &address_space_memory; | |
3043 | /* Adjust iova for the size */ | |
3044 | entry.iova = n->start & ~(size - 1); | |
3045 | /* This field is meaningless for unmap */ | |
3046 | entry.translated_addr = 0; | |
3047 | entry.perm = IOMMU_NONE; | |
3048 | entry.addr_mask = size - 1; | |
3049 | ||
3050 | trace_vtd_as_unmap_whole(pci_bus_num(as->bus), | |
3051 | VTD_PCI_SLOT(as->devfn), | |
3052 | VTD_PCI_FUNC(as->devfn), | |
3053 | entry.iova, size); | |
3054 | ||
63b88968 PX |
3055 | map.iova = entry.iova; |
3056 | map.size = entry.addr_mask; | |
3057 | iova_tree_remove(as->iova_tree, &map); | |
3058 | ||
dd4d607e PX |
3059 | memory_region_notify_one(n, &entry); |
3060 | } | |
3061 | ||
3062 | static void vtd_address_space_unmap_all(IntelIOMMUState *s) | |
3063 | { | |
dd4d607e PX |
3064 | VTDAddressSpace *vtd_as; |
3065 | IOMMUNotifier *n; | |
3066 | ||
b4a4ba0d | 3067 | QLIST_FOREACH(vtd_as, &s->vtd_as_with_notifiers, next) { |
dd4d607e PX |
3068 | IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) { |
3069 | vtd_address_space_unmap(vtd_as, n); | |
3070 | } | |
3071 | } | |
3072 | } | |
3073 | ||
2cc9ddcc PX |
3074 | static void vtd_address_space_refresh_all(IntelIOMMUState *s) |
3075 | { | |
3076 | vtd_address_space_unmap_all(s); | |
3077 | vtd_switch_address_space_all(s); | |
3078 | } | |
3079 | ||
f06a696d PX |
3080 | static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private) |
3081 | { | |
3082 | memory_region_notify_one((IOMMUNotifier *)private, entry); | |
3083 | return 0; | |
3084 | } | |
3085 | ||
3df9d748 | 3086 | static void vtd_iommu_replay(IOMMUMemoryRegion *iommu_mr, IOMMUNotifier *n) |
f06a696d | 3087 | { |
3df9d748 | 3088 | VTDAddressSpace *vtd_as = container_of(iommu_mr, VTDAddressSpace, iommu); |
f06a696d PX |
3089 | IntelIOMMUState *s = vtd_as->iommu_state; |
3090 | uint8_t bus_n = pci_bus_num(vtd_as->bus); | |
3091 | VTDContextEntry ce; | |
3092 | ||
dd4d607e PX |
3093 | /* |
3094 | * A replay can be triggered either by an invalidation or by a newly | |
3095 | * created entry. Either way, we release the existing mappings first | |
3096 | * (which amounts to flushing the caches for UNMAP-only notifiers). | |
3097 | */ | |
3098 | vtd_address_space_unmap(vtd_as, n); | |
3099 | ||
f06a696d | 3100 | if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) { |
f06a696d PX |
3101 | trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn), |
3102 | PCI_FUNC(vtd_as->devfn), | |
3103 | VTD_CONTEXT_ENTRY_DID(ce.hi), | |
3104 | ce.hi, ce.lo); | |
4f8a62a9 PX |
3105 | if (vtd_as_has_map_notifier(vtd_as)) { |
3106 | /* This is required only for MAP-typed notifiers */ | |
fe215b0c PX |
3107 | vtd_page_walk_info info = { |
3108 | .hook_fn = vtd_replay_hook, | |
3109 | .private = (void *)n, | |
3110 | .notify_unmap = false, | |
3111 | .aw = s->aw_bits, | |
2f764fa8 | 3112 | .as = vtd_as, |
d118c06e | 3113 | .domain_id = VTD_CONTEXT_ENTRY_DID(ce.hi), |
fe215b0c PX |
3114 | }; |
3115 | ||
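| /* Replay the whole IOVA range of this context entry; each mapping
|  * found by the walk is delivered to the notifier via vtd_replay_hook(). */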
3116 | vtd_page_walk(&ce, 0, ~0ULL, &info); | |
4f8a62a9 | 3117 | } |
f06a696d PX |
3118 | } else { |
3119 | trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn), | |
3120 | PCI_FUNC(vtd_as->devfn)); | |
3121 | } | |
3122 | ||
3123 | return; | |
3124 | } | |
3125 | ||
1da12ec4 LT |
3126 | /* Do the initialization. It is also called on reset, so pay | |
3127 | * attention when adding new initialization code. | |
3128 | */ | |
3129 | static void vtd_init(IntelIOMMUState *s) | |
3130 | { | |
d54bd7f8 PX |
3131 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
3132 | ||
1da12ec4 LT |
3133 | memset(s->csr, 0, DMAR_REG_SIZE); |
3134 | memset(s->wmask, 0, DMAR_REG_SIZE); | |
3135 | memset(s->w1cmask, 0, DMAR_REG_SIZE); | |
3136 | memset(s->womask, 0, DMAR_REG_SIZE); | |
3137 | ||
1da12ec4 LT |
3138 | s->root = 0; |
3139 | s->root_extended = false; | |
3140 | s->dmar_enabled = false; | |
3141 | s->iq_head = 0; | |
3142 | s->iq_tail = 0; | |
3143 | s->iq = 0; | |
3144 | s->iq_size = 0; | |
3145 | s->qi_enabled = false; | |
3146 | s->iq_last_desc_type = VTD_INV_DESC_NONE; | |
3147 | s->next_frcd_reg = 0; | |
92e5d85e PS |
3148 | s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | |
3149 | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS | | |
37f51384 | 3150 | VTD_CAP_SAGAW_39bit | VTD_CAP_MGAW(s->aw_bits); |
ccc23bb0 PX |
3151 | if (s->dma_drain) { |
3152 | s->cap |= VTD_CAP_DRAIN; | |
3153 | } | |
37f51384 PS |
3154 | if (s->aw_bits == VTD_HOST_AW_48BIT) { |
3155 | s->cap |= VTD_CAP_SAGAW_48bit; | |
3156 | } | |
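| /* The SAGAW bits above advertise the supported guest address widths:
|  * 39-bit (3-level page tables) is always reported, 48-bit (4-level)
|  * only when aw_bits is 48. */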
ed7b8fbc | 3157 | s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO; |
1da12ec4 | 3158 | |
92e5d85e PS |
3159 | /* |
3160 | * Rsvd field masks for spte | |
3161 | */ | |
3162 | vtd_paging_entry_rsvd_field[0] = ~0ULL; | |
37f51384 PS |
3163 | vtd_paging_entry_rsvd_field[1] = VTD_SPTE_PAGE_L1_RSVD_MASK(s->aw_bits); |
3164 | vtd_paging_entry_rsvd_field[2] = VTD_SPTE_PAGE_L2_RSVD_MASK(s->aw_bits); | |
3165 | vtd_paging_entry_rsvd_field[3] = VTD_SPTE_PAGE_L3_RSVD_MASK(s->aw_bits); | |
3166 | vtd_paging_entry_rsvd_field[4] = VTD_SPTE_PAGE_L4_RSVD_MASK(s->aw_bits); | |
3167 | vtd_paging_entry_rsvd_field[5] = VTD_SPTE_LPAGE_L1_RSVD_MASK(s->aw_bits); | |
3168 | vtd_paging_entry_rsvd_field[6] = VTD_SPTE_LPAGE_L2_RSVD_MASK(s->aw_bits); | |
3169 | vtd_paging_entry_rsvd_field[7] = VTD_SPTE_LPAGE_L3_RSVD_MASK(s->aw_bits); | |
3170 | vtd_paging_entry_rsvd_field[8] = VTD_SPTE_LPAGE_L4_RSVD_MASK(s->aw_bits); | |
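| /* The array index is the paging level: [1..4] hold the reserved-bit
|  * masks for regular page-table entries, [5..8] the masks for the
|  * corresponding large-page entries; [0] stays all-ones since no
|  * level-0 entry is ever valid. */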
92e5d85e | 3171 | |
d54bd7f8 | 3172 | if (x86_iommu->intr_supported) { |
e6b6af05 RK |
3173 | s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV; |
3174 | if (s->intr_eim == ON_OFF_AUTO_ON) { | |
3175 | s->ecap |= VTD_ECAP_EIM; | |
3176 | } | |
3177 | assert(s->intr_eim != ON_OFF_AUTO_AUTO); | |
d54bd7f8 PX |
3178 | } |
3179 | ||
554f5e16 JW |
3180 | if (x86_iommu->dt_supported) { |
3181 | s->ecap |= VTD_ECAP_DT; | |
3182 | } | |
3183 | ||
dbaabb25 PX |
3184 | if (x86_iommu->pt_supported) { |
3185 | s->ecap |= VTD_ECAP_PT; | |
3186 | } | |
3187 | ||
3b40f0e5 ABD |
3188 | if (s->caching_mode) { |
3189 | s->cap |= VTD_CAP_CM; | |
3190 | } | |
3191 | ||
06aba4ca | 3192 | vtd_reset_caches(s); |
d92fa2dc | 3193 | |
1da12ec4 LT |
3194 | /* Define registers with default values and bit semantics */ |
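| /* Each vtd_define_*() call below supplies the register's reset value,
|  * its software-writable bit mask and its write-1-to-clear mask; the
|  * vtd_define_*_wo() variants additionally record write-only bits. */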
3195 | vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0); | |
3196 | vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0); | |
3197 | vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0); | |
3198 | vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0); | |
3199 | vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL); | |
3200 | vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0); | |
3201 | vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0); | |
3202 | vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0); | |
3203 | vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL); | |
3204 | ||
3205 | /* Advanced Fault Logging not supported */ | |
3206 | vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL); | |
3207 | vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0); | |
3208 | vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0); | |
3209 | vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0); | |
3210 | ||
3211 | /* Treated as RsvdZ when EIM in ECAP_REG is not supported | |
3212 | * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0); | |
3213 | */ | |
3214 | vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0); | |
3215 | ||
3216 | /* Treated as RO for implementations that report the PLMR and PHMR | |
3217 | * fields as Clear in the CAP_REG. | |
3218 | * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0); | |
3219 | */ | |
3220 | vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0); | |
3221 | ||
ed7b8fbc LT |
3222 | vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0); |
3223 | vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0); | |
3224 | vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0); | |
3225 | vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL); | |
3226 | vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0); | |
3227 | vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0); | |
3228 | vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0); | |
3229 | /* Treated as RsvdZ when EIM in ECAP_REG is not supported */ | |
3230 | vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0); | |
3231 | ||
1da12ec4 LT |
3232 | /* IOTLB registers */ |
3233 | vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0); | |
3234 | vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0); | |
3235 | vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL); | |
3236 | ||
3237 | /* Fault Recording Registers, 128-bit */ | |
3238 | vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0); | |
3239 | vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL); | |
a5861439 PX |
3240 | |
3241 | /* | |
28589311 | 3242 | * Interrupt remapping registers. |
a5861439 | 3243 | */ |
28589311 | 3244 | vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0); |
1da12ec4 LT |
3245 | } |
3246 | ||
3247 | /* Should not reset the address spaces on reset, because devices will keep | |
3248 | * using the address space they got initially (they won't ask the bus again). | |
3249 | */ | |
3250 | static void vtd_reset(DeviceState *dev) | |
3251 | { | |
3252 | IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); | |
3253 | ||
1da12ec4 | 3254 | vtd_init(s); |
2cc9ddcc | 3255 | vtd_address_space_refresh_all(s); |
1da12ec4 LT |
3256 | } |
3257 | ||
621d983a MA |
3258 | static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn) |
3259 | { | |
3260 | IntelIOMMUState *s = opaque; | |
3261 | VTDAddressSpace *vtd_as; | |
3262 | ||
bf33cc75 | 3263 | assert(0 <= devfn && devfn < PCI_DEVFN_MAX); |
621d983a MA |
3264 | |
3265 | vtd_as = vtd_find_add_as(s, bus, devfn); | |
3266 | return &vtd_as->as; | |
3267 | } | |
3268 | ||
e6b6af05 | 3269 | static bool vtd_decide_config(IntelIOMMUState *s, Error **errp) |
6333e93c | 3270 | { |
e6b6af05 RK |
3271 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s); |
3272 | ||
e6b6af05 RK |
3273 | if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) { |
3274 | error_setg(errp, "eim=on cannot be selected without intremap=on"); | |
3275 | return false; | |
3276 | } | |
3277 | ||
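| /* With eim=auto, default to EIM only when interrupt remapping is on
|  * and either the in-kernel irqchip is used or buggy_eim is set. */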
3278 | if (s->intr_eim == ON_OFF_AUTO_AUTO) { | |
fb506e70 RK |
3279 | s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim) |
3280 | && x86_iommu->intr_supported ? | |
e6b6af05 RK |
3281 | ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF; |
3282 | } | |
fb506e70 RK |
3283 | if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) { |
3284 | if (!kvm_irqchip_in_kernel()) { | |
3285 | error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split"); | |
3286 | return false; | |
3287 | } | |
3288 | if (!kvm_enable_x2apic()) { | |
3289 | error_setg(errp, "eim=on requires support on the KVM side " | |
3290 | "(X2APIC_API, first shipped in v4.7)"); | |
3291 | return false; | |
3292 | } | |
3293 | } | |
e6b6af05 | 3294 | |
37f51384 PS |
3295 | /* Currently the only supported address widths are 39 and 48 bits */ | |
3296 | if ((s->aw_bits != VTD_HOST_AW_39BIT) && | |
3297 | (s->aw_bits != VTD_HOST_AW_48BIT)) { | |
3298 | error_setg(errp, "Supported values for x-aw-bits are: %d, %d", | |
3299 | VTD_HOST_AW_39BIT, VTD_HOST_AW_48BIT); | |
3300 | return false; | |
3301 | } | |
3302 | ||
6333e93c RK |
3303 | return true; |
3304 | } | |
3305 | ||
1da12ec4 LT |
3306 | static void vtd_realize(DeviceState *dev, Error **errp) |
3307 | { | |
ef0e8fc7 | 3308 | MachineState *ms = MACHINE(qdev_get_machine()); |
29396ed9 MG |
3309 | PCMachineState *pcms = PC_MACHINE(ms); |
3310 | PCIBus *bus = pcms->bus; | |
1da12ec4 | 3311 | IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev); |
4684a204 | 3312 | X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev); |
1da12ec4 | 3313 | |
fb9f5926 | 3314 | x86_iommu->type = TYPE_INTEL; |
6333e93c | 3315 | |
e6b6af05 | 3316 | if (!vtd_decide_config(s, errp)) { |
6333e93c RK |
3317 | return; |
3318 | } | |
3319 | ||
b4a4ba0d | 3320 | QLIST_INIT(&s->vtd_as_with_notifiers); |
1d9efa73 | 3321 | qemu_mutex_init(&s->iommu_lock); |
7df953bd | 3322 | memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num)); |
1da12ec4 LT |
3323 | memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s, |
3324 | "intel_iommu", DMAR_REG_SIZE); | |
3325 | sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem); | |
b5a280c0 LT |
3326 | /* No corresponding destroy */ |
3327 | s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, | |
3328 | g_free, g_free); | |
7df953bd KO |
3329 | s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal, |
3330 | g_free, g_free); | |
1da12ec4 | 3331 | vtd_init(s); |
621d983a MA |
3332 | sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR); |
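| /* Register vtd_host_dma_iommu() so that every PCI device on the bus
|  * gets its per-device DMA address space from vtd_find_add_as(). */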
3333 | pci_setup_iommu(bus, vtd_host_dma_iommu, dev); | |
cb135f59 PX |
3334 | /* Pseudo address space under root PCI bus. */ |
3335 | pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC); | |
1da12ec4 LT |
3336 | } |
3337 | ||
3338 | static void vtd_class_init(ObjectClass *klass, void *data) | |
3339 | { | |
3340 | DeviceClass *dc = DEVICE_CLASS(klass); | |
1c7955c4 | 3341 | X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass); |
1da12ec4 LT |
3342 | |
3343 | dc->reset = vtd_reset; | |
1da12ec4 LT |
3344 | dc->vmsd = &vtd_vmstate; |
3345 | dc->props = vtd_properties; | |
621d983a | 3346 | dc->hotpluggable = false; |
1c7955c4 | 3347 | x86_class->realize = vtd_realize; |
8b5ed7df | 3348 | x86_class->int_remap = vtd_int_remap; |
8ab5700c | 3349 | /* Supported by the pc-q35-* machine types */ |
e4f4fb1e | 3350 | dc->user_creatable = true; |
1da12ec4 LT |
3351 | } |
3352 | ||
3353 | static const TypeInfo vtd_info = { | |
3354 | .name = TYPE_INTEL_IOMMU_DEVICE, | |
1c7955c4 | 3355 | .parent = TYPE_X86_IOMMU_DEVICE, |
1da12ec4 LT |
3356 | .instance_size = sizeof(IntelIOMMUState), |
3357 | .class_init = vtd_class_init, | |
3358 | }; | |
3359 | ||
1221a474 AK |
3360 | static void vtd_iommu_memory_region_class_init(ObjectClass *klass, |
3361 | void *data) | |
3362 | { | |
3363 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); | |
3364 | ||
3365 | imrc->translate = vtd_iommu_translate; | |
3366 | imrc->notify_flag_changed = vtd_iommu_notify_flag_changed; | |
3367 | imrc->replay = vtd_iommu_replay; | |
3368 | } | |
3369 | ||
3370 | static const TypeInfo vtd_iommu_memory_region_info = { | |
3371 | .parent = TYPE_IOMMU_MEMORY_REGION, | |
3372 | .name = TYPE_INTEL_IOMMU_MEMORY_REGION, | |
3373 | .class_init = vtd_iommu_memory_region_class_init, | |
3374 | }; | |
3375 | ||
1da12ec4 LT |
3376 | static void vtd_register_types(void) |
3377 | { | |
1da12ec4 | 3378 | type_register_static(&vtd_info); |
1221a474 | 3379 | type_register_static(&vtd_iommu_memory_region_info); |
1da12ec4 LT |
3380 | } |
3381 | ||
3382 | type_init(vtd_register_types) |