/*
 * QEMU emulation of an Intel IOMMU (VT-d)
 * (DMA Remapping device)
 *
 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License along
 * with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "hw/sysbus.h"
#include "exec/address-spaces.h"
#include "intel_iommu_internal.h"
#include "hw/pci/pci.h"
#include "hw/pci/pci_bus.h"
#include "hw/i386/pc.h"
#include "hw/i386/apic-msidef.h"
#include "hw/boards.h"
#include "hw/i386/x86-iommu.h"
#include "hw/pci-host/q35.h"
#include "sysemu/kvm.h"
#include "hw/i386/apic_internal.h"
#include "kvm_i386.h"
#include "trace.h"

static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
                            uint64_t wmask, uint64_t w1cmask)
{
    stq_le_p(&s->csr[addr], val);
    stq_le_p(&s->wmask[addr], wmask);
    stq_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
{
    stq_le_p(&s->womask[addr], mask);
}

static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
                            uint32_t wmask, uint32_t w1cmask)
{
    stl_le_p(&s->csr[addr], val);
    stl_le_p(&s->wmask[addr], wmask);
    stl_le_p(&s->w1cmask[addr], w1cmask);
}

static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
{
    stl_le_p(&s->womask[addr], mask);
}

/* "External" get/set operations */
static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    uint64_t oldval = ldq_le_p(&s->csr[addr]);
    uint64_t wmask = ldq_le_p(&s->wmask[addr]);
    uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
    stq_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}
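
/*
 * Worked example (illustrative values, not from the VT-d spec): take a
 * register with wmask = 0x1 (bit 0 software-writable) and w1cmask = 0x2
 * (bit 1 write-1-to-clear), oldval = 0x3, and an incoming write of
 * val = 0x2.  Then (oldval & ~wmask) = 0x2, (val & wmask) = 0, and
 * ~(w1cmask & val) clears bit 1, so the stored result is 0x0: the
 * writable bit takes the written 0, and writing 1 to the W1C bit
 * clears it.
 */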

static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
{
    uint32_t oldval = ldl_le_p(&s->csr[addr]);
    uint32_t wmask = ldl_le_p(&s->wmask[addr]);
    uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
    stl_le_p(&s->csr[addr],
             ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
}

static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
{
    uint64_t val = ldq_le_p(&s->csr[addr]);
    uint64_t womask = ldq_le_p(&s->womask[addr]);
    return val & ~womask;
}

static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
{
    uint32_t val = ldl_le_p(&s->csr[addr]);
    uint32_t womask = ldl_le_p(&s->womask[addr]);
    return val & ~womask;
}

/* "Internal" get/set operations */
static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldq_le_p(&s->csr[addr]);
}

static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
{
    return ldl_le_p(&s->csr[addr]);
}

static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
{
    stq_le_p(&s->csr[addr], val);
}

static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
                                        uint32_t clear, uint32_t mask)
{
    uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
    stl_le_p(&s->csr[addr], new_val);
    return new_val;
}

static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
                                        uint64_t clear, uint64_t mask)
{
    uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
    stq_le_p(&s->csr[addr], new_val);
    return new_val;
}

/* GHashTable functions */
static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
{
    return *((const uint64_t *)v1) == *((const uint64_t *)v2);
}

static guint vtd_uint64_hash(gconstpointer v)
{
    return (guint)*(const uint64_t *)v;
}

static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
                                          gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    uint16_t domain_id = *(uint16_t *)user_data;
    return entry->domain_id == domain_id;
}

/* The shift of an addr for a certain level of paging structure */
static inline uint32_t vtd_slpt_level_shift(uint32_t level)
{
    assert(level != 0);
    return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
}

static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
{
    return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
}
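
/*
 * For reference, with VTD_PAGE_SHIFT_4K = 12 and VTD_SL_LEVEL_BITS = 9
 * (the values used by this file): level 1 (4K pages) shifts by 12,
 * level 2 (2M) by 21, level 3 (1G) by 30; e.g.
 * vtd_slpt_level_page_mask(2) == ~0x1fffffULL, the 2M page mask.
 */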

static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
                                        gpointer user_data)
{
    VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
    VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
    uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
    uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
    return (entry->domain_id == info->domain_id) &&
            (((entry->gfn & info->mask) == gfn) ||
             (entry->gfn == gfn_tlb));
}

/* Reset the context cache generation of every VTDAddressSpace to zero and
 * set the generation of IntelIOMMUState to 1.
 */
static void vtd_reset_context_cache(IntelIOMMUState *s)
{
    VTDAddressSpace *vtd_as;
    VTDBus *vtd_bus;
    GHashTableIter bus_it;
    uint32_t devfn_it;

    trace_vtd_context_cache_reset();

    g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);

    while (g_hash_table_iter_next(&bus_it, NULL, (void **)&vtd_bus)) {
        for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
            vtd_as = vtd_bus->dev_as[devfn_it];
            if (!vtd_as) {
                continue;
            }
            vtd_as->context_cache_entry.context_cache_gen = 0;
        }
    }
    s->context_cache_gen = 1;
}

static void vtd_reset_iotlb(IntelIOMMUState *s)
{
    assert(s->iotlb);
    g_hash_table_remove_all(s->iotlb);
}

static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint16_t source_id,
                                  uint32_t level)
{
    return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
           ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
}

static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
{
    return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
}
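
/*
 * Sketch of the key layout (assuming VTD_IOTLB_SID_SHIFT and
 * VTD_IOTLB_LVL_SHIFT sit above the GFN bits): the 64-bit key packs
 * | level | source_id | gfn |, so the same IOVA cached at different
 * page sizes (levels) produces distinct keys and the entries can
 * coexist; vtd_lookup_iotlb() below simply probes each level in turn.
 */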

static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
                                       hwaddr addr)
{
    VTDIOTLBEntry *entry;
    uint64_t key;
    int level;

    for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
        key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
                                source_id, level);
        entry = g_hash_table_lookup(s->iotlb, &key);
        if (entry) {
            goto out;
        }
    }

out:
    return entry;
}

static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
                             uint16_t domain_id, hwaddr addr, uint64_t slpte,
                             bool read_flags, bool write_flags,
                             uint32_t level)
{
    VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
    uint64_t *key = g_malloc(sizeof(*key));
    uint64_t gfn = vtd_get_iotlb_gfn(addr, level);

    trace_vtd_iotlb_page_update(source_id, addr, slpte, domain_id);
    if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
        trace_vtd_iotlb_reset("iotlb exceeds size limit");
        vtd_reset_iotlb(s);
    }

    entry->gfn = gfn;
    entry->domain_id = domain_id;
    entry->slpte = slpte;
    entry->read_flags = read_flags;
    entry->write_flags = write_flags;
    entry->mask = vtd_slpt_level_page_mask(level);
    *key = vtd_get_iotlb_key(gfn, source_id, level);
    g_hash_table_replace(s->iotlb, key, entry);
}

/* Given the reg addr of both the message data and address, generate an
 * interrupt via MSI.
 */
static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
                                   hwaddr mesg_data_reg)
{
    MSIMessage msi;

    assert(mesg_data_reg < DMAR_REG_SIZE);
    assert(mesg_addr_reg < DMAR_REG_SIZE);

    msi.address = vtd_get_long_raw(s, mesg_addr_reg);
    msi.data = vtd_get_long_raw(s, mesg_data_reg);

    trace_vtd_irq_generate(msi.address, msi.data);

    apic_get_class()->send_msi(&msi);
}

/* Generate a fault event to software via MSI if conditions are met.
 * Note that the value of FSTS_REG being passed in should be the one
 * before any update.
 */
static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
{
    if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
        pre_fsts & VTD_FSTS_IQE) {
        trace_vtd_err("There are previous interrupt conditions "
                      "to be serviced by software, fault event "
                      "is not generated.");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
    if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
        trace_vtd_err("Interrupt Mask set, irq is not generated.");
    } else {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

/* Check if the Fault (F) field of the Fault Recording Register referenced by
 * @index is Set.
 */
static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
}
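
/*
 * Layout note for the arithmetic above: the fault recording registers
 * form an array of 128-bit registers starting at DMAR_FRCD_REG_OFFSET,
 * so register @index lives at DMAR_FRCD_REG_OFFSET + index * 16, and
 * the F bit sits in its high 64-bit half -- hence the "+ 8" before the
 * quad-word read.
 */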

/* Update the PPF field of Fault Status Register.
 * Should be called whenever the F field of any fault recording
 * register changes.
 */
static void vtd_update_fsts_ppf(IntelIOMMUState *s)
{
    uint32_t i;
    uint32_t ppf_mask = 0;

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        if (vtd_is_frcd_set(s, i)) {
            ppf_mask = VTD_FSTS_PPF;
            break;
        }
    }
    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
    trace_vtd_fsts_ppf(!!ppf_mask);
}

static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
{
    /* Each reg is 128-bit */
    hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
    addr += 8; /* Access the high 64-bit half */

    assert(index < DMAR_FRCD_REG_NR);

    vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
    vtd_update_fsts_ppf(s);
}

/* Must not update F field now, should be done later */
static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
                            uint16_t source_id, hwaddr addr,
                            VTDFaultReason fault, bool is_write)
{
    uint64_t hi = 0, lo;
    hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);

    assert(index < DMAR_FRCD_REG_NR);

    lo = VTD_FRCD_FI(addr);
    hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
    if (!is_write) {
        hi |= VTD_FRCD_T;
    }
    vtd_set_quad_raw(s, frcd_reg_addr, lo);
    vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);

    trace_vtd_frr_new(index, hi, lo);
}

/* Try to collapse multiple pending faults from the same requester */
static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
{
    uint32_t i;
    uint64_t frcd_reg;
    hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */

    for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
        frcd_reg = vtd_get_quad_raw(s, addr);
        if ((frcd_reg & VTD_FRCD_F) &&
            ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
            return true;
        }
        addr += 16; /* 128-bit for each */
    }
    return false;
}

/* Log and report a DMAR (address translation) fault to software */
static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
                                  hwaddr addr, VTDFaultReason fault,
                                  bool is_write)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    assert(fault < VTD_FR_MAX);

    if (fault == VTD_FR_RESERVED_ERR) {
        /* This is not a normal fault reason case. Drop it. */
        return;
    }

    trace_vtd_dmar_fault(source_id, fault, addr, is_write);

    if (fsts_reg & VTD_FSTS_PFO) {
        trace_vtd_err("New fault is not recorded due to "
                      "Primary Fault Overflow.");
        return;
    }

    if (vtd_try_collapse_fault(s, source_id)) {
        trace_vtd_err("New fault is not recorded due to "
                      "compression of faults.");
        return;
    }

    if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
        trace_vtd_err("Next Fault Recording Reg is used, "
                      "new fault is not recorded, set PFO field.");
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
        return;
    }

    vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);

    if (fsts_reg & VTD_FSTS_PPF) {
        trace_vtd_err("There are pending faults already, "
                      "fault event is not generated.");
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
    } else {
        vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
                                VTD_FSTS_FRI(s->next_frcd_reg));
        vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
        s->next_frcd_reg++;
        if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
            s->next_frcd_reg = 0;
        }
        /* This case actually causes the PPF to be Set.
         * So generate a fault event (interrupt).
         */
        vtd_generate_fault_event(s, fsts_reg);
    }
}

/* Handle Invalidation Queue Errors of the queued invalidation interface */
static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);

    vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
    vtd_generate_fault_event(s, fsts_reg);
}

/* Set the IWC field and try to generate an invalidation completion interrupt */
static void vtd_generate_completion_event(IntelIOMMUState *s)
{
    if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
        trace_vtd_inv_desc_wait_irq("One pending, skip current");
        return;
    }
    vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
    vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
    if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
        trace_vtd_inv_desc_wait_irq("IM in IECTL_REG is set, "
                                    "new event not generated");
        return;
    } else {
        /* Generate the interrupt event */
        trace_vtd_inv_desc_wait_irq("Generating complete event");
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static inline bool vtd_root_entry_present(VTDRootEntry *root)
{
    return root->val & VTD_ROOT_ENTRY_P;
}

static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
                              VTDRootEntry *re)
{
    dma_addr_t addr;

    addr = s->root + index * sizeof(*re);
    if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
        trace_vtd_re_invalid(re->rsvd, re->val);
        re->val = 0;
        return -VTD_FR_ROOT_TABLE_INV;
    }
    re->val = le64_to_cpu(re->val);
    return 0;
}

static inline bool vtd_ce_present(VTDContextEntry *context)
{
    return context->lo & VTD_CONTEXT_ENTRY_P;
}

static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
                                           VTDContextEntry *ce)
{
    dma_addr_t addr;

    /* we have checked that root entry is present */
    addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
    if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
        trace_vtd_re_invalid(root->rsvd, root->val);
        return -VTD_FR_CONTEXT_TABLE_INV;
    }
    ce->lo = le64_to_cpu(ce->lo);
    ce->hi = le64_to_cpu(ce->hi);
    return 0;
}

static inline dma_addr_t vtd_ce_get_slpt_base(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
}

static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
{
    return slpte & VTD_SL_PT_BASE_ADDR_MASK;
}

/* Whether the pte indicates the address of the page frame */
static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
{
    return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
}

/* Get the content of a slpte located in @base_addr[@index] */
static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
{
    uint64_t slpte;

    assert(index < VTD_SL_PT_ENTRY_NR);

    if (dma_memory_read(&address_space_memory,
                        base_addr + index * sizeof(slpte), &slpte,
                        sizeof(slpte))) {
        slpte = (uint64_t)-1;
        return slpte;
    }
    slpte = le64_to_cpu(slpte);
    return slpte;
}

/* Given an iova and the level of paging structure, return the offset
 * of current level.
 */
static inline uint32_t vtd_iova_level_offset(uint64_t iova, uint32_t level)
{
    return (iova >> vtd_slpt_level_shift(level)) &
           ((1ULL << VTD_SL_LEVEL_BITS) - 1);
}
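
/*
 * Worked example (illustrative iova): with iova = 0x12345000,
 * vtd_iova_level_offset() yields (0x12345000 >> 30) & 0x1ff = 0 at
 * level 3, (0x12345000 >> 21) & 0x1ff = 0x91 at level 2, and
 * (0x12345000 >> 12) & 0x1ff = 0x145 at level 1 -- the usual radix-tree
 * decomposition, 9 bits per level on top of the 4K page offset.
 */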

/* Check Capability Register to see if the @level of page-table is supported */
static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
{
    return VTD_CAP_SAGAW_MASK & s->cap &
           (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
}

/* Get the page-table level that hardware should use for the second-level
 * page-table walk from the Address Width field of context-entry.
 */
static inline uint32_t vtd_ce_get_level(VTDContextEntry *ce)
{
    return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
}

static inline uint32_t vtd_ce_get_agaw(VTDContextEntry *ce)
{
    return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
}
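
/*
 * E.g. an AW field of 1 encodes a 3-level table: vtd_ce_get_level()
 * returns 2 + 1 = 3 and vtd_ce_get_agaw() returns 30 + 1 * 9 = 39, a
 * 39-bit adjusted guest address width; AW = 2 gives 4 levels / 48 bits.
 */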

static inline uint32_t vtd_ce_get_type(VTDContextEntry *ce)
{
    return ce->lo & VTD_CONTEXT_ENTRY_TT;
}

/* Return true if check passed, otherwise false */
static inline bool vtd_ce_type_check(X86IOMMUState *x86_iommu,
                                     VTDContextEntry *ce)
{
    switch (vtd_ce_get_type(ce)) {
    case VTD_CONTEXT_TT_MULTI_LEVEL:
        /* Always supported */
        break;
    case VTD_CONTEXT_TT_DEV_IOTLB:
        if (!x86_iommu->dt_supported) {
            return false;
        }
        break;
    case VTD_CONTEXT_TT_PASS_THROUGH:
        if (!x86_iommu->pt_supported) {
            return false;
        }
        break;
    default:
        /* Unknown type */
        return false;
    }
    return true;
}

static inline uint64_t vtd_iova_limit(VTDContextEntry *ce)
{
    uint32_t ce_agaw = vtd_ce_get_agaw(ce);
    return 1ULL << MIN(ce_agaw, VTD_MGAW);
}

/* Return true if IOVA passes range check, otherwise false. */
static inline bool vtd_iova_range_check(uint64_t iova, VTDContextEntry *ce)
{
    /*
     * Check if @iova is above 2^X-1, where X is the minimum of MGAW
     * in CAP_REG and AW in context-entry.
     */
    return !(iova & ~(vtd_iova_limit(ce) - 1));
}

static const uint64_t vtd_paging_entry_rsvd_field[] = {
    [0] = ~0ULL,
    /* For not large page */
    [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    /* For large page */
    [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
    [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
};

static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
{
    if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
        /* Maybe large page */
        return slpte & vtd_paging_entry_rsvd_field[level + 4];
    } else {
        return slpte & vtd_paging_entry_rsvd_field[level];
    }
}
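
/*
 * Reading the table above: indexes 1-4 are the per-level masks for
 * ordinary entries, and indexes 5-8 (i.e. level + 4) are used once the
 * PS bit is set.  For example, a 2M page entry (level 2) is checked
 * against entry [6], whose extra low bits (0x1ff800) flag a frame
 * address that is not 2M-aligned.
 */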

/* Find the VTD address space associated with a given bus number */
static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
{
    VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
    if (!vtd_bus) {
        /*
         * Iterate over the registered buses to find the one which
         * currently holds this bus number, and update the bus_num
         * lookup table:
         */
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
            if (pci_bus_num(vtd_bus->bus) == bus_num) {
                s->vtd_as_by_bus_num[bus_num] = vtd_bus;
                return vtd_bus;
            }
        }
    }
    return vtd_bus;
}

/* Given the @iova, get relevant @slptep. @slpte_level will be the last level
 * of the translation, and can be used for deciding the size of large page.
 */
static int vtd_iova_to_slpte(VTDContextEntry *ce, uint64_t iova, bool is_write,
                             uint64_t *slptep, uint32_t *slpte_level,
                             bool *reads, bool *writes)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);
    uint32_t offset;
    uint64_t slpte;
    uint64_t access_right_check;

    if (!vtd_iova_range_check(iova, ce)) {
        trace_vtd_err_dmar_iova_overflow(iova);
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    /* FIXME: what is the Atomics request here? */
    access_right_check = is_write ? VTD_SL_W : VTD_SL_R;

    while (true) {
        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_err_dmar_slpte_read_error(iova, level);
            if (level == vtd_ce_get_level(ce)) {
                /* Invalid programming of context-entry */
                return -VTD_FR_CONTEXT_ENTRY_INV;
            } else {
                return -VTD_FR_PAGING_ENTRY_INV;
            }
        }
        *reads = (*reads) && (slpte & VTD_SL_R);
        *writes = (*writes) && (slpte & VTD_SL_W);
        if (!(slpte & access_right_check)) {
            trace_vtd_err_dmar_slpte_perm_error(iova, level, slpte, is_write);
            return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
        }
        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_err_dmar_slpte_resv_error(iova, level, slpte);
            return -VTD_FR_PAGING_ENTRY_RSVD;
        }

        if (vtd_is_last_slpte(slpte, level)) {
            *slptep = slpte;
            *slpte_level = level;
            return 0;
        }
        addr = vtd_get_slpte_addr(slpte);
        level--;
    }
}

typedef int (*vtd_page_walk_hook)(IOMMUTLBEntry *entry, void *private);

/**
 * vtd_page_walk_level - walk over specific level for IOVA range
 *
 * @addr: base GPA addr to start the walk
 * @start: IOVA range start address
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: hook function to be called on each detected page
 * @private: private data to be passed into hook function
 * @read: whether parent level has read permission
 * @write: whether parent level has write permission
 * @notify_unmap: whether we should notify invalid entries
 */
static int vtd_page_walk_level(dma_addr_t addr, uint64_t start,
                               uint64_t end, vtd_page_walk_hook hook_fn,
                               void *private, uint32_t level,
                               bool read, bool write, bool notify_unmap)
{
    bool read_cur, write_cur, entry_valid;
    uint32_t offset;
    uint64_t slpte;
    uint64_t subpage_size, subpage_mask;
    IOMMUTLBEntry entry;
    uint64_t iova = start;
    uint64_t iova_next;
    int ret = 0;

    trace_vtd_page_walk_level(addr, level, start, end);

    subpage_size = 1ULL << vtd_slpt_level_shift(level);
    subpage_mask = vtd_slpt_level_page_mask(level);

    while (iova < end) {
        iova_next = (iova & subpage_mask) + subpage_size;

        offset = vtd_iova_level_offset(iova, level);
        slpte = vtd_get_slpte(addr, offset);

        if (slpte == (uint64_t)-1) {
            trace_vtd_page_walk_skip_read(iova, iova_next);
            goto next;
        }

        if (vtd_slpte_nonzero_rsvd(slpte, level)) {
            trace_vtd_page_walk_skip_reserve(iova, iova_next);
            goto next;
        }

        /* Permissions are stacked with parents' */
        read_cur = read && (slpte & VTD_SL_R);
        write_cur = write && (slpte & VTD_SL_W);

        /*
         * As long as we have either read/write permission, this is a
         * valid entry. The rule works for both page entries and page
         * table entries.
         */
        entry_valid = read_cur | write_cur;

        if (vtd_is_last_slpte(slpte, level)) {
            entry.target_as = &address_space_memory;
            entry.iova = iova & subpage_mask;
            /* NOTE: this is only meaningful if entry_valid == true */
            entry.translated_addr = vtd_get_slpte_addr(slpte);
            entry.addr_mask = ~subpage_mask;
            entry.perm = IOMMU_ACCESS_FLAG(read_cur, write_cur);
            if (!entry_valid && !notify_unmap) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            trace_vtd_page_walk_one(level, entry.iova, entry.translated_addr,
                                    entry.addr_mask, entry.perm);
            if (hook_fn) {
                ret = hook_fn(&entry, private);
                if (ret < 0) {
                    return ret;
                }
            }
        } else {
            if (!entry_valid) {
                trace_vtd_page_walk_skip_perm(iova, iova_next);
                goto next;
            }
            ret = vtd_page_walk_level(vtd_get_slpte_addr(slpte), iova,
                                      MIN(iova_next, end), hook_fn, private,
                                      level - 1, read_cur, write_cur,
                                      notify_unmap);
            if (ret < 0) {
                return ret;
            }
        }

next:
        iova = iova_next;
    }

    return 0;
}

/**
 * vtd_page_walk - walk specific IOVA range, and call the hook
 *
 * @ce: context entry to walk upon
 * @start: IOVA address to start the walk
 * @end: IOVA range end address (start <= addr < end)
 * @hook_fn: the hook to be called for each detected area
 * @private: private data for the hook function
 */
static int vtd_page_walk(VTDContextEntry *ce, uint64_t start, uint64_t end,
                         vtd_page_walk_hook hook_fn, void *private,
                         bool notify_unmap)
{
    dma_addr_t addr = vtd_ce_get_slpt_base(ce);
    uint32_t level = vtd_ce_get_level(ce);

    if (!vtd_iova_range_check(start, ce)) {
        return -VTD_FR_ADDR_BEYOND_MGAW;
    }

    if (!vtd_iova_range_check(end, ce)) {
        /* Fix end so that it reaches the maximum */
        end = vtd_iova_limit(ce);
    }

    return vtd_page_walk_level(addr, start, end, hook_fn, private,
                               level, true, true, notify_unmap);
}
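
/*
 * A minimal usage sketch (hypothetical caller, for illustration only):
 *
 *     static int dump_one(IOMMUTLBEntry *entry, void *private)
 *     {
 *         printf("iova 0x%"PRIx64" -> 0x%"PRIx64"\n",
 *                entry->iova, entry->translated_addr);
 *         return 0;
 *     }
 *
 *     vtd_page_walk(&ce, 0, 1ULL << 39, dump_one, NULL, false);
 *
 * would visit every valid final-level mapping of a 39-bit address
 * space; vtd_iotlb_page_invalidate_notify() further below uses the
 * same machinery with memory_region_notify_iommu() as the hook.
 */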

/* Map a device to its corresponding domain (context-entry) */
static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
                                    uint8_t devfn, VTDContextEntry *ce)
{
    VTDRootEntry re;
    int ret_fr;
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    ret_fr = vtd_get_root_entry(s, bus_num, &re);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_root_entry_present(&re)) {
        /* Not error - it's okay we don't have root entry. */
        trace_vtd_re_not_present(bus_num);
        return -VTD_FR_ROOT_ENTRY_P;
    }

    if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
        trace_vtd_re_invalid(re.rsvd, re.val);
        return -VTD_FR_ROOT_ENTRY_RSVD;
    }

    ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
    if (ret_fr) {
        return ret_fr;
    }

    if (!vtd_ce_present(ce)) {
        /* Not error - it's okay we don't have context entry. */
        trace_vtd_ce_not_present(bus_num, devfn);
        return -VTD_FR_CONTEXT_ENTRY_P;
    }

    if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
        (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_RSVD;
    }

    /* Check if the programming of context-entry is valid */
    if (!vtd_is_level_supported(s, vtd_ce_get_level(ce))) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    /* Do translation type check */
    if (!vtd_ce_type_check(x86_iommu, ce)) {
        trace_vtd_ce_invalid(ce->hi, ce->lo);
        return -VTD_FR_CONTEXT_ENTRY_INV;
    }

    return 0;
}

/*
 * Fetch translation type for specific device. Returns <0 if error
 * happens, otherwise return the shifted type to check against
 * VTD_CONTEXT_TT_*.
 */
static int vtd_dev_get_trans_type(VTDAddressSpace *as)
{
    IntelIOMMUState *s;
    VTDContextEntry ce;
    int ret;

    s = as->iommu_state;

    ret = vtd_dev_to_context_entry(s, pci_bus_num(as->bus),
                                   as->devfn, &ce);
    if (ret) {
        return ret;
    }

    return vtd_ce_get_type(&ce);
}

static bool vtd_dev_pt_enabled(VTDAddressSpace *as)
{
    int ret;

    assert(as);

    ret = vtd_dev_get_trans_type(as);
    if (ret < 0) {
        /*
         * Possibly failed to parse the context entry for some reason
         * (e.g., during init, or any guest configuration errors on
         * context entries). We should assume PT not enabled for
         * safety.
         */
        return false;
    }

    return ret == VTD_CONTEXT_TT_PASS_THROUGH;
}

/* Return whether the device is using IOMMU translation. */
static bool vtd_switch_address_space(VTDAddressSpace *as)
{
    bool use_iommu;

    assert(as);

    use_iommu = as->iommu_state->dmar_enabled && !vtd_dev_pt_enabled(as);

    trace_vtd_switch_address_space(pci_bus_num(as->bus),
                                   VTD_PCI_SLOT(as->devfn),
                                   VTD_PCI_FUNC(as->devfn),
                                   use_iommu);

    /* Turn off first then on the other */
    if (use_iommu) {
        memory_region_set_enabled(&as->sys_alias, false);
        memory_region_set_enabled(&as->iommu, true);
    } else {
        memory_region_set_enabled(&as->iommu, false);
        memory_region_set_enabled(&as->sys_alias, true);
    }

    return use_iommu;
}

static void vtd_switch_address_space_all(IntelIOMMUState *s)
{
    GHashTableIter iter;
    VTDBus *vtd_bus;
    int i;

    g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&vtd_bus)) {
        for (i = 0; i < X86_IOMMU_PCI_DEVFN_MAX; i++) {
            if (!vtd_bus->dev_as[i]) {
                continue;
            }
            vtd_switch_address_space(vtd_bus->dev_as[i]);
        }
    }
}

static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
{
    return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
}
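
/*
 * The source id mirrors the PCI requester id: bus number in bits 15:8,
 * devfn in bits 7:0.  E.g. bus 0x1a, device 3, function 2 gives
 * devfn (3 << 3) | 2 = 0x1a and thus source_id 0x1a1a.
 */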

static const bool vtd_qualified_faults[] = {
    [VTD_FR_RESERVED] = false,
    [VTD_FR_ROOT_ENTRY_P] = false,
    [VTD_FR_CONTEXT_ENTRY_P] = true,
    [VTD_FR_CONTEXT_ENTRY_INV] = true,
    [VTD_FR_ADDR_BEYOND_MGAW] = true,
    [VTD_FR_WRITE] = true,
    [VTD_FR_READ] = true,
    [VTD_FR_PAGING_ENTRY_INV] = true,
    [VTD_FR_ROOT_TABLE_INV] = false,
    [VTD_FR_CONTEXT_TABLE_INV] = false,
    [VTD_FR_ROOT_ENTRY_RSVD] = false,
    [VTD_FR_PAGING_ENTRY_RSVD] = true,
    [VTD_FR_CONTEXT_ENTRY_TT] = true,
    [VTD_FR_RESERVED_ERR] = false,
    [VTD_FR_MAX] = false,
};

/* To see if a fault condition is "qualified", which is reported to software
 * only if the FPD field in the context-entry used to process the faulting
 * request is 0.
 */
static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
{
    return vtd_qualified_faults[fault];
}

static inline bool vtd_is_interrupt_addr(hwaddr addr)
{
    return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
}

static void vtd_pt_enable_fast_path(IntelIOMMUState *s, uint16_t source_id)
{
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    bool success = false;

    vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
    if (!vtd_bus) {
        goto out;
    }

    vtd_as = vtd_bus->dev_as[VTD_SID_TO_DEVFN(source_id)];
    if (!vtd_as) {
        goto out;
    }

    if (vtd_switch_address_space(vtd_as) == false) {
        /* We switched off IOMMU region successfully. */
        success = true;
    }

out:
    trace_vtd_pt_enable_fast_path(source_id, success);
}

/* Map dev to context-entry then do a paging-structures walk to do an iommu
 * translation.
 *
 * Called from RCU critical section.
 *
 * @bus_num: The bus number
 * @devfn: The devfn, which is the combination of device and function numbers
 * @is_write: The access is a write operation
 * @entry: IOMMUTLBEntry that contains the addr to be translated and result
 *
 * Returns true if translation is successful, otherwise false.
 */
static bool vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
                                   uint8_t devfn, hwaddr addr, bool is_write,
                                   IOMMUTLBEntry *entry)
{
    IntelIOMMUState *s = vtd_as->iommu_state;
    VTDContextEntry ce;
    uint8_t bus_num = pci_bus_num(bus);
    VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
    uint64_t slpte, page_mask;
    uint32_t level;
    uint16_t source_id = vtd_make_source_id(bus_num, devfn);
    int ret_fr;
    bool is_fpd_set = false;
    bool reads = true;
    bool writes = true;
    VTDIOTLBEntry *iotlb_entry;

    /*
     * We have standalone memory region for interrupt addresses, we
     * should never receive translation requests in this region.
     */
    assert(!vtd_is_interrupt_addr(addr));

    /* Try to fetch slpte from IOTLB */
    iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
    if (iotlb_entry) {
        trace_vtd_iotlb_page_hit(source_id, addr, iotlb_entry->slpte,
                                 iotlb_entry->domain_id);
        slpte = iotlb_entry->slpte;
        reads = iotlb_entry->read_flags;
        writes = iotlb_entry->write_flags;
        page_mask = iotlb_entry->mask;
        goto out;
    }

    /* Try to fetch context-entry from cache first */
    if (cc_entry->context_cache_gen == s->context_cache_gen) {
        trace_vtd_iotlb_cc_hit(bus_num, devfn, cc_entry->context_entry.hi,
                               cc_entry->context_entry.lo,
                               cc_entry->context_cache_gen);
        ce = cc_entry->context_entry;
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
    } else {
        ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
        is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
        if (ret_fr) {
            ret_fr = -ret_fr;
            if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
                trace_vtd_fault_disabled();
            } else {
                vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
            }
            goto error;
        }
        /* Update context-cache */
        trace_vtd_iotlb_cc_update(bus_num, devfn, ce.hi, ce.lo,
                                  cc_entry->context_cache_gen,
                                  s->context_cache_gen);
        cc_entry->context_entry = ce;
        cc_entry->context_cache_gen = s->context_cache_gen;
    }

    /*
     * We don't need to translate for pass-through context entries.
     * Also, let's ignore IOTLB caching as well for PT devices.
     */
    if (vtd_ce_get_type(&ce) == VTD_CONTEXT_TT_PASS_THROUGH) {
        entry->iova = addr & VTD_PAGE_MASK;
        entry->translated_addr = entry->iova;
        entry->addr_mask = VTD_PAGE_MASK;
        entry->perm = IOMMU_RW;
        trace_vtd_translate_pt(source_id, entry->iova);

        /*
         * When this happens, it means firstly caching-mode is not
         * enabled, and this is the first passthrough translation for
         * the device. Let's enable the fast path for passthrough.
         *
         * When passthrough is disabled again for the device, we can
         * capture it via the context entry invalidation, then the
         * IOMMU region can be swapped back.
         */
        vtd_pt_enable_fast_path(s, source_id);

        return true;
    }

    ret_fr = vtd_iova_to_slpte(&ce, addr, is_write, &slpte, &level,
                               &reads, &writes);
    if (ret_fr) {
        ret_fr = -ret_fr;
        if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
            trace_vtd_fault_disabled();
        } else {
            vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
        }
        goto error;
    }

    page_mask = vtd_slpt_level_page_mask(level);
    vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
                     reads, writes, level);
out:
    entry->iova = addr & page_mask;
    entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
    entry->addr_mask = ~page_mask;
    entry->perm = IOMMU_ACCESS_FLAG(reads, writes);
    return true;

error:
    entry->iova = 0;
    entry->translated_addr = 0;
    entry->addr_mask = 0;
    entry->perm = IOMMU_NONE;
    return false;
}

static void vtd_root_table_setup(IntelIOMMUState *s)
{
    s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
    s->root_extended = s->root & VTD_RTADDR_RTT;
    s->root &= VTD_RTADDR_ADDR_MASK;

    trace_vtd_reg_dmar_root(s->root, s->root_extended);
}

static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
                               uint32_t index, uint32_t mask)
{
    x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
}

static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
{
    uint64_t value = 0;
    value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
    s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
    s->intr_root = value & VTD_IRTA_ADDR_MASK;
    s->intr_eime = value & VTD_IRTA_EIME;

    /* Notify global invalidation */
    vtd_iec_notify_all(s, true, 0, 0);

    trace_vtd_reg_ir_root(s->intr_root, s->intr_size);
}

static void vtd_iommu_replay_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        memory_region_iommu_replay_all(&node->vtd_as->iommu);
    }
}

static void vtd_context_global_invalidate(IntelIOMMUState *s)
{
    trace_vtd_inv_desc_cc_global();
    s->context_cache_gen++;
    if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
        vtd_reset_context_cache(s);
    }
    vtd_switch_address_space_all(s);
    /*
     * From VT-d spec 6.5.2.1, a global context entry invalidation
     * should be followed by an IOTLB global invalidation, so we should
     * be safe even without this. However, let's replay the region as
     * well to be safer, and go back here when we need finer tunes for
     * VT-d emulation codes.
     */
    vtd_iommu_replay_all(s);
}

/* Do a context-cache device-selective invalidation.
 * @func_mask: FM field after shifting
 */
static void vtd_context_device_invalidate(IntelIOMMUState *s,
                                          uint16_t source_id,
                                          uint16_t func_mask)
{
    uint16_t mask;
    VTDBus *vtd_bus;
    VTDAddressSpace *vtd_as;
    uint8_t bus_n, devfn;
    uint16_t devfn_it;

    trace_vtd_inv_desc_cc_devices(source_id, func_mask);

    switch (func_mask & 3) {
    case 0:
        mask = 0; /* No bits in the SID field masked */
        break;
    case 1:
        mask = 4; /* Mask bit 2 in the SID field */
        break;
    case 2:
        mask = 6; /* Mask bits 2:1 in the SID field */
        break;
    case 3:
        mask = 7; /* Mask bits 2:0 in the SID field */
        break;
    }
    mask = ~mask;
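    /*
     * Worked example: func_mask 2 ignores SID bits 2:1, so mask becomes
     * ~6.  A source_id with devfn 0x19 then matches devfn_it values
     * 0x19, 0x1b, 0x1d and 0x1f -- the four functions that differ only
     * in the masked bits.
     */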
bc535e59
PX
1278
1279 bus_n = VTD_SID_TO_BUS(source_id);
1280 vtd_bus = vtd_find_as_from_bus_num(s, bus_n);
7df953bd 1281 if (vtd_bus) {
d92fa2dc 1282 devfn = VTD_SID_TO_DEVFN(source_id);
04af0e18 1283 for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
7df953bd 1284 vtd_as = vtd_bus->dev_as[devfn_it];
d92fa2dc 1285 if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
bc535e59
PX
1286 trace_vtd_inv_desc_cc_device(bus_n, VTD_PCI_SLOT(devfn_it),
1287 VTD_PCI_FUNC(devfn_it));
d92fa2dc 1288 vtd_as->context_cache_entry.context_cache_gen = 0;
dbaabb25
PX
1289 /*
1290 * Do switch address space when needed, in case if the
1291 * device passthrough bit is switched.
1292 */
1293 vtd_switch_address_space(vtd_as);
dd4d607e
PX
1294 /*
1295 * So a device is moving out of (or moving into) a
1296 * domain, a replay() suites here to notify all the
1297 * IOMMU_NOTIFIER_MAP registers about this change.
1298 * This won't bring bad even if we have no such
1299 * notifier registered - the IOMMU notification
1300 * framework will skip MAP notifications if that
1301 * happened.
1302 */
1303 memory_region_iommu_replay_all(&vtd_as->iommu);
d92fa2dc
LT
1304 }
1305 }
1306 }
1307}
1308
1da12ec4
LT
1309/* Context-cache invalidation
1310 * Returns the Context Actual Invalidation Granularity.
1311 * @val: the content of the CCMD_REG
1312 */
1313static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
1314{
1315 uint64_t caig;
1316 uint64_t type = val & VTD_CCMD_CIRG_MASK;
1317
1318 switch (type) {
d92fa2dc 1319 case VTD_CCMD_DOMAIN_INVL:
d92fa2dc 1320 /* Fall through */
1da12ec4 1321 case VTD_CCMD_GLOBAL_INVL:
1da12ec4 1322 caig = VTD_CCMD_GLOBAL_INVL_A;
d92fa2dc 1323 vtd_context_global_invalidate(s);
1da12ec4
LT
1324 break;
1325
1326 case VTD_CCMD_DEVICE_INVL:
1da12ec4 1327 caig = VTD_CCMD_DEVICE_INVL_A;
d92fa2dc 1328 vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
1da12ec4
LT
1329 break;
1330
1331 default:
7feb51b7 1332 trace_vtd_err("Context cache invalidate type error.");
1da12ec4
LT
1333 caig = 0;
1334 }
1335 return caig;
1336}
1337
b5a280c0
LT
1338static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
1339{
7feb51b7 1340 trace_vtd_inv_desc_iotlb_global();
b5a280c0 1341 vtd_reset_iotlb(s);
dd4d607e 1342 vtd_iommu_replay_all(s);
b5a280c0
LT
1343}
1344
1345static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
1346{
dd4d607e
PX
1347 IntelIOMMUNotifierNode *node;
1348 VTDContextEntry ce;
1349 VTDAddressSpace *vtd_as;
1350
7feb51b7
PX
1351 trace_vtd_inv_desc_iotlb_domain(domain_id);
1352
b5a280c0
LT
1353 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
1354 &domain_id);
dd4d607e
PX
1355
1356 QLIST_FOREACH(node, &s->notifiers_list, next) {
1357 vtd_as = node->vtd_as;
1358 if (!vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
1359 vtd_as->devfn, &ce) &&
1360 domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
1361 memory_region_iommu_replay_all(&vtd_as->iommu);
1362 }
1363 }
1364}
1365
1366static int vtd_page_invalidate_notify_hook(IOMMUTLBEntry *entry,
1367 void *private)
1368{
1369 memory_region_notify_iommu((MemoryRegion *)private, *entry);
1370 return 0;
1371}
1372
1373static void vtd_iotlb_page_invalidate_notify(IntelIOMMUState *s,
1374 uint16_t domain_id, hwaddr addr,
1375 uint8_t am)
1376{
1377 IntelIOMMUNotifierNode *node;
1378 VTDContextEntry ce;
1379 int ret;
1380
1381 QLIST_FOREACH(node, &(s->notifiers_list), next) {
1382 VTDAddressSpace *vtd_as = node->vtd_as;
1383 ret = vtd_dev_to_context_entry(s, pci_bus_num(vtd_as->bus),
1384 vtd_as->devfn, &ce);
1385 if (!ret && domain_id == VTD_CONTEXT_ENTRY_DID(ce.hi)) {
1386 vtd_page_walk(&ce, addr, addr + (1 << am) * VTD_PAGE_SIZE,
1387 vtd_page_invalidate_notify_hook,
1388 (void *)&vtd_as->iommu, true);
1389 }
1390 }
b5a280c0
LT
1391}
1392
1393static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
1394 hwaddr addr, uint8_t am)
1395{
1396 VTDIOTLBPageInvInfo info;
1397
7feb51b7
PX
1398 trace_vtd_inv_desc_iotlb_pages(domain_id, addr, am);
1399
b5a280c0
LT
1400 assert(am <= VTD_MAMV);
1401 info.domain_id = domain_id;
d66b969b 1402 info.addr = addr;
b5a280c0
LT
1403 info.mask = ~((1 << am) - 1);
1404 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
dd4d607e 1405 vtd_iotlb_page_invalidate_notify(s, domain_id, addr, am);
b5a280c0
LT
1406}
1407
1da12ec4
LT
1408/* Flush IOTLB
1409 * Returns the IOTLB Actual Invalidation Granularity.
1410 * @val: the content of the IOTLB_REG
1411 */
1412static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
1413{
1414 uint64_t iaig;
1415 uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
b5a280c0
LT
1416 uint16_t domain_id;
1417 hwaddr addr;
1418 uint8_t am;
1da12ec4
LT
1419
1420 switch (type) {
1421 case VTD_TLB_GLOBAL_FLUSH:
1da12ec4 1422 iaig = VTD_TLB_GLOBAL_FLUSH_A;
b5a280c0 1423 vtd_iotlb_global_invalidate(s);
1da12ec4
LT
1424 break;
1425
1426 case VTD_TLB_DSI_FLUSH:
b5a280c0 1427 domain_id = VTD_TLB_DID(val);
1da12ec4 1428 iaig = VTD_TLB_DSI_FLUSH_A;
b5a280c0 1429 vtd_iotlb_domain_invalidate(s, domain_id);
1da12ec4
LT
1430 break;
1431
1432 case VTD_TLB_PSI_FLUSH:
b5a280c0
LT
1433 domain_id = VTD_TLB_DID(val);
1434 addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
1435 am = VTD_IVA_AM(addr);
1436 addr = VTD_IVA_ADDR(addr);
b5a280c0 1437 if (am > VTD_MAMV) {
7feb51b7 1438 trace_vtd_err("IOTLB PSI flush: address mask overflow.");
b5a280c0
LT
1439 iaig = 0;
1440 break;
1441 }
1da12ec4 1442 iaig = VTD_TLB_PSI_FLUSH_A;
b5a280c0 1443 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1da12ec4
LT
1444 break;
1445
1446 default:
7feb51b7 1447 trace_vtd_err("IOTLB flush: invalid granularity.");
1da12ec4
LT
1448 iaig = 0;
1449 }
1450 return iaig;
1451}
1452
8991c460 1453static void vtd_fetch_inv_desc(IntelIOMMUState *s);
ed7b8fbc
LT
1454
1455static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
1456{
1457 return s->qi_enabled && (s->iq_tail == s->iq_head) &&
1458 (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
1459}
1460
1461static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
1462{
1463 uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
1464
7feb51b7
PX
1465 trace_vtd_inv_qi_enable(en);
1466
ed7b8fbc 1467 if (en) {
8991c460
LP
1468 s->iq = iqa_val & VTD_IQA_IQA_MASK;
1469 /* 2^(x+8) entries */
1470 s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
1471 s->qi_enabled = true;
1472 trace_vtd_inv_qi_setup(s->iq, s->iq_size);
1473 /* Ok - report back to driver */
1474 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
1475
1476 if (s->iq_tail != 0) {
1477 /*
1478 * This is a spec violation but Windows guests are known to set up
1479 * Queued Invalidation this way so we allow the write and process
1480 * Invalidation Descriptors right away.
1481 */
1482 trace_vtd_warn_invalid_qi_tail(s->iq_tail);
1483 if (!(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
1484 vtd_fetch_inv_desc(s);
1485 }
ed7b8fbc
LT
1486 }
1487 } else {
1488 if (vtd_queued_inv_disable_check(s)) {
1489 /* disable Queued Invalidation */
1490 vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
1491 s->iq_head = 0;
1492 s->qi_enabled = false;
1493 /* Ok - report back to driver */
1494 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
1495 } else {
7feb51b7 1496 trace_vtd_err_qi_disable(s->iq_head, s->iq_tail, s->iq_last_desc_type);
ed7b8fbc
LT
1497 }
1498 }
1499}
1500
1da12ec4
LT
1501/* Set Root Table Pointer */
1502static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
1503{
1da12ec4
LT
1504 vtd_root_table_setup(s);
1505 /* Ok - report back to driver */
1506 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
1507}
1508
a5861439
PX
1509/* Set Interrupt Remap Table Pointer */
1510static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
1511{
a5861439
PX
1512 vtd_interrupt_remap_table_setup(s);
1513 /* Ok - report back to driver */
1514 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
1515}
1516
1da12ec4
LT
1517/* Handle Translation Enable/Disable */
1518static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
1519{
558e0024
PX
1520 if (s->dmar_enabled == en) {
1521 return;
1522 }
1523
7feb51b7 1524 trace_vtd_dmar_enable(en);
1da12ec4
LT
1525
1526 if (en) {
1527 s->dmar_enabled = true;
1528 /* Ok - report back to driver */
1529 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
1530 } else {
1531 s->dmar_enabled = false;
1532
1533 /* Clear the index of Fault Recording Register */
1534 s->next_frcd_reg = 0;
1535 /* Ok - report back to driver */
1536 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
1537 }
558e0024
PX
1538
1539 vtd_switch_address_space_all(s);
1da12ec4
LT
1540}
1541
80de52ba
PX
1542/* Handle Interrupt Remap Enable/Disable */
1543static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
1544{
7feb51b7 1545 trace_vtd_ir_enable(en);
80de52ba
PX
1546
1547 if (en) {
1548 s->intr_enabled = true;
1549 /* Ok - report back to driver */
1550 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
1551 } else {
1552 s->intr_enabled = false;
1553 /* Ok - report back to driver */
1554 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
1555 }
1556}
1557
1da12ec4
LT
1558/* Handle write to Global Command Register */
1559static void vtd_handle_gcmd_write(IntelIOMMUState *s)
1560{
1561 uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
1562 uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
1563 uint32_t changed = status ^ val;
1564
7feb51b7 1565 trace_vtd_reg_write_gcmd(status, val);
1da12ec4
LT
1566 if (changed & VTD_GCMD_TE) {
1567 /* Translation enable/disable */
1568 vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
1569 }
1570 if (val & VTD_GCMD_SRTP) {
1571 /* Set/update the root-table pointer */
1572 vtd_handle_gcmd_srtp(s);
1573 }
ed7b8fbc
LT
1574 if (changed & VTD_GCMD_QIE) {
1575 /* Queued Invalidation Enable */
1576 vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
1577 }
a5861439
PX
1578 if (val & VTD_GCMD_SIRTP) {
1579 /* Set/update the interrupt remapping root-table pointer */
1580 vtd_handle_gcmd_sirtp(s);
1581 }
80de52ba
PX
1582 if (changed & VTD_GCMD_IRE) {
1583 /* Interrupt remap enable/disable */
1584 vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
1585 }
1da12ec4
LT
1586}
1587
1588/* Handle write to Context Command Register */
1589static void vtd_handle_ccmd_write(IntelIOMMUState *s)
1590{
1591 uint64_t ret;
1592 uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);
1593
1594 /* Context-cache invalidation request */
1595 if (val & VTD_CCMD_ICC) {
ed7b8fbc 1596 if (s->qi_enabled) {
7feb51b7
PX
1597 trace_vtd_err("Queued Invalidation enabled, "
1598 "should not use register-based invalidation");
ed7b8fbc
LT
1599 return;
1600 }
1da12ec4
LT
1601 ret = vtd_context_cache_invalidate(s, val);
1602 /* Invalidation completed. Change something to show */
1603 vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
1604 ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
1605 ret);
1da12ec4
LT
1606 }
1607}
1608
1609/* Handle write to IOTLB Invalidation Register */
1610static void vtd_handle_iotlb_write(IntelIOMMUState *s)
1611{
1612 uint64_t ret;
1613 uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);
1614
1615 /* IOTLB invalidation request */
1616 if (val & VTD_TLB_IVT) {
ed7b8fbc 1617 if (s->qi_enabled) {
7feb51b7
PX
1618 trace_vtd_err("Queued Invalidation enabled, "
1619 "should not use register-based invalidation.");
ed7b8fbc
LT
1620 return;
1621 }
1da12ec4
LT
1622 ret = vtd_iotlb_flush(s, val);
1623 /* Invalidation completed. Change something to show */
1624 vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
1625 ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
1626 VTD_TLB_FLUSH_GRANU_MASK_A, ret);
1da12ec4
LT
1627 }
1628}
1629
ed7b8fbc
LT
1630/* Fetch an Invalidation Descriptor from the Invalidation Queue */
1631static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
1632 VTDInvDesc *inv_desc)
1633{
1634 dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
1635 if (dma_memory_read(&address_space_memory, addr, inv_desc,
1636 sizeof(*inv_desc))) {
7feb51b7 1637 trace_vtd_err("Read INV DESC failed.");
ed7b8fbc
LT
1638 inv_desc->lo = 0;
1639 inv_desc->hi = 0;
ed7b8fbc
LT
1640 return false;
1641 }
1642 inv_desc->lo = le64_to_cpu(inv_desc->lo);
1643 inv_desc->hi = le64_to_cpu(inv_desc->hi);
1644 return true;
1645}
1646
1647static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1648{
1649 if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
1650 (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
bc535e59 1651 trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
ed7b8fbc
LT
1652 return false;
1653 }
1654 if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
1655 /* Status Write */
1656 uint32_t status_data = (uint32_t)(inv_desc->lo >>
1657 VTD_INV_DESC_WAIT_DATA_SHIFT);
1658
1659 assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
1660
1661 /* FIXME: need to be masked with HAW? */
1662 dma_addr_t status_addr = inv_desc->hi;
bc535e59 1663 trace_vtd_inv_desc_wait_sw(status_addr, status_data);
ed7b8fbc
LT
1664 status_data = cpu_to_le32(status_data);
1665 if (dma_memory_write(&address_space_memory, status_addr, &status_data,
1666 sizeof(status_data))) {
bc535e59 1667 trace_vtd_inv_desc_wait_write_fail(inv_desc->hi, inv_desc->lo);
ed7b8fbc
LT
1668 return false;
1669 }
1670 } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
1671 /* Interrupt flag */
ed7b8fbc
LT
1672 vtd_generate_completion_event(s);
1673 } else {
bc535e59 1674 trace_vtd_inv_desc_wait_invalid(inv_desc->hi, inv_desc->lo);
ed7b8fbc
LT
1675 return false;
1676 }
1677 return true;
1678}

static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
                                           VTDInvDesc *inv_desc)
{
    uint16_t sid, fmask;

    if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
    case VTD_INV_DESC_CC_DOMAIN:
        trace_vtd_inv_desc_cc_domain(
            (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
        /* Fall through */
    case VTD_INV_DESC_CC_GLOBAL:
        vtd_context_global_invalidate(s);
        break;

    case VTD_INV_DESC_CC_DEVICE:
        sid = VTD_INV_DESC_CC_SID(inv_desc->lo);
        fmask = VTD_INV_DESC_CC_FM(inv_desc->lo);
        vtd_context_device_invalidate(s, sid, fmask);
        break;

    default:
        trace_vtd_inv_desc_cc_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
{
    uint16_t domain_id;
    uint8_t am;
    hwaddr addr;

    if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
    case VTD_INV_DESC_IOTLB_GLOBAL:
        vtd_iotlb_global_invalidate(s);
        break;

    case VTD_INV_DESC_IOTLB_DOMAIN:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        vtd_iotlb_domain_invalidate(s, domain_id);
        break;

    case VTD_INV_DESC_IOTLB_PAGE:
        domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
        addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
        am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
        if (am > VTD_MAMV) {
            trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
            return false;
        }
        vtd_iotlb_page_invalidate(s, domain_id, addr, am);
        break;

    default:
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }
    return true;
}

static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
                                     VTDInvDesc *inv_desc)
{
    trace_vtd_inv_desc_iec(inv_desc->iec.granularity,
                           inv_desc->iec.index,
                           inv_desc->iec.index_mask);

    vtd_iec_notify_all(s, !inv_desc->iec.granularity,
                       inv_desc->iec.index,
                       inv_desc->iec.index_mask);
    return true;
}

static bool vtd_process_device_iotlb_desc(IntelIOMMUState *s,
                                          VTDInvDesc *inv_desc)
{
    VTDAddressSpace *vtd_dev_as;
    IOMMUTLBEntry entry;
    struct VTDBus *vtd_bus;
    hwaddr addr;
    uint64_t sz;
    uint16_t sid;
    uint8_t devfn;
    bool size;
    uint8_t bus_num;

    addr = VTD_INV_DESC_DEVICE_IOTLB_ADDR(inv_desc->hi);
    sid = VTD_INV_DESC_DEVICE_IOTLB_SID(inv_desc->lo);
    devfn = sid & 0xff;
    bus_num = sid >> 8;
    size = VTD_INV_DESC_DEVICE_IOTLB_SIZE(inv_desc->hi);

    if ((inv_desc->lo & VTD_INV_DESC_DEVICE_IOTLB_RSVD_LO) ||
        (inv_desc->hi & VTD_INV_DESC_DEVICE_IOTLB_RSVD_HI)) {
        trace_vtd_inv_desc_iotlb_invalid(inv_desc->hi, inv_desc->lo);
        return false;
    }

    vtd_bus = vtd_find_as_from_bus_num(s, bus_num);
    if (!vtd_bus) {
        goto done;
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];
    if (!vtd_dev_as) {
        goto done;
    }

    /* According to ATS spec table 2.4:
     * S = 0, bits 15:12 = xxxx     range size: 4K
     * S = 1, bits 15:12 = xxx0     range size: 8K
     * S = 1, bits 15:12 = xx01     range size: 16K
     * S = 1, bits 15:12 = x011     range size: 32K
     * S = 1, bits 15:12 = 0111     range size: 64K
     * ...
     */
    if (size) {
        sz = (VTD_PAGE_SIZE * 2) << cto64(addr >> VTD_PAGE_SHIFT);
        addr &= ~(sz - 1);
    } else {
        sz = VTD_PAGE_SIZE;
    }
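
    /*
     * Worked example of the decode above (a sketch matching the ATS table):
     * with S = 1 and addr bits 15:12 = 0111b, addr >> VTD_PAGE_SHIFT has
     * three trailing ones, so cto64() returns 3 and
     * sz = (4K * 2) << 3 = 64K; addr is then aligned down to a 64K boundary.
     */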

    entry.target_as = &vtd_dev_as->as;
    entry.addr_mask = sz - 1;
    entry.iova = addr;
    entry.perm = IOMMU_NONE;
    entry.translated_addr = 0;
    memory_region_notify_iommu(&vtd_dev_as->iommu, entry);

done:
    return true;
}

static bool vtd_process_inv_desc(IntelIOMMUState *s)
{
    VTDInvDesc inv_desc;
    uint8_t desc_type;

    trace_vtd_inv_qi_head(s->iq_head);
    if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
        s->iq_last_desc_type = VTD_INV_DESC_NONE;
        return false;
    }
    desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
    /* FIXME: should update at first or at last? */
    s->iq_last_desc_type = desc_type;

    switch (desc_type) {
    case VTD_INV_DESC_CC:
        trace_vtd_inv_desc("context-cache", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_context_cache_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IOTLB:
        trace_vtd_inv_desc("iotlb", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_WAIT:
        trace_vtd_inv_desc("wait", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_wait_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_IEC:
        trace_vtd_inv_desc("iec", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
            return false;
        }
        break;

    case VTD_INV_DESC_DEVICE:
        trace_vtd_inv_desc("device", inv_desc.hi, inv_desc.lo);
        if (!vtd_process_device_iotlb_desc(s, &inv_desc)) {
            return false;
        }
        break;

    default:
        trace_vtd_inv_desc_invalid(inv_desc.hi, inv_desc.lo);
        return false;
    }
    s->iq_head++;
    if (s->iq_head == s->iq_size) {
        s->iq_head = 0;
    }
    return true;
}

/* Try to fetch and process more Invalidation Descriptors */
static void vtd_fetch_inv_desc(IntelIOMMUState *s)
{
    trace_vtd_inv_qi_fetch();

    if (s->iq_tail >= s->iq_size) {
        /* Detect an invalid Tail pointer */
        trace_vtd_err_qi_tail(s->iq_tail, s->iq_size);
        vtd_handle_inv_queue_error(s);
        return;
    }
    while (s->iq_head != s->iq_tail) {
        if (!vtd_process_inv_desc(s)) {
            /* Invalidation Queue Errors */
            vtd_handle_inv_queue_error(s);
            break;
        }
        /* Must update the IQH_REG in time */
        vtd_set_quad_raw(s, DMAR_IQH_REG,
                         (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
                         VTD_IQH_QH_MASK);
    }
}
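
/*
 * Worked example of the IQH encoding above (a sketch, assuming
 * VTD_IQH_QH_SHIFT is 4 because each descriptor is 16 bytes): after the
 * head advances to descriptor 3, the loop stores 3 << 4 = 0x30 into
 * IQH_REG, i.e. the register exposes the byte offset of the next
 * descriptor to fetch rather than its index.
 */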

/* Handle write to Invalidation Queue Tail Register */
static void vtd_handle_iqt_write(IntelIOMMUState *s)
{
    uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);

    s->iq_tail = VTD_IQT_QT(val);
    trace_vtd_inv_qi_tail(s->iq_tail);

    if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
        /* Process Invalidation Queue here */
        vtd_fetch_inv_desc(s);
    }
}

static void vtd_handle_fsts_write(IntelIOMMUState *s)
{
    uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
    uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
    uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;

    if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
        trace_vtd_fsts_clear_ip();
    }
    /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
     * Descriptors if there are any when Queued Invalidation is enabled?
     */
}

static void vtd_handle_fectl_write(IntelIOMMUState *s)
{
    uint32_t fectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);

    trace_vtd_reg_write_fectl(fectl_reg);

    if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
    }
}

static void vtd_handle_ics_write(IntelIOMMUState *s)
{
    uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
    uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
        trace_vtd_reg_ics_clear_ip();
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static void vtd_handle_iectl_write(IntelIOMMUState *s)
{
    uint32_t iectl_reg;
    /* FIXME: when software clears the IM field, check the IP field. But do we
     * need to compare the old value and the new value to conclude that
     * software clears the IM field? Or just check if the IM field is zero?
     */
    iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);

    trace_vtd_reg_write_iectl(iectl_reg);

    if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
        vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
        vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
    }
}

static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
{
    IntelIOMMUState *s = opaque;
    uint64_t val;

    trace_vtd_reg_read(addr, size);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Read MMIO over range.");
        return (uint64_t)-1;
    }

    switch (addr) {
    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            val = s->root & ((1ULL << 32) - 1);
        } else {
            val = s->root;
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        val = s->root >> 32;
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
        if (size == 4) {
            val = val & ((1ULL << 32) - 1);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        val = s->iq >> 32;
        break;

    default:
        if (size == 4) {
            val = vtd_get_long(s, addr);
        } else {
            val = vtd_get_quad(s, addr);
        }
    }

    return val;
}

static void vtd_mem_write(void *opaque, hwaddr addr,
                          uint64_t val, unsigned size)
{
    IntelIOMMUState *s = opaque;

    trace_vtd_reg_write(addr, size, val);

    if (addr + size > DMAR_REG_SIZE) {
        trace_vtd_err("Write MMIO over range.");
        return;
    }

    switch (addr) {
    /* Global Command Register, 32-bit */
    case DMAR_GCMD_REG:
        vtd_set_long(s, addr, val);
        vtd_handle_gcmd_write(s);
        break;

    /* Context Command Register, 64-bit */
    case DMAR_CCMD_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_ccmd_write(s);
        }
        break;

    case DMAR_CCMD_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ccmd_write(s);
        break;

    /* IOTLB Invalidation Register, 64-bit */
    case DMAR_IOTLB_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            vtd_handle_iotlb_write(s);
        }
        break;

    case DMAR_IOTLB_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iotlb_write(s);
        break;

    /* Invalidate Address Register, 64-bit */
    case DMAR_IVA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IVA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Status Register, 32-bit */
    case DMAR_FSTS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fsts_write(s);
        break;

    /* Fault Event Control Register, 32-bit */
    case DMAR_FECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_fectl_write(s);
        break;

    /* Fault Event Data Register, 32-bit */
    case DMAR_FEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Address Register, 32-bit */
    case DMAR_FEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Event Upper Address Register, 32-bit */
    case DMAR_FEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Protected Memory Enable Register, 32-bit */
    case DMAR_PMEN_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Root Table Address Register, 64-bit */
    case DMAR_RTADDR_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_RTADDR_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Queue Tail Register, 64-bit */
    case DMAR_IQT_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        vtd_handle_iqt_write(s);
        break;

    case DMAR_IQT_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* Bits 63:19 of IQT_REG are RsvdZ, do nothing here */
        break;

    /* Invalidation Queue Address Register, 64-bit */
    case DMAR_IQA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IQA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Completion Status Register, 32-bit */
    case DMAR_ICS_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_ics_write(s);
        break;

    /* Invalidation Event Control Register, 32-bit */
    case DMAR_IECTL_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        vtd_handle_iectl_write(s);
        break;

    /* Invalidation Event Data Register, 32-bit */
    case DMAR_IEDATA_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Address Register, 32-bit */
    case DMAR_IEADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Invalidation Event Upper Address Register, 32-bit */
    case DMAR_IEUADDR_REG:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    /* Fault Recording Registers, 128-bit */
    case DMAR_FRCD_REG_0_0:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_FRCD_REG_0_1:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    case DMAR_FRCD_REG_0_2:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
            /* May clear bit 127 (Fault), update PPF */
            vtd_update_fsts_ppf(s);
        }
        break;

    case DMAR_FRCD_REG_0_3:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        /* May clear bit 127 (Fault), update PPF */
        vtd_update_fsts_ppf(s);
        break;

    case DMAR_IRTA_REG:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
        break;

    case DMAR_IRTA_REG_HI:
        assert(size == 4);
        vtd_set_long(s, addr, val);
        break;

    default:
        if (size == 4) {
            vtd_set_long(s, addr, val);
        } else {
            vtd_set_quad(s, addr, val);
        }
    }
}

static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
                                         IOMMUAccessFlags flag)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IOMMUTLBEntry iotlb = {
        /* We'll fill in the rest later. */
        .target_as = &address_space_memory,
    };
    bool success;

    if (likely(s->dmar_enabled)) {
        success = vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn,
                                         addr, flag & IOMMU_WO, &iotlb);
    } else {
        /* DMAR disabled, passthrough, use 4k page */
        iotlb.iova = addr & VTD_PAGE_MASK_4K;
        iotlb.translated_addr = addr & VTD_PAGE_MASK_4K;
        iotlb.addr_mask = ~VTD_PAGE_MASK_4K;
        iotlb.perm = IOMMU_RW;
        success = true;
    }

    if (likely(success)) {
        trace_vtd_dmar_translate(pci_bus_num(vtd_as->bus),
                                 VTD_PCI_SLOT(vtd_as->devfn),
                                 VTD_PCI_FUNC(vtd_as->devfn),
                                 iotlb.iova, iotlb.translated_addr,
                                 iotlb.addr_mask);
    } else {
        trace_vtd_err_dmar_translate(pci_bus_num(vtd_as->bus),
                                     VTD_PCI_SLOT(vtd_as->devfn),
                                     VTD_PCI_FUNC(vtd_as->devfn),
                                     iotlb.iova);
    }

    return iotlb;
}

static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
                                          IOMMUNotifierFlag old,
                                          IOMMUNotifierFlag new)
{
    VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    IntelIOMMUNotifierNode *node = NULL;
    IntelIOMMUNotifierNode *next_node = NULL;

    if (!s->caching_mode && new & IOMMU_NOTIFIER_MAP) {
        error_report("We need to set caching-mode=on for intel-iommu to enable "
                     "device assignment with IOMMU protection.");
        exit(1);
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        node = g_malloc0(sizeof(*node));
        node->vtd_as = vtd_as;
        QLIST_INSERT_HEAD(&s->notifiers_list, node, next);
        return;
    }

    /* Update the notifier node with new flags */
    QLIST_FOREACH_SAFE(node, &s->notifiers_list, next, next_node) {
        if (node->vtd_as == vtd_as) {
            if (new == IOMMU_NOTIFIER_NONE) {
                QLIST_REMOVE(node, next);
                g_free(node);
            }
            return;
        }
    }
}

static int vtd_post_load(void *opaque, int version_id)
{
    IntelIOMMUState *iommu = opaque;

    /*
     * Memory regions are dynamically turned on/off depending on
     * context entry configurations from the guest. After migration,
     * we need to make sure the memory regions are still correct.
     */
    vtd_switch_address_space_all(iommu);

    return 0;
}

static const VMStateDescription vtd_vmstate = {
    .name = "iommu-intel",
    .version_id = 1,
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .post_load = vtd_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(root, IntelIOMMUState),
        VMSTATE_UINT64(intr_root, IntelIOMMUState),
        VMSTATE_UINT64(iq, IntelIOMMUState),
        VMSTATE_UINT32(intr_size, IntelIOMMUState),
        VMSTATE_UINT16(iq_head, IntelIOMMUState),
        VMSTATE_UINT16(iq_tail, IntelIOMMUState),
        VMSTATE_UINT16(iq_size, IntelIOMMUState),
        VMSTATE_UINT16(next_frcd_reg, IntelIOMMUState),
        VMSTATE_UINT8_ARRAY(csr, IntelIOMMUState, DMAR_REG_SIZE),
        VMSTATE_UINT8(iq_last_desc_type, IntelIOMMUState),
        VMSTATE_BOOL(root_extended, IntelIOMMUState),
        VMSTATE_BOOL(dmar_enabled, IntelIOMMUState),
        VMSTATE_BOOL(qi_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_enabled, IntelIOMMUState),
        VMSTATE_BOOL(intr_eime, IntelIOMMUState),
        VMSTATE_END_OF_LIST()
    }
};

static const MemoryRegionOps vtd_mem_ops = {
    .read = vtd_mem_read,
    .write = vtd_mem_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 8,
    },
};
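
/*
 * Note on the sizes above (general memory API behavior, stated here as a
 * reader's aid): .valid rejects guest accesses narrower than 4 bytes or
 * wider than 8 bytes, while .impl tells the memory core which widths the
 * callbacks implement, so a valid 8-byte guest access can be forwarded as
 * a single vtd_mem_read(..., size = 8) call instead of being split.
 */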

static Property vtd_properties[] = {
    DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
    DEFINE_PROP_ON_OFF_AUTO("eim", IntelIOMMUState, intr_eim,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("x-buggy-eim", IntelIOMMUState, buggy_eim, false),
    DEFINE_PROP_BOOL("caching-mode", IntelIOMMUState, caching_mode, false),
    DEFINE_PROP_END_OF_LIST(),
};

/* Read IRTE entry with specific index */
static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
                        VTD_IR_TableEntry *entry, uint16_t sid)
{
    static const uint16_t vtd_svt_mask[VTD_SQ_MAX] =
        {0xffff, 0xfffb, 0xfff9, 0xfff8};
    dma_addr_t addr = 0x00;
    uint16_t mask, source_id;
    uint8_t bus, bus_max, bus_min;

    addr = iommu->intr_root + index * sizeof(*entry);
    if (dma_memory_read(&address_space_memory, addr, entry,
                        sizeof(*entry))) {
        trace_vtd_err("Memory read failed for IRTE.");
        return -VTD_FR_IR_ROOT_INVAL;
    }

    trace_vtd_ir_irte_get(index, le64_to_cpu(entry->data[1]),
                          le64_to_cpu(entry->data[0]));

    if (!entry->irte.present) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_ENTRY_P;
    }

    if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
        entry->irte.__reserved_2) {
        trace_vtd_err_irte(index, le64_to_cpu(entry->data[1]),
                           le64_to_cpu(entry->data[0]));
        return -VTD_FR_IR_IRTE_RSVD;
    }

    if (sid != X86_IOMMU_SID_INVALID) {
        /* Validate IRTE SID */
        source_id = le32_to_cpu(entry->irte.source_id);
        switch (entry->irte.sid_vtype) {
        case VTD_SVT_NONE:
            break;

        case VTD_SVT_ALL:
            mask = vtd_svt_mask[entry->irte.sid_q];
            if ((source_id & mask) != (sid & mask)) {
                trace_vtd_err_irte_sid(index, sid, source_id);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        case VTD_SVT_BUS:
            bus_max = source_id >> 8;
            bus_min = source_id & 0xff;
            bus = sid >> 8;
            if (bus > bus_max || bus < bus_min) {
                trace_vtd_err_irte_sid_bus(index, bus, bus_min, bus_max);
                return -VTD_FR_IR_SID_ERR;
            }
            break;

        default:
            trace_vtd_err_irte_svt(index, entry->irte.sid_vtype);
            /* Take this as verification failure. */
            return -VTD_FR_IR_SID_ERR;
        }
    }

    return 0;
}
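
/*
 * Worked example of the VTD_SVT_ALL masks above (a sketch): with sid_q = 3
 * the mask is 0xfff8, which ignores the low three SID bits (the PCI
 * function number), so an IRTE source_id of 01:00.0 would validate
 * requests from any function of device 01:00.
 */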

/* Fetch IRQ information of specific IR index */
static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
                             VTDIrq *irq, uint16_t sid)
{
    VTD_IR_TableEntry irte = {};
    int ret = 0;

    ret = vtd_irte_get(iommu, index, &irte, sid);
    if (ret) {
        return ret;
    }

    irq->trigger_mode = irte.irte.trigger_mode;
    irq->vector = irte.irte.vector;
    irq->delivery_mode = irte.irte.delivery_mode;
    irq->dest = le32_to_cpu(irte.irte.dest_id);
    if (!iommu->intr_eime) {
#define VTD_IR_APIC_DEST_MASK  (0xff00ULL)
#define VTD_IR_APIC_DEST_SHIFT (8)
        irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
                    VTD_IR_APIC_DEST_SHIFT;
    }
    irq->dest_mode = irte.irte.dest_mode;
    irq->redir_hint = irte.irte.redir_hint;

    trace_vtd_ir_remap(index, irq->trigger_mode, irq->vector,
                       irq->delivery_mode, irq->dest, irq->dest_mode);

    return 0;
}

/* Generate one MSI message from VTDIrq info */
static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
{
    VTD_MSIMessage msg = {};

    /* Generate address bits */
    msg.dest_mode = irq->dest_mode;
    msg.redir_hint = irq->redir_hint;
    msg.dest = irq->dest;
    msg.__addr_hi = irq->dest & 0xffffff00;
    msg.__addr_head = cpu_to_le32(0xfee);
    /* Keep this from original MSI address bits */
    msg.__not_used = irq->msi_addr_last_bits;

    /* Generate data bits */
    msg.vector = irq->vector;
    msg.delivery_mode = irq->delivery_mode;
    msg.level = 1;
    msg.trigger_mode = irq->trigger_mode;

    msg_out->address = msg.msi_addr;
    msg_out->data = msg.msi_data;
}
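
/*
 * Illustrative output of the generator above (a sketch using the standard
 * x86 MSI layout with the 0xfee head in address bits 31:20 and the
 * destination in bits 19:12): for an IRTE giving vector 0x31, physical
 * dest_mode and dest = 0x01 with EIM off, the generated address is
 * roughly 0xfee01000, and the data word carries the vector 0x31 in its
 * low byte with the level/trigger flags in bits 14:15.
 */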

/* Interrupt remapping for MSI/MSI-X entry */
static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
                                   MSIMessage *origin,
                                   MSIMessage *translated,
                                   uint16_t sid)
{
    int ret = 0;
    VTD_IR_MSIAddress addr;
    uint16_t index;
    VTDIrq irq = {};

    assert(origin && translated);

    trace_vtd_ir_remap_msi_req(origin->address, origin->data);

    if (!iommu || !iommu->intr_enabled) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    if (origin->address & VTD_MSI_ADDR_HI_MASK) {
        trace_vtd_err("MSI address high 32 bits non-zero when "
                      "Interrupt Remapping enabled.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
    if (addr.addr.__head != 0xfee) {
        trace_vtd_err("MSI addr low 32 bits invalid.");
        return -VTD_FR_IR_REQ_RSVD;
    }

    /* This is compatibility format, so no remapping is needed. */
    if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
        memcpy(translated, origin, sizeof(*origin));
        goto out;
    }

    index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
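
    /*
     * Worked example (a sketch, per the VT-d interrupt-format MSI address
     * layout used here, with index_l in address bits 19:5 and index_h in
     * bit 2): for index_l = 0x1234 and index_h = 1 the line above yields
     * index = (1 << 15) | 0x1234 = 0x9234.
     */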

#define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff)
#define VTD_IR_MSI_DATA_RESERVED  (0xffff0000)

    if (addr.addr.sub_valid) {
        /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
        index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
    }

    ret = vtd_remap_irq_get(iommu, index, &irq, sid);
    if (ret) {
        return ret;
    }

    if (addr.addr.sub_valid) {
        trace_vtd_ir_remap_type("MSI");
        if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
            trace_vtd_err_ir_msi_invalid(sid, origin->address, origin->data);
            return -VTD_FR_IR_REQ_RSVD;
        }
    } else {
        uint8_t vector = origin->data & 0xff;
        uint8_t trigger_mode = (origin->data >> MSI_DATA_TRIGGER_SHIFT) & 0x1;

        trace_vtd_ir_remap_type("IOAPIC");
        /* The IOAPIC entry vector should match the IRTE vector
         * (see vt-d spec 5.1.5.1). */
        if (vector != irq.vector) {
            trace_vtd_warn_ir_vector(sid, index, vector, irq.vector);
        }

        /* The Trigger Mode field must match the Trigger Mode in the IRTE
         * (see vt-d spec 5.1.5.1). */
        if (trigger_mode != irq.trigger_mode) {
            trace_vtd_warn_ir_trigger(sid, index, trigger_mode,
                                      irq.trigger_mode);
        }
    }

    /*
     * Keep the last two bits, since the guest OS might modify them;
     * preserving them does no harm.
     */
    irq.msi_addr_last_bits = addr.addr.__not_care;

    /* Translate VTDIrq to MSI message */
    vtd_generate_msi_message(&irq, translated);

out:
    trace_vtd_ir_remap_msi(origin->address, origin->data,
                           translated->address, translated->data);
    return 0;
}

static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
                         MSIMessage *dst, uint16_t sid)
{
    return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
                                   src, dst, sid);
}

static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
                                   uint64_t *data, unsigned size,
                                   MemTxAttrs attrs)
{
    return MEMTX_OK;
}

static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
                                    uint64_t value, unsigned size,
                                    MemTxAttrs attrs)
{
    int ret = 0;
    MSIMessage from = {}, to = {};
    uint16_t sid = X86_IOMMU_SID_INVALID;

    from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
    from.data = (uint32_t) value;

    if (!attrs.unspecified) {
        /* We have explicit Source ID */
        sid = attrs.requester_id;
    }

    ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
    if (ret) {
        /* TODO: report error */
        /* Drop this interrupt */
        return MEMTX_ERROR;
    }

    apic_get_class()->send_msi(&to);

    return MEMTX_OK;
}

static const MemoryRegionOps vtd_mem_ir_ops = {
    .read_with_attrs = vtd_mem_ir_read,
    .write_with_attrs = vtd_mem_ir_write,
    .endianness = DEVICE_LITTLE_ENDIAN,
    .impl = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
    .valid = {
        .min_access_size = 4,
        .max_access_size = 4,
    },
};

VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
{
    uintptr_t key = (uintptr_t)bus;
    VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
    VTDAddressSpace *vtd_dev_as;
    char name[128];

    if (!vtd_bus) {
        uintptr_t *new_key = g_malloc(sizeof(*new_key));
        *new_key = (uintptr_t)bus;
        /* No corresponding free() */
        vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) *
                            X86_IOMMU_PCI_DEVFN_MAX);
        vtd_bus->bus = bus;
        g_hash_table_insert(s->vtd_as_by_busptr, new_key, vtd_bus);
    }

    vtd_dev_as = vtd_bus->dev_as[devfn];

    if (!vtd_dev_as) {
        snprintf(name, sizeof(name), "intel_iommu_devfn_%d", devfn);
        vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));

        vtd_dev_as->bus = bus;
        vtd_dev_as->devfn = (uint8_t)devfn;
        vtd_dev_as->iommu_state = s;
        vtd_dev_as->context_cache_entry.context_cache_gen = 0;

        /*
         * Memory region relationships look like this (address ranges show
         * only the lower 32 bits for brevity):
         *
         * |-----------------+-------------------+----------|
         * | Name            | Address range     | Priority |
         * |-----------------+-------------------+----------+
         * | vtd_root        | 00000000-ffffffff | 0        |
         * | intel_iommu     | 00000000-ffffffff | 1        |
         * | vtd_sys_alias   | 00000000-ffffffff | 1        |
         * | intel_iommu_ir  | fee00000-feefffff | 64       |
         * |-----------------+-------------------+----------|
         *
         * We enable/disable DMAR by switching enablement for the
         * vtd_sys_alias and intel_iommu regions. The IR region is
         * always enabled.
         */
        memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
                                 &s->iommu_ops, "intel_iommu_dmar",
                                 UINT64_MAX);
        memory_region_init_alias(&vtd_dev_as->sys_alias, OBJECT(s),
                                 "vtd_sys_alias", get_system_memory(),
                                 0, memory_region_size(get_system_memory()));
        memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
                              &vtd_mem_ir_ops, s, "intel_iommu_ir",
                              VTD_INTERRUPT_ADDR_SIZE);
        memory_region_init(&vtd_dev_as->root, OBJECT(s),
                           "vtd_root", UINT64_MAX);
        memory_region_add_subregion_overlap(&vtd_dev_as->root,
                                            VTD_INTERRUPT_ADDR_FIRST,
                                            &vtd_dev_as->iommu_ir, 64);
        address_space_init(&vtd_dev_as->as, &vtd_dev_as->root, name);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->sys_alias, 1);
        memory_region_add_subregion_overlap(&vtd_dev_as->root, 0,
                                            &vtd_dev_as->iommu, 1);
        vtd_switch_address_space(vtd_dev_as);
    }
    return vtd_dev_as;
}

/* Unmap the whole range in the notifier's scope. */
static void vtd_address_space_unmap(VTDAddressSpace *as, IOMMUNotifier *n)
{
    IOMMUTLBEntry entry;
    hwaddr size;
    hwaddr start = n->start;
    hwaddr end = n->end;

    /*
     * Note: all the code in this function assumes that IOVAs are no wider
     * than VTD_MGAW bits (as restricted by the VT-d spec); otherwise we
     * would need to consider 64-bit overflow.
     */

    if (end > VTD_ADDRESS_SIZE) {
        /*
         * No need to unmap regions beyond the whole VT-d supported
         * address space size
         */
        end = VTD_ADDRESS_SIZE;
    }

    assert(start <= end);
    size = end - start;

    if (ctpop64(size) != 1) {
        /*
         * This size cannot form a correct mask; enlarge it to the
         * smallest power of two that covers the range.
         */
        int n = 64 - clz64(size);
        if (n > VTD_MGAW) {
            /* should not happen, but in case it happens, limit it */
            n = VTD_MGAW;
        }
        size = 1ULL << n;
    }
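
    /*
     * Worked example (a sketch): for a notifier range of size 0x3000,
     * ctpop64(size) != 1 and clz64(0x3000) = 50, so n = 14 and size
     * becomes 0x4000, the smallest power of two covering the original
     * range.
     */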

    entry.target_as = &address_space_memory;
    /* Adjust iova for the size */
    entry.iova = n->start & ~(size - 1);
    /* This field is meaningless for unmap */
    entry.translated_addr = 0;
    entry.perm = IOMMU_NONE;
    entry.addr_mask = size - 1;

    trace_vtd_as_unmap_whole(pci_bus_num(as->bus),
                             VTD_PCI_SLOT(as->devfn),
                             VTD_PCI_FUNC(as->devfn),
                             entry.iova, size);

    memory_region_notify_one(n, &entry);
}

static void vtd_address_space_unmap_all(IntelIOMMUState *s)
{
    IntelIOMMUNotifierNode *node;
    VTDAddressSpace *vtd_as;
    IOMMUNotifier *n;

    QLIST_FOREACH(node, &s->notifiers_list, next) {
        vtd_as = node->vtd_as;
        IOMMU_NOTIFIER_FOREACH(n, &vtd_as->iommu) {
            vtd_address_space_unmap(vtd_as, n);
        }
    }
}

static int vtd_replay_hook(IOMMUTLBEntry *entry, void *private)
{
    memory_region_notify_one((IOMMUNotifier *)private, entry);
    return 0;
}

static void vtd_iommu_replay(MemoryRegion *mr, IOMMUNotifier *n)
{
    VTDAddressSpace *vtd_as = container_of(mr, VTDAddressSpace, iommu);
    IntelIOMMUState *s = vtd_as->iommu_state;
    uint8_t bus_n = pci_bus_num(vtd_as->bus);
    VTDContextEntry ce;

    /*
     * The replay can be triggered by either an invalidation or a newly
     * created entry. Either way, we release the existing mappings first
     * (which means flushing caches for UNMAP-only notifiers).
     */
    vtd_address_space_unmap(vtd_as, n);

    if (vtd_dev_to_context_entry(s, bus_n, vtd_as->devfn, &ce) == 0) {
        trace_vtd_replay_ce_valid(bus_n, PCI_SLOT(vtd_as->devfn),
                                  PCI_FUNC(vtd_as->devfn),
                                  VTD_CONTEXT_ENTRY_DID(ce.hi),
                                  ce.hi, ce.lo);
        vtd_page_walk(&ce, 0, ~0ULL, vtd_replay_hook, (void *)n, false);
    } else {
        trace_vtd_replay_ce_invalid(bus_n, PCI_SLOT(vtd_as->devfn),
                                    PCI_FUNC(vtd_as->devfn));
    }
}

/* Do the initialization. This is also called on reset, so take care when
 * adding new initialization code.
 */
static void vtd_init(IntelIOMMUState *s)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    memset(s->csr, 0, DMAR_REG_SIZE);
    memset(s->wmask, 0, DMAR_REG_SIZE);
    memset(s->w1cmask, 0, DMAR_REG_SIZE);
    memset(s->womask, 0, DMAR_REG_SIZE);

    s->iommu_ops.translate = vtd_iommu_translate;
    s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
    s->iommu_ops.replay = vtd_iommu_replay;
    s->root = 0;
    s->root_extended = false;
    s->dmar_enabled = false;
    s->iq_head = 0;
    s->iq_tail = 0;
    s->iq = 0;
    s->iq_size = 0;
    s->qi_enabled = false;
    s->iq_last_desc_type = VTD_INV_DESC_NONE;
    s->next_frcd_reg = 0;
    s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
             VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
    s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;

    if (x86_iommu->intr_supported) {
        s->ecap |= VTD_ECAP_IR | VTD_ECAP_MHMV;
        if (s->intr_eim == ON_OFF_AUTO_ON) {
            s->ecap |= VTD_ECAP_EIM;
        }
        assert(s->intr_eim != ON_OFF_AUTO_AUTO);
    }

    if (x86_iommu->dt_supported) {
        s->ecap |= VTD_ECAP_DT;
    }

    if (x86_iommu->pt_supported) {
        s->ecap |= VTD_ECAP_PT;
    }

    if (s->caching_mode) {
        s->cap |= VTD_CAP_CM;
    }

    vtd_reset_context_cache(s);
    vtd_reset_iotlb(s);

    /* Define registers with default values and bit semantics */
    vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
    vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
    vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
    vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
    vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
    vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
    vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
    vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);

    /* Advanced Fault Logging not supported */
    vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
    vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
    vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);

    /* Treated as RsvdZ when EIM in ECAP_REG is not supported
     * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
     */
    vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);

    /* Treated as RO for implementations that report the PLMR and PHMR
     * fields as Clear in the CAP_REG.
     * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
     */
    vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);

    vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
    vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
    vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
    vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
    vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
    vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
    vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
    /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
    vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);

    /* IOTLB registers */
    vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
    vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
    vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);

    /* Fault Recording Registers, 128-bit */
    vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
    vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);

    /*
     * Interrupt remapping registers.
     */
    vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
}

/* Do not reset address_spaces on reset: devices will continue to use the
 * address space they got at first (they won't ask the bus again).
 */
static void vtd_reset(DeviceState *dev)
{
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);

    vtd_init(s);

    /*
     * On device reset, throw away all mappings and external caches
     */
    vtd_address_space_unmap_all(s);
}

static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
{
    IntelIOMMUState *s = opaque;
    VTDAddressSpace *vtd_as;

    assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);

    vtd_as = vtd_find_add_as(s, bus, devfn);
    return &vtd_as->as;
}

static bool vtd_decide_config(IntelIOMMUState *s, Error **errp)
{
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);

    /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
    if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
        !kvm_irqchip_is_split()) {
        error_setg(errp, "Intel Interrupt Remapping cannot work with "
                         "kernel-irqchip=on, please use 'split|off'.");
        return false;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !x86_iommu->intr_supported) {
        error_setg(errp, "eim=on cannot be selected without intremap=on");
        return false;
    }

    if (s->intr_eim == ON_OFF_AUTO_AUTO) {
        s->intr_eim = (kvm_irqchip_in_kernel() || s->buggy_eim)
                      && x86_iommu->intr_supported ?
                      ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }
    if (s->intr_eim == ON_OFF_AUTO_ON && !s->buggy_eim) {
        if (!kvm_irqchip_in_kernel()) {
            error_setg(errp, "eim=on requires accel=kvm,kernel-irqchip=split");
            return false;
        }
        if (!kvm_enable_x2apic()) {
            error_setg(errp, "eim=on requires support on the KVM side "
                             "(X2APIC_API, first shipped in v4.7)");
            return false;
        }
    }

    return true;
}
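
/*
 * How eim=auto resolves in practice (a sketch derived from the logic
 * above, noting that kvm_irqchip_in_kernel() is also true for the split
 * irqchip): with intremap=on and accel=kvm,kernel-irqchip=split, eim
 * becomes on and is then validated against kvm_enable_x2apic(); without
 * an in-kernel irqchip (and without x-buggy-eim), eim falls back to off.
 */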

static void vtd_realize(DeviceState *dev, Error **errp)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    MachineClass *mc = MACHINE_GET_CLASS(ms);
    PCMachineState *pcms =
        PC_MACHINE(object_dynamic_cast(OBJECT(ms), TYPE_PC_MACHINE));
    PCIBus *bus;
    IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
    X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);

    if (!pcms) {
        error_setg(errp, "Machine-type '%s' not supported by intel-iommu",
                   mc->name);
        return;
    }

    bus = pcms->bus;
    x86_iommu->type = TYPE_INTEL;

    if (!vtd_decide_config(s, errp)) {
        return;
    }

    QLIST_INIT(&s->notifiers_list);
    memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
    memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
                          "intel_iommu", DMAR_REG_SIZE);
    sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
    /* No corresponding destroy */
    s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
                                     g_free, g_free);
    s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash,
                                                vtd_uint64_equal,
                                                g_free, g_free);
    vtd_init(s);
    sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
    pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
    /* Pseudo address space under root PCI bus. */
    pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
}

static void vtd_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);

    dc->reset = vtd_reset;
    dc->vmsd = &vtd_vmstate;
    dc->props = vtd_properties;
    dc->hotpluggable = false;
    x86_class->realize = vtd_realize;
    x86_class->int_remap = vtd_int_remap;
    /* Supported by the pc-q35-* machine types */
    dc->user_creatable = true;
}

static const TypeInfo vtd_info = {
    .name          = TYPE_INTEL_IOMMU_DEVICE,
    .parent        = TYPE_X86_IOMMU_DEVICE,
    .instance_size = sizeof(IntelIOMMUState),
    .class_init    = vtd_class_init,
};

static void vtd_register_types(void)
{
    type_register_static(&vtd_info);
}

type_init(vtd_register_types)