hw/i386/intel_iommu.c
1/*
2 * QEMU emulation of an Intel IOMMU (VT-d)
3 * (DMA Remapping device)
4 *
5 * Copyright (C) 2013 Knut Omang, Oracle <knut.omang@oracle.com>
6 * Copyright (C) 2014 Le Tan, <tamlokveer@gmail.com>
7 *
8 * This program is free software; you can redistribute it and/or modify
9 * it under the terms of the GNU General Public License as published by
10 * the Free Software Foundation; either version 2 of the License, or
11 * (at your option) any later version.
12
13 * This program is distributed in the hope that it will be useful,
14 * but WITHOUT ANY WARRANTY; without even the implied warranty of
15 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
16 * GNU General Public License for more details.
17
18 * You should have received a copy of the GNU General Public License along
19 * with this program; if not, see <http://www.gnu.org/licenses/>.
20 */
21
22#include "qemu/osdep.h"
23#include "qemu/error-report.h"
24#include "hw/sysbus.h"
25#include "exec/address-spaces.h"
26#include "intel_iommu_internal.h"
27#include "hw/pci/pci.h"
28#include "hw/pci/pci_bus.h"
29#include "hw/i386/pc.h"
30#include "hw/boards.h"
31#include "hw/i386/x86-iommu.h"
32#include "hw/pci-host/q35.h"
33#include "sysemu/kvm.h"
34
35/*#define DEBUG_INTEL_IOMMU*/
36#ifdef DEBUG_INTEL_IOMMU
37enum {
38 DEBUG_GENERAL, DEBUG_CSR, DEBUG_INV, DEBUG_MMU, DEBUG_FLOG,
39 DEBUG_CACHE, DEBUG_IR,
40};
41#define VTD_DBGBIT(x) (1 << DEBUG_##x)
42static int vtd_dbgflags = VTD_DBGBIT(GENERAL) | VTD_DBGBIT(CSR);
43
44#define VTD_DPRINTF(what, fmt, ...) do { \
45 if (vtd_dbgflags & VTD_DBGBIT(what)) { \
46 fprintf(stderr, "(vtd)%s: " fmt "\n", __func__, \
47 ## __VA_ARGS__); } \
48 } while (0)
49#else
50#define VTD_DPRINTF(what, fmt, ...) do {} while (0)
51#endif
52
53static void vtd_define_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val,
54 uint64_t wmask, uint64_t w1cmask)
55{
56 stq_le_p(&s->csr[addr], val);
57 stq_le_p(&s->wmask[addr], wmask);
58 stq_le_p(&s->w1cmask[addr], w1cmask);
59}
60
61static void vtd_define_quad_wo(IntelIOMMUState *s, hwaddr addr, uint64_t mask)
62{
63 stq_le_p(&s->womask[addr], mask);
64}
65
66static void vtd_define_long(IntelIOMMUState *s, hwaddr addr, uint32_t val,
67 uint32_t wmask, uint32_t w1cmask)
68{
69 stl_le_p(&s->csr[addr], val);
70 stl_le_p(&s->wmask[addr], wmask);
71 stl_le_p(&s->w1cmask[addr], w1cmask);
72}
73
74static void vtd_define_long_wo(IntelIOMMUState *s, hwaddr addr, uint32_t mask)
75{
76 stl_le_p(&s->womask[addr], mask);
77}
78
79/* "External" get/set operations */
80static void vtd_set_quad(IntelIOMMUState *s, hwaddr addr, uint64_t val)
81{
82 uint64_t oldval = ldq_le_p(&s->csr[addr]);
83 uint64_t wmask = ldq_le_p(&s->wmask[addr]);
84 uint64_t w1cmask = ldq_le_p(&s->w1cmask[addr]);
85 stq_le_p(&s->csr[addr],
86 ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
87}
88
89static void vtd_set_long(IntelIOMMUState *s, hwaddr addr, uint32_t val)
90{
91 uint32_t oldval = ldl_le_p(&s->csr[addr]);
92 uint32_t wmask = ldl_le_p(&s->wmask[addr]);
93 uint32_t w1cmask = ldl_le_p(&s->w1cmask[addr]);
94 stl_le_p(&s->csr[addr],
95 ((oldval & ~wmask) | (val & wmask)) & ~(w1cmask & val));
96}
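/*
 * Editorial illustration (not part of the original source): a worked example
 * of the read-write / write-1-to-clear masking used by vtd_set_quad() and
 * vtd_set_long() above, for a hypothetical register with bit 0 read-write
 * (wmask) and bit 1 write-1-to-clear (w1cmask).
 */
#if 0   /* illustrative only, never compiled */
    uint32_t oldval = 0x3, wmask = 0x1, w1cmask = 0x2;
    uint32_t val = 0x2;                        /* guest writes 1 to the W1C bit */
    uint32_t newval = ((oldval & ~wmask)       /* non-writable bits kept: 0x2 */
                       | (val & wmask))        /* RW bits take the new value: 0x0 */
                      & ~(w1cmask & val);      /* W1C bit cleared -> result 0x0 */
#endif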
97
98static uint64_t vtd_get_quad(IntelIOMMUState *s, hwaddr addr)
99{
100 uint64_t val = ldq_le_p(&s->csr[addr]);
101 uint64_t womask = ldq_le_p(&s->womask[addr]);
102 return val & ~womask;
103}
104
105static uint32_t vtd_get_long(IntelIOMMUState *s, hwaddr addr)
106{
107 uint32_t val = ldl_le_p(&s->csr[addr]);
108 uint32_t womask = ldl_le_p(&s->womask[addr]);
109 return val & ~womask;
110}
111
112/* "Internal" get/set operations */
113static uint64_t vtd_get_quad_raw(IntelIOMMUState *s, hwaddr addr)
114{
115 return ldq_le_p(&s->csr[addr]);
116}
117
118static uint32_t vtd_get_long_raw(IntelIOMMUState *s, hwaddr addr)
119{
120 return ldl_le_p(&s->csr[addr]);
121}
122
123static void vtd_set_quad_raw(IntelIOMMUState *s, hwaddr addr, uint64_t val)
124{
125 stq_le_p(&s->csr[addr], val);
126}
127
128static uint32_t vtd_set_clear_mask_long(IntelIOMMUState *s, hwaddr addr,
129 uint32_t clear, uint32_t mask)
130{
131 uint32_t new_val = (ldl_le_p(&s->csr[addr]) & ~clear) | mask;
132 stl_le_p(&s->csr[addr], new_val);
133 return new_val;
134}
135
136static uint64_t vtd_set_clear_mask_quad(IntelIOMMUState *s, hwaddr addr,
137 uint64_t clear, uint64_t mask)
138{
139 uint64_t new_val = (ldq_le_p(&s->csr[addr]) & ~clear) | mask;
140 stq_le_p(&s->csr[addr], new_val);
141 return new_val;
142}
143
144/* GHashTable functions */
145static gboolean vtd_uint64_equal(gconstpointer v1, gconstpointer v2)
146{
147 return *((const uint64_t *)v1) == *((const uint64_t *)v2);
148}
149
150static guint vtd_uint64_hash(gconstpointer v)
151{
152 return (guint)*(const uint64_t *)v;
153}
154
155static gboolean vtd_hash_remove_by_domain(gpointer key, gpointer value,
156 gpointer user_data)
157{
158 VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
159 uint16_t domain_id = *(uint16_t *)user_data;
160 return entry->domain_id == domain_id;
161}
162
163/* The shift of an addr for a certain level of paging structure */
164static inline uint32_t vtd_slpt_level_shift(uint32_t level)
165{
166 return VTD_PAGE_SHIFT_4K + (level - 1) * VTD_SL_LEVEL_BITS;
167}
168
169static inline uint64_t vtd_slpt_level_page_mask(uint32_t level)
170{
171 return ~((1ULL << vtd_slpt_level_shift(level)) - 1);
172}
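/*
 * Editorial note (not part of the original source): with VTD_PAGE_SHIFT_4K
 * == 12 and VTD_SL_LEVEL_BITS == 9, the two helpers above give:
 *
 *   level 1 (4KiB page) : shift 12, page mask ~0xfffULL
 *   level 2 (2MiB page) : shift 21, page mask ~0x1fffffULL
 *   level 3 (1GiB page) : shift 30, page mask ~0x3fffffffULL
 *
 * so vtd_slpt_level_page_mask(level) strips the offset within the (possibly
 * large) page that a second-level entry at that level maps.
 */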
173
174static gboolean vtd_hash_remove_by_page(gpointer key, gpointer value,
175 gpointer user_data)
176{
177 VTDIOTLBEntry *entry = (VTDIOTLBEntry *)value;
178 VTDIOTLBPageInvInfo *info = (VTDIOTLBPageInvInfo *)user_data;
179 uint64_t gfn = (info->addr >> VTD_PAGE_SHIFT_4K) & info->mask;
180 uint64_t gfn_tlb = (info->addr & entry->mask) >> VTD_PAGE_SHIFT_4K;
181 return (entry->domain_id == info->domain_id) &&
182 (((entry->gfn & info->mask) == gfn) ||
183 (entry->gfn == gfn_tlb));
184}
185
186/* Reset all the gen of VTDAddressSpace to zero and set the gen of
187 * IntelIOMMUState to 1.
188 */
189static void vtd_reset_context_cache(IntelIOMMUState *s)
190{
191 VTDAddressSpace *vtd_as;
192 VTDBus *vtd_bus;
193 GHashTableIter bus_it;
194 uint32_t devfn_it;
195
196 g_hash_table_iter_init(&bus_it, s->vtd_as_by_busptr);
197
198 VTD_DPRINTF(CACHE, "global context_cache_gen=1");
199 while (g_hash_table_iter_next (&bus_it, NULL, (void**)&vtd_bus)) {
200 for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
201 vtd_as = vtd_bus->dev_as[devfn_it];
202 if (!vtd_as) {
203 continue;
204 }
205 vtd_as->context_cache_entry.context_cache_gen = 0;
206 }
207 }
208 s->context_cache_gen = 1;
209}
210
211static void vtd_reset_iotlb(IntelIOMMUState *s)
212{
213 assert(s->iotlb);
214 g_hash_table_remove_all(s->iotlb);
215}
216
217static uint64_t vtd_get_iotlb_key(uint64_t gfn, uint8_t source_id,
218 uint32_t level)
219{
220 return gfn | ((uint64_t)(source_id) << VTD_IOTLB_SID_SHIFT) |
221 ((uint64_t)(level) << VTD_IOTLB_LVL_SHIFT);
222}
223
224static uint64_t vtd_get_iotlb_gfn(hwaddr addr, uint32_t level)
225{
226 return (addr & vtd_slpt_level_page_mask(level)) >> VTD_PAGE_SHIFT_4K;
227}
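/*
 * Editorial note (not part of the original source): the IOTLB hash key packs
 * the guest frame number, the requester's source-id and the paging level into
 * a single 64-bit value. For example, for gfn 0x1234, source_id 0x00f8 and
 * level 1 the key is
 *
 *   0x1234 | ((uint64_t)0x00f8 << VTD_IOTLB_SID_SHIFT)
 *          | ((uint64_t)1 << VTD_IOTLB_LVL_SHIFT)
 *
 * so translations cached for different devices or at different large-page
 * levels are kept distinct in the hash table.
 */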
228
229static VTDIOTLBEntry *vtd_lookup_iotlb(IntelIOMMUState *s, uint16_t source_id,
230 hwaddr addr)
231{
232 VTDIOTLBEntry *entry;
233 uint64_t key;
234 int level;
235
236 for (level = VTD_SL_PT_LEVEL; level < VTD_SL_PML4_LEVEL; level++) {
237 key = vtd_get_iotlb_key(vtd_get_iotlb_gfn(addr, level),
238 source_id, level);
239 entry = g_hash_table_lookup(s->iotlb, &key);
240 if (entry) {
241 goto out;
242 }
243 }
244
245out:
246 return entry;
247}
248
249static void vtd_update_iotlb(IntelIOMMUState *s, uint16_t source_id,
250 uint16_t domain_id, hwaddr addr, uint64_t slpte,
251 bool read_flags, bool write_flags,
252 uint32_t level)
253{
254 VTDIOTLBEntry *entry = g_malloc(sizeof(*entry));
255 uint64_t *key = g_malloc(sizeof(*key));
256 uint64_t gfn = vtd_get_iotlb_gfn(addr, level);
257
258 VTD_DPRINTF(CACHE, "update iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
259 " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr, slpte,
260 domain_id);
261 if (g_hash_table_size(s->iotlb) >= VTD_IOTLB_MAX_SIZE) {
262 VTD_DPRINTF(CACHE, "iotlb exceeds size limit, forced to reset");
263 vtd_reset_iotlb(s);
264 }
265
266 entry->gfn = gfn;
267 entry->domain_id = domain_id;
268 entry->slpte = slpte;
269 entry->read_flags = read_flags;
270 entry->write_flags = write_flags;
271 entry->mask = vtd_slpt_level_page_mask(level);
272 *key = vtd_get_iotlb_key(gfn, source_id, level);
273 g_hash_table_replace(s->iotlb, key, entry);
274}
275
276/* Given the reg addr of both the message data and address, generate an
277 * interrupt via MSI.
278 */
279static void vtd_generate_interrupt(IntelIOMMUState *s, hwaddr mesg_addr_reg,
280 hwaddr mesg_data_reg)
281{
282 hwaddr addr;
283 uint32_t data;
284
285 assert(mesg_data_reg < DMAR_REG_SIZE);
286 assert(mesg_addr_reg < DMAR_REG_SIZE);
287
288 addr = vtd_get_long_raw(s, mesg_addr_reg);
289 data = vtd_get_long_raw(s, mesg_data_reg);
290
291 VTD_DPRINTF(FLOG, "msi: addr 0x%"PRIx64 " data 0x%"PRIx32, addr, data);
292 address_space_stl_le(&address_space_memory, addr, data,
293 MEMTXATTRS_UNSPECIFIED, NULL);
294}
295
296/* Generate a fault event to software via MSI if conditions are met.
297 * Notice that the value of FSTS_REG being passed to it should be the one
298 * before any update.
299 */
300static void vtd_generate_fault_event(IntelIOMMUState *s, uint32_t pre_fsts)
301{
302 if (pre_fsts & VTD_FSTS_PPF || pre_fsts & VTD_FSTS_PFO ||
303 pre_fsts & VTD_FSTS_IQE) {
304 VTD_DPRINTF(FLOG, "there are previous interrupt conditions "
305 "to be serviced by software, fault event is not generated "
306 "(FSTS_REG 0x%"PRIx32 ")", pre_fsts);
307 return;
308 }
309 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, 0, VTD_FECTL_IP);
310 if (vtd_get_long_raw(s, DMAR_FECTL_REG) & VTD_FECTL_IM) {
311 VTD_DPRINTF(FLOG, "Interrupt Mask set, fault event is not generated");
312 } else {
313 vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
314 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
315 }
316}
317
318/* Check if the Fault (F) field of the Fault Recording Register referenced by
319 * @index is Set.
320 */
321static bool vtd_is_frcd_set(IntelIOMMUState *s, uint16_t index)
322{
323 /* Each reg is 128-bit */
324 hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
325 addr += 8; /* Access the high 64-bit half */
326
327 assert(index < DMAR_FRCD_REG_NR);
328
329 return vtd_get_quad_raw(s, addr) & VTD_FRCD_F;
330}
331
332/* Update the PPF field of Fault Status Register.
333 * Should be called whenever the F field of any fault recording
334 * register is changed.
335 */
336static void vtd_update_fsts_ppf(IntelIOMMUState *s)
337{
338 uint32_t i;
339 uint32_t ppf_mask = 0;
340
341 for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
342 if (vtd_is_frcd_set(s, i)) {
343 ppf_mask = VTD_FSTS_PPF;
344 break;
345 }
346 }
347 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_PPF, ppf_mask);
348 VTD_DPRINTF(FLOG, "set PPF of FSTS_REG to %d", ppf_mask ? 1 : 0);
349}
350
351static void vtd_set_frcd_and_update_ppf(IntelIOMMUState *s, uint16_t index)
352{
353 /* Each reg is 128-bit */
354 hwaddr addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
355 addr += 8; /* Access the high 64-bit half */
356
357 assert(index < DMAR_FRCD_REG_NR);
358
359 vtd_set_clear_mask_quad(s, addr, 0, VTD_FRCD_F);
360 vtd_update_fsts_ppf(s);
361}
362
363/* Must not update F field now, should be done later */
364static void vtd_record_frcd(IntelIOMMUState *s, uint16_t index,
365 uint16_t source_id, hwaddr addr,
366 VTDFaultReason fault, bool is_write)
367{
368 uint64_t hi = 0, lo;
369 hwaddr frcd_reg_addr = DMAR_FRCD_REG_OFFSET + (((uint64_t)index) << 4);
370
371 assert(index < DMAR_FRCD_REG_NR);
372
373 lo = VTD_FRCD_FI(addr);
374 hi = VTD_FRCD_SID(source_id) | VTD_FRCD_FR(fault);
375 if (!is_write) {
376 hi |= VTD_FRCD_T;
377 }
378 vtd_set_quad_raw(s, frcd_reg_addr, lo);
379 vtd_set_quad_raw(s, frcd_reg_addr + 8, hi);
380 VTD_DPRINTF(FLOG, "record to FRCD_REG #%"PRIu16 ": hi 0x%"PRIx64
381 ", lo 0x%"PRIx64, index, hi, lo);
382}
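/*
 * Editorial note (not part of the original source): each fault recording
 * register is 128 bits wide. The function above writes the low half first
 * (the faulting address, via VTD_FRCD_FI) and then the high half (source-id,
 * fault reason, and the T bit for read requests); the F bit is deliberately
 * left clear here and is only set later by vtd_set_frcd_and_update_ppf(), so
 * software never observes a half-written record marked as valid.
 */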
383
384/* Try to collapse multiple pending faults from the same requester */
385static bool vtd_try_collapse_fault(IntelIOMMUState *s, uint16_t source_id)
386{
387 uint32_t i;
388 uint64_t frcd_reg;
389 hwaddr addr = DMAR_FRCD_REG_OFFSET + 8; /* The high 64-bit half */
390
391 for (i = 0; i < DMAR_FRCD_REG_NR; i++) {
392 frcd_reg = vtd_get_quad_raw(s, addr);
393 VTD_DPRINTF(FLOG, "frcd_reg #%d 0x%"PRIx64, i, frcd_reg);
394 if ((frcd_reg & VTD_FRCD_F) &&
395 ((frcd_reg & VTD_FRCD_SID_MASK) == source_id)) {
396 return true;
397 }
398 addr += 16; /* 128-bit for each */
399 }
400 return false;
401}
402
403/* Log and report a DMAR (address translation) fault to software */
404static void vtd_report_dmar_fault(IntelIOMMUState *s, uint16_t source_id,
405 hwaddr addr, VTDFaultReason fault,
406 bool is_write)
407{
408 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
409
410 assert(fault < VTD_FR_MAX);
411
412 if (fault == VTD_FR_RESERVED_ERR) {
413 /* This is not a normal fault reason case. Drop it. */
414 return;
415 }
416 VTD_DPRINTF(FLOG, "sid 0x%"PRIx16 ", fault %d, addr 0x%"PRIx64
417 ", is_write %d", source_id, fault, addr, is_write);
418 if (fsts_reg & VTD_FSTS_PFO) {
419 VTD_DPRINTF(FLOG, "new fault is not recorded due to "
420 "Primary Fault Overflow");
421 return;
422 }
423 if (vtd_try_collapse_fault(s, source_id)) {
424 VTD_DPRINTF(FLOG, "new fault is not recorded due to "
425 "compression of faults");
426 return;
427 }
428 if (vtd_is_frcd_set(s, s->next_frcd_reg)) {
429 VTD_DPRINTF(FLOG, "Primary Fault Overflow and "
430 "new fault is not recorded, set PFO field");
431 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_PFO);
432 return;
433 }
434
435 vtd_record_frcd(s, s->next_frcd_reg, source_id, addr, fault, is_write);
436
437 if (fsts_reg & VTD_FSTS_PPF) {
438 VTD_DPRINTF(FLOG, "there are pending faults already, "
439 "fault event is not generated");
440 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg);
441 s->next_frcd_reg++;
442 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
443 s->next_frcd_reg = 0;
444 }
445 } else {
446 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, VTD_FSTS_FRI_MASK,
447 VTD_FSTS_FRI(s->next_frcd_reg));
448 vtd_set_frcd_and_update_ppf(s, s->next_frcd_reg); /* Will set PPF */
449 s->next_frcd_reg++;
450 if (s->next_frcd_reg == DMAR_FRCD_REG_NR) {
451 s->next_frcd_reg = 0;
452 }
453 /* This case actually causes the PPF to be Set.
454 * So generate fault event (interrupt).
455 */
456 vtd_generate_fault_event(s, fsts_reg);
457 }
458}
459
460/* Handle Invalidation Queue Errors of queued invalidation interface error
461 * conditions.
462 */
463static void vtd_handle_inv_queue_error(IntelIOMMUState *s)
464{
465 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
466
467 vtd_set_clear_mask_long(s, DMAR_FSTS_REG, 0, VTD_FSTS_IQE);
468 vtd_generate_fault_event(s, fsts_reg);
469}
470
471/* Set the IWC field and try to generate an invalidation completion interrupt */
472static void vtd_generate_completion_event(IntelIOMMUState *s)
473{
474 VTD_DPRINTF(INV, "completes an invalidation wait command with "
475 "Interrupt Flag");
476 if (vtd_get_long_raw(s, DMAR_ICS_REG) & VTD_ICS_IWC) {
477 VTD_DPRINTF(INV, "there is a previous interrupt condition to be "
478 "serviced by software, "
479 "new invalidation event is not generated");
480 return;
481 }
482 vtd_set_clear_mask_long(s, DMAR_ICS_REG, 0, VTD_ICS_IWC);
483 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, 0, VTD_IECTL_IP);
484 if (vtd_get_long_raw(s, DMAR_IECTL_REG) & VTD_IECTL_IM) {
485 VTD_DPRINTF(INV, "IM filed in IECTL_REG is set, new invalidation "
486 "event is not generated");
487 return;
488 } else {
489 /* Generate the interrupt event */
490 vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
491 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
492 }
493}
494
495static inline bool vtd_root_entry_present(VTDRootEntry *root)
496{
497 return root->val & VTD_ROOT_ENTRY_P;
498}
499
500static int vtd_get_root_entry(IntelIOMMUState *s, uint8_t index,
501 VTDRootEntry *re)
502{
503 dma_addr_t addr;
504
505 addr = s->root + index * sizeof(*re);
506 if (dma_memory_read(&address_space_memory, addr, re, sizeof(*re))) {
507 VTD_DPRINTF(GENERAL, "error: fail to access root-entry at 0x%"PRIx64
508 " + %"PRIu8, s->root, index);
509 re->val = 0;
510 return -VTD_FR_ROOT_TABLE_INV;
511 }
512 re->val = le64_to_cpu(re->val);
513 return 0;
514}
515
516static inline bool vtd_context_entry_present(VTDContextEntry *context)
517{
518 return context->lo & VTD_CONTEXT_ENTRY_P;
519}
520
521static int vtd_get_context_entry_from_root(VTDRootEntry *root, uint8_t index,
522 VTDContextEntry *ce)
523{
524 dma_addr_t addr;
525
526 if (!vtd_root_entry_present(root)) {
527 VTD_DPRINTF(GENERAL, "error: root-entry is not present");
528 return -VTD_FR_ROOT_ENTRY_P;
529 }
530 addr = (root->val & VTD_ROOT_ENTRY_CTP) + index * sizeof(*ce);
531 if (dma_memory_read(&address_space_memory, addr, ce, sizeof(*ce))) {
532 VTD_DPRINTF(GENERAL, "error: fail to access context-entry at 0x%"PRIx64
533 " + %"PRIu8,
534 (uint64_t)(root->val & VTD_ROOT_ENTRY_CTP), index);
535 return -VTD_FR_CONTEXT_TABLE_INV;
536 }
537 ce->lo = le64_to_cpu(ce->lo);
538 ce->hi = le64_to_cpu(ce->hi);
539 return 0;
540}
541
542static inline dma_addr_t vtd_get_slpt_base_from_context(VTDContextEntry *ce)
543{
544 return ce->lo & VTD_CONTEXT_ENTRY_SLPTPTR;
545}
546
547static inline uint64_t vtd_get_slpte_addr(uint64_t slpte)
548{
549 return slpte & VTD_SL_PT_BASE_ADDR_MASK;
550}
551
552/* Whether the pte indicates the address of the page frame */
553static inline bool vtd_is_last_slpte(uint64_t slpte, uint32_t level)
554{
555 return level == VTD_SL_PT_LEVEL || (slpte & VTD_SL_PT_PAGE_SIZE_MASK);
556}
557
558/* Get the content of a spte located in @base_addr[@index] */
559static uint64_t vtd_get_slpte(dma_addr_t base_addr, uint32_t index)
560{
561 uint64_t slpte;
562
563 assert(index < VTD_SL_PT_ENTRY_NR);
564
565 if (dma_memory_read(&address_space_memory,
566 base_addr + index * sizeof(slpte), &slpte,
567 sizeof(slpte))) {
568 slpte = (uint64_t)-1;
569 return slpte;
570 }
571 slpte = le64_to_cpu(slpte);
572 return slpte;
573}
574
575/* Given a gpa and the level of paging structure, return the offset of current
576 * level.
577 */
578static inline uint32_t vtd_gpa_level_offset(uint64_t gpa, uint32_t level)
579{
580 return (gpa >> vtd_slpt_level_shift(level)) &
581 ((1ULL << VTD_SL_LEVEL_BITS) - 1);
582}
583
584/* Check Capability Register to see if the @level of page-table is supported */
585static inline bool vtd_is_level_supported(IntelIOMMUState *s, uint32_t level)
586{
587 return VTD_CAP_SAGAW_MASK & s->cap &
588 (1ULL << (level - 2 + VTD_CAP_SAGAW_SHIFT));
589}
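/*
 * Editorial note (not part of the original source): SAGAW is a bitmask in the
 * capability register with one bit per supported page-table depth. With the
 * encoding used above (bit index == level - 2), a 3-level table (39-bit guest
 * address width) corresponds to SAGAW bit 1 and a 4-level table (48-bit
 * width) to SAGAW bit 2, so a CAP_REG advertising only 39- and 48-bit widths
 * would carry SAGAW == 0b00110.
 */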
590
591/* Get the page-table level that hardware should use for the second-level
592 * page-table walk from the Address Width field of context-entry.
593 */
594static inline uint32_t vtd_get_level_from_context_entry(VTDContextEntry *ce)
595{
596 return 2 + (ce->hi & VTD_CONTEXT_ENTRY_AW);
597}
598
599static inline uint32_t vtd_get_agaw_from_context_entry(VTDContextEntry *ce)
600{
601 return 30 + (ce->hi & VTD_CONTEXT_ENTRY_AW) * 9;
602}
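/*
 * Editorial note (not part of the original source): the context-entry AW
 * field selects both the table depth and the adjusted guest address width.
 * AW == 1 gives a 3-level table and a 30 + 1 * 9 = 39 bit AGAW; AW == 2 gives
 * a 4-level table and a 48 bit AGAW, matching the two helpers above.
 */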
603
604static const uint64_t vtd_paging_entry_rsvd_field[] = {
605 [0] = ~0ULL,
606 /* For not large page */
607 [1] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
608 [2] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
609 [3] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
610 [4] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
611 /* For large page */
612 [5] = 0x800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
613 [6] = 0x1ff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
614 [7] = 0x3ffff800ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
615 [8] = 0x880ULL | ~(VTD_HAW_MASK | VTD_SL_IGN_COM),
616};
617
618static bool vtd_slpte_nonzero_rsvd(uint64_t slpte, uint32_t level)
619{
620 if (slpte & VTD_SL_PT_PAGE_SIZE_MASK) {
621 /* Maybe large page */
622 return slpte & vtd_paging_entry_rsvd_field[level + 4];
623 } else {
624 return slpte & vtd_paging_entry_rsvd_field[level];
625 }
626}
627
628/* Given the @gpa, get relevant @slptep. @slpte_level will be the last level
629 * of the translation, can be used for deciding the size of large page.
630 */
631static int vtd_gpa_to_slpte(VTDContextEntry *ce, uint64_t gpa, bool is_write,
632 uint64_t *slptep, uint32_t *slpte_level,
633 bool *reads, bool *writes)
634{
635 dma_addr_t addr = vtd_get_slpt_base_from_context(ce);
636 uint32_t level = vtd_get_level_from_context_entry(ce);
637 uint32_t offset;
638 uint64_t slpte;
639 uint32_t ce_agaw = vtd_get_agaw_from_context_entry(ce);
640 uint64_t access_right_check;
641
642 /* Check if @gpa is above 2^X-1, where X is the minimum of MGAW in CAP_REG
643 * and AW in context-entry.
644 */
645 if (gpa & ~((1ULL << MIN(ce_agaw, VTD_MGAW)) - 1)) {
646 VTD_DPRINTF(GENERAL, "error: gpa 0x%"PRIx64 " exceeds limits", gpa);
647 return -VTD_FR_ADDR_BEYOND_MGAW;
648 }
649
650 /* FIXME: what is the Atomics request here? */
651 access_right_check = is_write ? VTD_SL_W : VTD_SL_R;
652
653 while (true) {
654 offset = vtd_gpa_level_offset(gpa, level);
655 slpte = vtd_get_slpte(addr, offset);
656
657 if (slpte == (uint64_t)-1) {
658 VTD_DPRINTF(GENERAL, "error: fail to access second-level paging "
659 "entry at level %"PRIu32 " for gpa 0x%"PRIx64,
660 level, gpa);
661 if (level == vtd_get_level_from_context_entry(ce)) {
662 /* Invalid programming of context-entry */
663 return -VTD_FR_CONTEXT_ENTRY_INV;
664 } else {
665 return -VTD_FR_PAGING_ENTRY_INV;
666 }
667 }
668 *reads = (*reads) && (slpte & VTD_SL_R);
669 *writes = (*writes) && (slpte & VTD_SL_W);
670 if (!(slpte & access_right_check)) {
671 VTD_DPRINTF(GENERAL, "error: lack of %s permission for "
672 "gpa 0x%"PRIx64 " slpte 0x%"PRIx64,
673 (is_write ? "write" : "read"), gpa, slpte);
674 return is_write ? -VTD_FR_WRITE : -VTD_FR_READ;
675 }
676 if (vtd_slpte_nonzero_rsvd(slpte, level)) {
677 VTD_DPRINTF(GENERAL, "error: non-zero reserved field in second "
678 "level paging entry level %"PRIu32 " slpte 0x%"PRIx64,
679 level, slpte);
680 return -VTD_FR_PAGING_ENTRY_RSVD;
681 }
682
683 if (vtd_is_last_slpte(slpte, level)) {
684 *slptep = slpte;
685 *slpte_level = level;
686 return 0;
687 }
688 addr = vtd_get_slpte_addr(slpte);
689 level--;
690 }
691}
692
693/* Map a device to its corresponding domain (context-entry) */
694static int vtd_dev_to_context_entry(IntelIOMMUState *s, uint8_t bus_num,
695 uint8_t devfn, VTDContextEntry *ce)
696{
697 VTDRootEntry re;
698 int ret_fr;
699
700 ret_fr = vtd_get_root_entry(s, bus_num, &re);
701 if (ret_fr) {
702 return ret_fr;
703 }
704
705 if (!vtd_root_entry_present(&re)) {
706 VTD_DPRINTF(GENERAL, "error: root-entry #%"PRIu8 " is not present",
707 bus_num);
708 return -VTD_FR_ROOT_ENTRY_P;
709 } else if (re.rsvd || (re.val & VTD_ROOT_ENTRY_RSVD)) {
710 VTD_DPRINTF(GENERAL, "error: non-zero reserved field in root-entry "
711 "hi 0x%"PRIx64 " lo 0x%"PRIx64, re.rsvd, re.val);
712 return -VTD_FR_ROOT_ENTRY_RSVD;
713 }
714
715 ret_fr = vtd_get_context_entry_from_root(&re, devfn, ce);
716 if (ret_fr) {
717 return ret_fr;
718 }
719
720 if (!vtd_context_entry_present(ce)) {
721 VTD_DPRINTF(GENERAL,
722 "error: context-entry #%"PRIu8 "(bus #%"PRIu8 ") "
723 "is not present", devfn, bus_num);
724 return -VTD_FR_CONTEXT_ENTRY_P;
725 } else if ((ce->hi & VTD_CONTEXT_ENTRY_RSVD_HI) ||
726 (ce->lo & VTD_CONTEXT_ENTRY_RSVD_LO)) {
727 VTD_DPRINTF(GENERAL,
728 "error: non-zero reserved field in context-entry "
729 "hi 0x%"PRIx64 " lo 0x%"PRIx64, ce->hi, ce->lo);
730 return -VTD_FR_CONTEXT_ENTRY_RSVD;
731 }
732 /* Check if the programming of context-entry is valid */
733 if (!vtd_is_level_supported(s, vtd_get_level_from_context_entry(ce))) {
734 VTD_DPRINTF(GENERAL, "error: unsupported Address Width value in "
735 "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
736 ce->hi, ce->lo);
737 return -VTD_FR_CONTEXT_ENTRY_INV;
738 } else if (ce->lo & VTD_CONTEXT_ENTRY_TT) {
739 VTD_DPRINTF(GENERAL, "error: unsupported Translation Type in "
740 "context-entry hi 0x%"PRIx64 " lo 0x%"PRIx64,
741 ce->hi, ce->lo);
742 return -VTD_FR_CONTEXT_ENTRY_INV;
743 }
744 return 0;
745}
746
747static inline uint16_t vtd_make_source_id(uint8_t bus_num, uint8_t devfn)
748{
749 return ((bus_num & 0xffUL) << 8) | (devfn & 0xffUL);
750}
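/*
 * Editorial note (not part of the original source): the source-id is simply
 * the PCI requester id, bus number in bits 15:8 and devfn in bits 7:0. For a
 * device at 00:1f.3 (bus 0x00, devfn 0xfb), vtd_make_source_id(0x00, 0xfb)
 * returns 0x00fb.
 */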
751
752static const bool vtd_qualified_faults[] = {
753 [VTD_FR_RESERVED] = false,
754 [VTD_FR_ROOT_ENTRY_P] = false,
755 [VTD_FR_CONTEXT_ENTRY_P] = true,
756 [VTD_FR_CONTEXT_ENTRY_INV] = true,
757 [VTD_FR_ADDR_BEYOND_MGAW] = true,
758 [VTD_FR_WRITE] = true,
759 [VTD_FR_READ] = true,
760 [VTD_FR_PAGING_ENTRY_INV] = true,
761 [VTD_FR_ROOT_TABLE_INV] = false,
762 [VTD_FR_CONTEXT_TABLE_INV] = false,
763 [VTD_FR_ROOT_ENTRY_RSVD] = false,
764 [VTD_FR_PAGING_ENTRY_RSVD] = true,
765 [VTD_FR_CONTEXT_ENTRY_TT] = true,
766 [VTD_FR_RESERVED_ERR] = false,
767 [VTD_FR_MAX] = false,
768};
769
770/* To see if a fault condition is "qualified", which is reported to software
771 * only if the FPD field in the context-entry used to process the faulting
772 * request is 0.
773 */
774static inline bool vtd_is_qualified_fault(VTDFaultReason fault)
775{
776 return vtd_qualified_faults[fault];
777}
778
779static inline bool vtd_is_interrupt_addr(hwaddr addr)
780{
781 return VTD_INTERRUPT_ADDR_FIRST <= addr && addr <= VTD_INTERRUPT_ADDR_LAST;
782}
783
784/* Map dev to context-entry then do a paging-structures walk to do an iommu
785 * translation.
786 *
787 * Called from RCU critical section.
788 *
789 * @bus_num: The bus number
790 * @devfn: The devfn, which is the combined of device and function number
791 * @is_write: The access is a write operation
792 * @entry: IOMMUTLBEntry that contain the addr to be translated and result
793 */
794static void vtd_do_iommu_translate(VTDAddressSpace *vtd_as, PCIBus *bus,
795 uint8_t devfn, hwaddr addr, bool is_write,
796 IOMMUTLBEntry *entry)
797{
798 IntelIOMMUState *s = vtd_as->iommu_state;
799 VTDContextEntry ce;
800 uint8_t bus_num = pci_bus_num(bus);
801 VTDContextCacheEntry *cc_entry = &vtd_as->context_cache_entry;
802 uint64_t slpte, page_mask;
803 uint32_t level;
804 uint16_t source_id = vtd_make_source_id(bus_num, devfn);
805 int ret_fr;
806 bool is_fpd_set = false;
807 bool reads = true;
808 bool writes = true;
809 VTDIOTLBEntry *iotlb_entry;
810
811 /* Check if the request is in interrupt address range */
812 if (vtd_is_interrupt_addr(addr)) {
813 if (is_write) {
814 /* FIXME: since we don't know the length of the access here, we
815 * treat Non-DWORD length write requests without PASID as
816 * interrupt requests, too. Without interrupt remapping support,
817 * we just use 1:1 mapping.
818 */
819 VTD_DPRINTF(MMU, "write request to interrupt address "
820 "gpa 0x%"PRIx64, addr);
821 entry->iova = addr & VTD_PAGE_MASK_4K;
822 entry->translated_addr = addr & VTD_PAGE_MASK_4K;
823 entry->addr_mask = ~VTD_PAGE_MASK_4K;
824 entry->perm = IOMMU_WO;
825 return;
826 } else {
827 VTD_DPRINTF(GENERAL, "error: read request from interrupt address "
828 "gpa 0x%"PRIx64, addr);
829 vtd_report_dmar_fault(s, source_id, addr, VTD_FR_READ, is_write);
830 return;
831 }
832 }
833 /* Try to fetch slpte from IOTLB */
834 iotlb_entry = vtd_lookup_iotlb(s, source_id, addr);
835 if (iotlb_entry) {
836 VTD_DPRINTF(CACHE, "hit iotlb sid 0x%"PRIx16 " gpa 0x%"PRIx64
837 " slpte 0x%"PRIx64 " did 0x%"PRIx16, source_id, addr,
838 iotlb_entry->slpte, iotlb_entry->domain_id);
839 slpte = iotlb_entry->slpte;
840 reads = iotlb_entry->read_flags;
841 writes = iotlb_entry->write_flags;
842 page_mask = iotlb_entry->mask;
843 goto out;
844 }
845 /* Try to fetch context-entry from cache first */
846 if (cc_entry->context_cache_gen == s->context_cache_gen) {
847 VTD_DPRINTF(CACHE, "hit context-cache bus %d devfn %d "
848 "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 ")",
849 bus_num, devfn, cc_entry->context_entry.hi,
850 cc_entry->context_entry.lo, cc_entry->context_cache_gen);
851 ce = cc_entry->context_entry;
852 is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
853 } else {
854 ret_fr = vtd_dev_to_context_entry(s, bus_num, devfn, &ce);
855 is_fpd_set = ce.lo & VTD_CONTEXT_ENTRY_FPD;
856 if (ret_fr) {
857 ret_fr = -ret_fr;
858 if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
859 VTD_DPRINTF(FLOG, "fault processing is disabled for DMA "
860 "requests through this context-entry "
861 "(with FPD Set)");
862 } else {
863 vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
864 }
865 return;
866 }
867 /* Update context-cache */
868 VTD_DPRINTF(CACHE, "update context-cache bus %d devfn %d "
869 "(hi %"PRIx64 " lo %"PRIx64 " gen %"PRIu32 "->%"PRIu32 ")",
870 bus_num, devfn, ce.hi, ce.lo,
871 cc_entry->context_cache_gen, s->context_cache_gen);
872 cc_entry->context_entry = ce;
873 cc_entry->context_cache_gen = s->context_cache_gen;
874 }
875
876 ret_fr = vtd_gpa_to_slpte(&ce, addr, is_write, &slpte, &level,
877 &reads, &writes);
878 if (ret_fr) {
879 ret_fr = -ret_fr;
880 if (is_fpd_set && vtd_is_qualified_fault(ret_fr)) {
881 VTD_DPRINTF(FLOG, "fault processing is disabled for DMA requests "
882 "through this context-entry (with FPD Set)");
883 } else {
884 vtd_report_dmar_fault(s, source_id, addr, ret_fr, is_write);
885 }
886 return;
887 }
888
889 page_mask = vtd_slpt_level_page_mask(level);
890 vtd_update_iotlb(s, source_id, VTD_CONTEXT_ENTRY_DID(ce.hi), addr, slpte,
891 reads, writes, level);
892out:
893 entry->iova = addr & page_mask;
894 entry->translated_addr = vtd_get_slpte_addr(slpte) & page_mask;
895 entry->addr_mask = ~page_mask;
896 entry->perm = (writes ? 2 : 0) + (reads ? 1 : 0);
897}
898
899static void vtd_root_table_setup(IntelIOMMUState *s)
900{
901 s->root = vtd_get_quad_raw(s, DMAR_RTADDR_REG);
902 s->root_extended = s->root & VTD_RTADDR_RTT;
903 s->root &= VTD_RTADDR_ADDR_MASK;
904
905 VTD_DPRINTF(CSR, "root_table addr 0x%"PRIx64 " %s", s->root,
906 (s->root_extended ? "(extended)" : ""));
907}
908
909static void vtd_iec_notify_all(IntelIOMMUState *s, bool global,
910 uint32_t index, uint32_t mask)
911{
912 x86_iommu_iec_notify_all(X86_IOMMU_DEVICE(s), global, index, mask);
913}
914
915static void vtd_interrupt_remap_table_setup(IntelIOMMUState *s)
916{
917 uint64_t value = 0;
918 value = vtd_get_quad_raw(s, DMAR_IRTA_REG);
919 s->intr_size = 1UL << ((value & VTD_IRTA_SIZE_MASK) + 1);
920 s->intr_root = value & VTD_IRTA_ADDR_MASK;
921 s->intr_eime = value & VTD_IRTA_EIME;
922
923 /* Notify global invalidation */
924 vtd_iec_notify_all(s, true, 0, 0);
925
926 VTD_DPRINTF(CSR, "int remap table addr 0x%"PRIx64 " size %"PRIu32,
927 s->intr_root, s->intr_size);
928}
929
930static void vtd_context_global_invalidate(IntelIOMMUState *s)
931{
932 s->context_cache_gen++;
933 if (s->context_cache_gen == VTD_CONTEXT_CACHE_GEN_MAX) {
934 vtd_reset_context_cache(s);
935 }
936}
937
938
939/* Find the VTD address space currently associated with a given bus number.
940 */
941static VTDBus *vtd_find_as_from_bus_num(IntelIOMMUState *s, uint8_t bus_num)
942{
943 VTDBus *vtd_bus = s->vtd_as_by_bus_num[bus_num];
944 if (!vtd_bus) {
945 /* Iterate over the registered buses to find the one
946 * which currently holds this bus number, and update the bus_num lookup table:
947 */
948 GHashTableIter iter;
949
950 g_hash_table_iter_init(&iter, s->vtd_as_by_busptr);
951 while (g_hash_table_iter_next (&iter, NULL, (void**)&vtd_bus)) {
952 if (pci_bus_num(vtd_bus->bus) == bus_num) {
953 s->vtd_as_by_bus_num[bus_num] = vtd_bus;
954 return vtd_bus;
955 }
956 }
957 }
958 return vtd_bus;
959}
960
961/* Do a context-cache device-selective invalidation.
962 * @func_mask: FM field after shifting
963 */
964static void vtd_context_device_invalidate(IntelIOMMUState *s,
965 uint16_t source_id,
966 uint16_t func_mask)
967{
968 uint16_t mask;
969 VTDBus *vtd_bus;
970 VTDAddressSpace *vtd_as;
971 uint16_t devfn;
972 uint16_t devfn_it;
973
974 switch (func_mask & 3) {
975 case 0:
976 mask = 0; /* No bits in the SID field masked */
977 break;
978 case 1:
979 mask = 4; /* Mask bit 2 in the SID field */
980 break;
981 case 2:
982 mask = 6; /* Mask bit 2:1 in the SID field */
983 break;
984 case 3:
985 mask = 7; /* Mask bit 2:0 in the SID field */
986 break;
987 }
988 VTD_DPRINTF(INV, "device-selective invalidation source 0x%"PRIx16
989 " mask %"PRIu16, source_id, mask);
990 vtd_bus = vtd_find_as_from_bus_num(s, VTD_SID_TO_BUS(source_id));
991 if (vtd_bus) {
992 devfn = VTD_SID_TO_DEVFN(source_id);
993 for (devfn_it = 0; devfn_it < X86_IOMMU_PCI_DEVFN_MAX; ++devfn_it) {
994 vtd_as = vtd_bus->dev_as[devfn_it];
995 if (vtd_as && ((devfn_it & mask) == (devfn & mask))) {
996 VTD_DPRINTF(INV, "invalidate context-cahce of devfn 0x%"PRIx16,
997 devfn_it);
998 vtd_as->context_cache_entry.context_cache_gen = 0;
999 }
1000 }
1001 }
1002}
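/*
 * Editorial note (not part of the original source): the FM field of a
 * device-selective invalidation encodes how many low-order bits of the
 * source-id are ignored when matching. FM == 0 targets exactly one function,
 * FM == 1 ignores bit 2, FM == 2 ignores bits 2:1, and FM == 3 ignores bits
 * 2:0 and therefore covers all eight functions of the addressed device.
 */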
1003
1004/* Context-cache invalidation
1005 * Returns the Context Actual Invalidation Granularity.
1006 * @val: the content of the CCMD_REG
1007 */
1008static uint64_t vtd_context_cache_invalidate(IntelIOMMUState *s, uint64_t val)
1009{
1010 uint64_t caig;
1011 uint64_t type = val & VTD_CCMD_CIRG_MASK;
1012
1013 switch (type) {
1014 case VTD_CCMD_DOMAIN_INVL:
1015 VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1016 (uint16_t)VTD_CCMD_DID(val));
1017 /* Fall through */
1018 case VTD_CCMD_GLOBAL_INVL:
1019 VTD_DPRINTF(INV, "global invalidation");
1020 caig = VTD_CCMD_GLOBAL_INVL_A;
1021 vtd_context_global_invalidate(s);
1022 break;
1023
1024 case VTD_CCMD_DEVICE_INVL:
1025 caig = VTD_CCMD_DEVICE_INVL_A;
1026 vtd_context_device_invalidate(s, VTD_CCMD_SID(val), VTD_CCMD_FM(val));
1027 break;
1028
1029 default:
1030 VTD_DPRINTF(GENERAL, "error: invalid granularity");
1031 caig = 0;
1032 }
1033 return caig;
1034}
1035
1036static void vtd_iotlb_global_invalidate(IntelIOMMUState *s)
1037{
1038 vtd_reset_iotlb(s);
1039}
1040
1041static void vtd_iotlb_domain_invalidate(IntelIOMMUState *s, uint16_t domain_id)
1042{
1043 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_domain,
1044 &domain_id);
1045}
1046
1047static void vtd_iotlb_page_invalidate(IntelIOMMUState *s, uint16_t domain_id,
1048 hwaddr addr, uint8_t am)
1049{
1050 VTDIOTLBPageInvInfo info;
1051
1052 assert(am <= VTD_MAMV);
1053 info.domain_id = domain_id;
1054 info.addr = addr;
1055 info.mask = ~((1 << am) - 1);
1056 g_hash_table_foreach_remove(s->iotlb, vtd_hash_remove_by_page, &info);
1057}
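/*
 * Editorial note (not part of the original source): the address-mask value am
 * selects how many 4KiB pages a page-selective invalidation covers: the
 * request spans 2^am pages, so am == 0 invalidates a single page and am == 2
 * invalidates an aligned block of four pages (info.mask above becomes ~0x3 in
 * frame-number terms).
 */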
1058
1059/* Flush IOTLB
1060 * Returns the IOTLB Actual Invalidation Granularity.
1061 * @val: the content of the IOTLB_REG
1062 */
1063static uint64_t vtd_iotlb_flush(IntelIOMMUState *s, uint64_t val)
1064{
1065 uint64_t iaig;
1066 uint64_t type = val & VTD_TLB_FLUSH_GRANU_MASK;
1067 uint16_t domain_id;
1068 hwaddr addr;
1069 uint8_t am;
1070
1071 switch (type) {
1072 case VTD_TLB_GLOBAL_FLUSH:
1073 VTD_DPRINTF(INV, "global invalidation");
1074 iaig = VTD_TLB_GLOBAL_FLUSH_A;
1075 vtd_iotlb_global_invalidate(s);
1076 break;
1077
1078 case VTD_TLB_DSI_FLUSH:
1079 domain_id = VTD_TLB_DID(val);
1080 VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1081 domain_id);
1082 iaig = VTD_TLB_DSI_FLUSH_A;
1083 vtd_iotlb_domain_invalidate(s, domain_id);
1084 break;
1085
1086 case VTD_TLB_PSI_FLUSH:
1087 domain_id = VTD_TLB_DID(val);
1088 addr = vtd_get_quad_raw(s, DMAR_IVA_REG);
1089 am = VTD_IVA_AM(addr);
1090 addr = VTD_IVA_ADDR(addr);
1091 VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
1092 " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
1093 if (am > VTD_MAMV) {
1094 VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
1095 "%"PRIu8, (uint8_t)VTD_MAMV);
1096 iaig = 0;
1097 break;
1098 }
1099 iaig = VTD_TLB_PSI_FLUSH_A;
1100 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1101 break;
1102
1103 default:
1104 VTD_DPRINTF(GENERAL, "error: invalid granularity");
1105 iaig = 0;
1106 }
1107 return iaig;
1108}
1109
1110static inline bool vtd_queued_inv_enable_check(IntelIOMMUState *s)
1111{
1112 return s->iq_tail == 0;
1113}
1114
1115static inline bool vtd_queued_inv_disable_check(IntelIOMMUState *s)
1116{
1117 return s->qi_enabled && (s->iq_tail == s->iq_head) &&
1118 (s->iq_last_desc_type == VTD_INV_DESC_WAIT);
1119}
1120
1121static void vtd_handle_gcmd_qie(IntelIOMMUState *s, bool en)
1122{
1123 uint64_t iqa_val = vtd_get_quad_raw(s, DMAR_IQA_REG);
1124
1125 VTD_DPRINTF(INV, "Queued Invalidation Enable %s", (en ? "on" : "off"));
1126 if (en) {
1127 if (vtd_queued_inv_enable_check(s)) {
1128 s->iq = iqa_val & VTD_IQA_IQA_MASK;
1129 /* 2^(x+8) entries */
1130 s->iq_size = 1UL << ((iqa_val & VTD_IQA_QS) + 8);
1131 s->qi_enabled = true;
1132 VTD_DPRINTF(INV, "DMAR_IQA_REG 0x%"PRIx64, iqa_val);
1133 VTD_DPRINTF(INV, "Invalidation Queue addr 0x%"PRIx64 " size %d",
1134 s->iq, s->iq_size);
1135 /* Ok - report back to driver */
1136 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_QIES);
1137 } else {
1138 VTD_DPRINTF(GENERAL, "error: can't enable Queued Invalidation: "
1139 "tail %"PRIu16, s->iq_tail);
1140 }
1141 } else {
1142 if (vtd_queued_inv_disable_check(s)) {
1143 /* disable Queued Invalidation */
1144 vtd_set_quad_raw(s, DMAR_IQH_REG, 0);
1145 s->iq_head = 0;
1146 s->qi_enabled = false;
1147 /* Ok - report back to driver */
1148 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_QIES, 0);
1149 } else {
1150 VTD_DPRINTF(GENERAL, "error: can't disable Queued Invalidation: "
1151 "head %"PRIu16 ", tail %"PRIu16
1152 ", last_descriptor %"PRIu8,
1153 s->iq_head, s->iq_tail, s->iq_last_desc_type);
1154 }
1155 }
1156}
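/*
 * Editorial note (not part of the original source): the QS field of IQA_REG
 * encodes the invalidation queue size as 2^(QS + 8) descriptors, so QS == 0
 * gives a 256-entry queue and QS == 7 the maximum of 32768 entries; the queue
 * base itself comes from the 4KiB-aligned address part of the same register,
 * which is what the masking above extracts.
 */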
1157
1158/* Set Root Table Pointer */
1159static void vtd_handle_gcmd_srtp(IntelIOMMUState *s)
1160{
1161 VTD_DPRINTF(CSR, "set Root Table Pointer");
1162
1163 vtd_root_table_setup(s);
1164 /* Ok - report back to driver */
1165 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_RTPS);
1166}
1167
1168/* Set Interrupt Remap Table Pointer */
1169static void vtd_handle_gcmd_sirtp(IntelIOMMUState *s)
1170{
1171 VTD_DPRINTF(CSR, "set Interrupt Remap Table Pointer");
1172
1173 vtd_interrupt_remap_table_setup(s);
1174 /* Ok - report back to driver */
1175 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRTPS);
1176}
1177
1178/* Handle Translation Enable/Disable */
1179static void vtd_handle_gcmd_te(IntelIOMMUState *s, bool en)
1180{
1181 VTD_DPRINTF(CSR, "Translation Enable %s", (en ? "on" : "off"));
1182
1183 if (en) {
1184 s->dmar_enabled = true;
1185 /* Ok - report back to driver */
1186 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_TES);
1187 } else {
1188 s->dmar_enabled = false;
1189
1190 /* Clear the index of Fault Recording Register */
1191 s->next_frcd_reg = 0;
1192 /* Ok - report back to driver */
1193 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_TES, 0);
1194 }
1195}
1196
1197/* Handle Interrupt Remap Enable/Disable */
1198static void vtd_handle_gcmd_ire(IntelIOMMUState *s, bool en)
1199{
1200 VTD_DPRINTF(CSR, "Interrupt Remap Enable %s", (en ? "on" : "off"));
1201
1202 if (en) {
1203 s->intr_enabled = true;
1204 /* Ok - report back to driver */
1205 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, 0, VTD_GSTS_IRES);
1206 } else {
1207 s->intr_enabled = false;
1208 /* Ok - report back to driver */
1209 vtd_set_clear_mask_long(s, DMAR_GSTS_REG, VTD_GSTS_IRES, 0);
1210 }
1211}
1212
1213/* Handle write to Global Command Register */
1214static void vtd_handle_gcmd_write(IntelIOMMUState *s)
1215{
1216 uint32_t status = vtd_get_long_raw(s, DMAR_GSTS_REG);
1217 uint32_t val = vtd_get_long_raw(s, DMAR_GCMD_REG);
1218 uint32_t changed = status ^ val;
1219
1220 VTD_DPRINTF(CSR, "value 0x%"PRIx32 " status 0x%"PRIx32, val, status);
1221 if (changed & VTD_GCMD_TE) {
1222 /* Translation enable/disable */
1223 vtd_handle_gcmd_te(s, val & VTD_GCMD_TE);
1224 }
1225 if (val & VTD_GCMD_SRTP) {
1226 /* Set/update the root-table pointer */
1227 vtd_handle_gcmd_srtp(s);
1228 }
1229 if (changed & VTD_GCMD_QIE) {
1230 /* Queued Invalidation Enable */
1231 vtd_handle_gcmd_qie(s, val & VTD_GCMD_QIE);
1232 }
1233 if (val & VTD_GCMD_SIRTP) {
1234 /* Set/update the interrupt remapping root-table pointer */
1235 vtd_handle_gcmd_sirtp(s);
1236 }
1237 if (changed & VTD_GCMD_IRE) {
1238 /* Interrupt remap enable/disable */
1239 vtd_handle_gcmd_ire(s, val & VTD_GCMD_IRE);
1240 }
1241}
1242
1243/* Handle write to Context Command Register */
1244static void vtd_handle_ccmd_write(IntelIOMMUState *s)
1245{
1246 uint64_t ret;
1247 uint64_t val = vtd_get_quad_raw(s, DMAR_CCMD_REG);
1248
1249 /* Context-cache invalidation request */
1250 if (val & VTD_CCMD_ICC) {
1251 if (s->qi_enabled) {
1252 VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
1253 "should not use register-based invalidation");
1254 return;
1255 }
1256 ret = vtd_context_cache_invalidate(s, val);
1257 /* Invalidation completed. Change something to show */
1258 vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_ICC, 0ULL);
1259 ret = vtd_set_clear_mask_quad(s, DMAR_CCMD_REG, VTD_CCMD_CAIG_MASK,
1260 ret);
1261 VTD_DPRINTF(INV, "CCMD_REG write-back val: 0x%"PRIx64, ret);
1262 }
1263}
1264
1265/* Handle write to IOTLB Invalidation Register */
1266static void vtd_handle_iotlb_write(IntelIOMMUState *s)
1267{
1268 uint64_t ret;
1269 uint64_t val = vtd_get_quad_raw(s, DMAR_IOTLB_REG);
1270
1271 /* IOTLB invalidation request */
1272 if (val & VTD_TLB_IVT) {
1273 if (s->qi_enabled) {
1274 VTD_DPRINTF(GENERAL, "error: Queued Invalidation enabled, "
1275 "should not use register-based invalidation");
1276 return;
1277 }
1278 ret = vtd_iotlb_flush(s, val);
1279 /* Invalidation completed. Change something to show */
1280 vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG, VTD_TLB_IVT, 0ULL);
1281 ret = vtd_set_clear_mask_quad(s, DMAR_IOTLB_REG,
1282 VTD_TLB_FLUSH_GRANU_MASK_A, ret);
1283 VTD_DPRINTF(INV, "IOTLB_REG write-back val: 0x%"PRIx64, ret);
1284 }
1285}
1286
1287/* Fetch an Invalidation Descriptor from the Invalidation Queue */
1288static bool vtd_get_inv_desc(dma_addr_t base_addr, uint32_t offset,
1289 VTDInvDesc *inv_desc)
1290{
1291 dma_addr_t addr = base_addr + offset * sizeof(*inv_desc);
1292 if (dma_memory_read(&address_space_memory, addr, inv_desc,
1293 sizeof(*inv_desc))) {
1294 VTD_DPRINTF(GENERAL, "error: fail to fetch Invalidation Descriptor "
1295 "base_addr 0x%"PRIx64 " offset %"PRIu32, base_addr, offset);
1296 inv_desc->lo = 0;
1297 inv_desc->hi = 0;
1298
1299 return false;
1300 }
1301 inv_desc->lo = le64_to_cpu(inv_desc->lo);
1302 inv_desc->hi = le64_to_cpu(inv_desc->hi);
1303 return true;
1304}
1305
1306static bool vtd_process_wait_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1307{
1308 if ((inv_desc->hi & VTD_INV_DESC_WAIT_RSVD_HI) ||
1309 (inv_desc->lo & VTD_INV_DESC_WAIT_RSVD_LO)) {
1310 VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Invalidation "
1311 "Wait Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1312 inv_desc->hi, inv_desc->lo);
1313 return false;
1314 }
1315 if (inv_desc->lo & VTD_INV_DESC_WAIT_SW) {
1316 /* Status Write */
1317 uint32_t status_data = (uint32_t)(inv_desc->lo >>
1318 VTD_INV_DESC_WAIT_DATA_SHIFT);
1319
1320 assert(!(inv_desc->lo & VTD_INV_DESC_WAIT_IF));
1321
1322 /* FIXME: need to be masked with HAW? */
1323 dma_addr_t status_addr = inv_desc->hi;
1324 VTD_DPRINTF(INV, "status data 0x%x, status addr 0x%"PRIx64,
1325 status_data, status_addr);
1326 status_data = cpu_to_le32(status_data);
1327 if (dma_memory_write(&address_space_memory, status_addr, &status_data,
1328 sizeof(status_data))) {
1329 VTD_DPRINTF(GENERAL, "error: fail to perform a coherent write");
1330 return false;
1331 }
1332 } else if (inv_desc->lo & VTD_INV_DESC_WAIT_IF) {
1333 /* Interrupt flag */
1334 VTD_DPRINTF(INV, "Invalidation Wait Descriptor interrupt completion");
1335 vtd_generate_completion_event(s);
1336 } else {
1337 VTD_DPRINTF(GENERAL, "error: invalid Invalidation Wait Descriptor: "
1338 "hi 0x%"PRIx64 " lo 0x%"PRIx64, inv_desc->hi, inv_desc->lo);
1339 return false;
1340 }
1341 return true;
1342}
1343
1344static bool vtd_process_context_cache_desc(IntelIOMMUState *s,
1345 VTDInvDesc *inv_desc)
1346{
1347 if ((inv_desc->lo & VTD_INV_DESC_CC_RSVD) || inv_desc->hi) {
1348 VTD_DPRINTF(GENERAL, "error: non-zero reserved field in Context-cache "
1349 "Invalidate Descriptor");
1350 return false;
1351 }
1352 switch (inv_desc->lo & VTD_INV_DESC_CC_G) {
1353 case VTD_INV_DESC_CC_DOMAIN:
1354 VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1355 (uint16_t)VTD_INV_DESC_CC_DID(inv_desc->lo));
1356 /* Fall through */
1357 case VTD_INV_DESC_CC_GLOBAL:
1358 VTD_DPRINTF(INV, "global invalidation");
1359 vtd_context_global_invalidate(s);
1360 break;
1361
1362 case VTD_INV_DESC_CC_DEVICE:
1363 vtd_context_device_invalidate(s, VTD_INV_DESC_CC_SID(inv_desc->lo),
1364 VTD_INV_DESC_CC_FM(inv_desc->lo));
1365 break;
1366
1367 default:
1368 VTD_DPRINTF(GENERAL, "error: invalid granularity in Context-cache "
1369 "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1370 inv_desc->hi, inv_desc->lo);
1371 return false;
1372 }
1373 return true;
1374}
1375
1376static bool vtd_process_iotlb_desc(IntelIOMMUState *s, VTDInvDesc *inv_desc)
1377{
1378 uint16_t domain_id;
1379 uint8_t am;
1380 hwaddr addr;
1381
1382 if ((inv_desc->lo & VTD_INV_DESC_IOTLB_RSVD_LO) ||
1383 (inv_desc->hi & VTD_INV_DESC_IOTLB_RSVD_HI)) {
1384 VTD_DPRINTF(GENERAL, "error: non-zero reserved field in IOTLB "
1385 "Invalidate Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1386 inv_desc->hi, inv_desc->lo);
1387 return false;
1388 }
1389
1390 switch (inv_desc->lo & VTD_INV_DESC_IOTLB_G) {
1391 case VTD_INV_DESC_IOTLB_GLOBAL:
1392 VTD_DPRINTF(INV, "global invalidation");
1393 vtd_iotlb_global_invalidate(s);
1394 break;
1395
1396 case VTD_INV_DESC_IOTLB_DOMAIN:
1397 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1398 VTD_DPRINTF(INV, "domain-selective invalidation domain 0x%"PRIx16,
1399 domain_id);
1400 vtd_iotlb_domain_invalidate(s, domain_id);
1401 break;
1402
1403 case VTD_INV_DESC_IOTLB_PAGE:
1404 domain_id = VTD_INV_DESC_IOTLB_DID(inv_desc->lo);
1405 addr = VTD_INV_DESC_IOTLB_ADDR(inv_desc->hi);
1406 am = VTD_INV_DESC_IOTLB_AM(inv_desc->hi);
1407 VTD_DPRINTF(INV, "page-selective invalidation domain 0x%"PRIx16
1408 " addr 0x%"PRIx64 " mask %"PRIu8, domain_id, addr, am);
1409 if (am > VTD_MAMV) {
1410 VTD_DPRINTF(GENERAL, "error: supported max address mask value is "
1411 "%"PRIu8, (uint8_t)VTD_MAMV);
1412 return false;
1413 }
1414 vtd_iotlb_page_invalidate(s, domain_id, addr, am);
1415 break;
1416
1417 default:
1418 VTD_DPRINTF(GENERAL, "error: invalid granularity in IOTLB Invalidate "
1419 "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1420 inv_desc->hi, inv_desc->lo);
1421 return false;
1422 }
1423 return true;
1424}
1425
1426static bool vtd_process_inv_iec_desc(IntelIOMMUState *s,
1427 VTDInvDesc *inv_desc)
1428{
1429 VTD_DPRINTF(INV, "inv ir glob %d index %d mask %d",
1430 inv_desc->iec.granularity,
1431 inv_desc->iec.index,
1432 inv_desc->iec.index_mask);
1433
1434 vtd_iec_notify_all(s, !inv_desc->iec.granularity,
1435 inv_desc->iec.index,
1436 inv_desc->iec.index_mask);
1437
1438 return true;
1439}
1440
1441static bool vtd_process_inv_desc(IntelIOMMUState *s)
1442{
1443 VTDInvDesc inv_desc;
1444 uint8_t desc_type;
1445
1446 VTD_DPRINTF(INV, "iq head %"PRIu16, s->iq_head);
1447 if (!vtd_get_inv_desc(s->iq, s->iq_head, &inv_desc)) {
1448 s->iq_last_desc_type = VTD_INV_DESC_NONE;
1449 return false;
1450 }
1451 desc_type = inv_desc.lo & VTD_INV_DESC_TYPE;
1452 /* FIXME: should update at first or at last? */
1453 s->iq_last_desc_type = desc_type;
1454
1455 switch (desc_type) {
1456 case VTD_INV_DESC_CC:
1457 VTD_DPRINTF(INV, "Context-cache Invalidate Descriptor hi 0x%"PRIx64
1458 " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1459 if (!vtd_process_context_cache_desc(s, &inv_desc)) {
1460 return false;
1461 }
1462 break;
1463
1464 case VTD_INV_DESC_IOTLB:
1465 VTD_DPRINTF(INV, "IOTLB Invalidate Descriptor hi 0x%"PRIx64
1466 " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1467 if (!vtd_process_iotlb_desc(s, &inv_desc)) {
1468 return false;
1469 }
1470 break;
1471
1472 case VTD_INV_DESC_WAIT:
1473 VTD_DPRINTF(INV, "Invalidation Wait Descriptor hi 0x%"PRIx64
1474 " lo 0x%"PRIx64, inv_desc.hi, inv_desc.lo);
1475 if (!vtd_process_wait_desc(s, &inv_desc)) {
1476 return false;
1477 }
1478 break;
1479
1480 case VTD_INV_DESC_IEC:
1481 VTD_DPRINTF(INV, "Invalidation Interrupt Entry Cache "
1482 "Descriptor hi 0x%"PRIx64 " lo 0x%"PRIx64,
1483 inv_desc.hi, inv_desc.lo);
1484 if (!vtd_process_inv_iec_desc(s, &inv_desc)) {
1485 return false;
1486 }
1487 break;
1488
1489 default:
1490 VTD_DPRINTF(GENERAL, "error: unkonw Invalidation Descriptor type "
1491 "hi 0x%"PRIx64 " lo 0x%"PRIx64 " type %"PRIu8,
1492 inv_desc.hi, inv_desc.lo, desc_type);
1493 return false;
1494 }
1495 s->iq_head++;
1496 if (s->iq_head == s->iq_size) {
1497 s->iq_head = 0;
1498 }
1499 return true;
1500}
1501
1502/* Try to fetch and process more Invalidation Descriptors */
1503static void vtd_fetch_inv_desc(IntelIOMMUState *s)
1504{
1505 VTD_DPRINTF(INV, "fetch Invalidation Descriptors");
1506 if (s->iq_tail >= s->iq_size) {
1507 /* Detects an invalid Tail pointer */
1508 VTD_DPRINTF(GENERAL, "error: iq_tail is %"PRIu16
1509 " while iq_size is %"PRIu16, s->iq_tail, s->iq_size);
1510 vtd_handle_inv_queue_error(s);
1511 return;
1512 }
1513 while (s->iq_head != s->iq_tail) {
1514 if (!vtd_process_inv_desc(s)) {
1515 /* Invalidation Queue Errors */
1516 vtd_handle_inv_queue_error(s);
1517 break;
1518 }
1519 /* Must update the IQH_REG in time */
1520 vtd_set_quad_raw(s, DMAR_IQH_REG,
1521 (((uint64_t)(s->iq_head)) << VTD_IQH_QH_SHIFT) &
1522 VTD_IQH_QH_MASK);
1523 }
1524}
1525
1526/* Handle write to Invalidation Queue Tail Register */
1527static void vtd_handle_iqt_write(IntelIOMMUState *s)
1528{
1529 uint64_t val = vtd_get_quad_raw(s, DMAR_IQT_REG);
1530
1531 s->iq_tail = VTD_IQT_QT(val);
1532 VTD_DPRINTF(INV, "set iq tail %"PRIu16, s->iq_tail);
1533 if (s->qi_enabled && !(vtd_get_long_raw(s, DMAR_FSTS_REG) & VTD_FSTS_IQE)) {
1534 /* Process Invalidation Queue here */
1535 vtd_fetch_inv_desc(s);
1536 }
1537}
1538
1539static void vtd_handle_fsts_write(IntelIOMMUState *s)
1540{
1541 uint32_t fsts_reg = vtd_get_long_raw(s, DMAR_FSTS_REG);
1542 uint32_t fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
1543 uint32_t status_fields = VTD_FSTS_PFO | VTD_FSTS_PPF | VTD_FSTS_IQE;
1544
1545 if ((fectl_reg & VTD_FECTL_IP) && !(fsts_reg & status_fields)) {
1546 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
1547 VTD_DPRINTF(FLOG, "all pending interrupt conditions serviced, clear "
1548 "IP field of FECTL_REG");
1549 }
1550 /* FIXME: when IQE is Clear, should we try to fetch some Invalidation
1551 * Descriptors if there are any when Queued Invalidation is enabled?
1552 */
1553}
1554
1555static void vtd_handle_fectl_write(IntelIOMMUState *s)
1556{
1557 uint32_t fectl_reg;
1558 /* FIXME: when software clears the IM field, check the IP field. But do we
1559 * need to compare the old value and the new value to conclude that
1560 * software clears the IM field? Or just check if the IM field is zero?
1561 */
1562 fectl_reg = vtd_get_long_raw(s, DMAR_FECTL_REG);
1563 if ((fectl_reg & VTD_FECTL_IP) && !(fectl_reg & VTD_FECTL_IM)) {
1564 vtd_generate_interrupt(s, DMAR_FEADDR_REG, DMAR_FEDATA_REG);
1565 vtd_set_clear_mask_long(s, DMAR_FECTL_REG, VTD_FECTL_IP, 0);
1566 VTD_DPRINTF(FLOG, "IM field is cleared, generate "
1567 "fault event interrupt");
1568 }
1569}
1570
1571static void vtd_handle_ics_write(IntelIOMMUState *s)
1572{
1573 uint32_t ics_reg = vtd_get_long_raw(s, DMAR_ICS_REG);
1574 uint32_t iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1575
1576 if ((iectl_reg & VTD_IECTL_IP) && !(ics_reg & VTD_ICS_IWC)) {
1577 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1578 VTD_DPRINTF(INV, "pending completion interrupt condition serviced, "
1579 "clear IP field of IECTL_REG");
1580 }
1581}
1582
1583static void vtd_handle_iectl_write(IntelIOMMUState *s)
1584{
1585 uint32_t iectl_reg;
1586 /* FIXME: when software clears the IM field, check the IP field. But do we
1587 * need to compare the old value and the new value to conclude that
1588 * software clears the IM field? Or just check if the IM field is zero?
1589 */
1590 iectl_reg = vtd_get_long_raw(s, DMAR_IECTL_REG);
1591 if ((iectl_reg & VTD_IECTL_IP) && !(iectl_reg & VTD_IECTL_IM)) {
1592 vtd_generate_interrupt(s, DMAR_IEADDR_REG, DMAR_IEDATA_REG);
1593 vtd_set_clear_mask_long(s, DMAR_IECTL_REG, VTD_IECTL_IP, 0);
1594 VTD_DPRINTF(INV, "IM field is cleared, generate "
1595 "invalidation event interrupt");
1596 }
1597}
1598
1599static uint64_t vtd_mem_read(void *opaque, hwaddr addr, unsigned size)
1600{
1601 IntelIOMMUState *s = opaque;
1602 uint64_t val;
1603
1604 if (addr + size > DMAR_REG_SIZE) {
1605 VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
1606 ", got 0x%"PRIx64 " %d",
1607 (uint64_t)DMAR_REG_SIZE, addr, size);
1608 return (uint64_t)-1;
1609 }
1610
1611 switch (addr) {
1612 /* Root Table Address Register, 64-bit */
1613 case DMAR_RTADDR_REG:
1614 if (size == 4) {
1615 val = s->root & ((1ULL << 32) - 1);
1616 } else {
1617 val = s->root;
1618 }
1619 break;
1620
1621 case DMAR_RTADDR_REG_HI:
1622 assert(size == 4);
1623 val = s->root >> 32;
1624 break;
1625
1626 /* Invalidation Queue Address Register, 64-bit */
1627 case DMAR_IQA_REG:
1628 val = s->iq | (vtd_get_quad(s, DMAR_IQA_REG) & VTD_IQA_QS);
1629 if (size == 4) {
1630 val = val & ((1ULL << 32) - 1);
1631 }
1632 break;
1633
1634 case DMAR_IQA_REG_HI:
1635 assert(size == 4);
1636 val = s->iq >> 32;
1637 break;
1638
1639 default:
1640 if (size == 4) {
1641 val = vtd_get_long(s, addr);
1642 } else {
1643 val = vtd_get_quad(s, addr);
1644 }
1645 }
1646 VTD_DPRINTF(CSR, "addr 0x%"PRIx64 " size %d val 0x%"PRIx64,
1647 addr, size, val);
1648 return val;
1649}
1650
1651static void vtd_mem_write(void *opaque, hwaddr addr,
1652 uint64_t val, unsigned size)
1653{
1654 IntelIOMMUState *s = opaque;
1655
1656 if (addr + size > DMAR_REG_SIZE) {
1657 VTD_DPRINTF(GENERAL, "error: addr outside region: max 0x%"PRIx64
1658 ", got 0x%"PRIx64 " %d",
1659 (uint64_t)DMAR_REG_SIZE, addr, size);
1660 return;
1661 }
1662
1663 switch (addr) {
1664 /* Global Command Register, 32-bit */
1665 case DMAR_GCMD_REG:
1666 VTD_DPRINTF(CSR, "DMAR_GCMD_REG write addr 0x%"PRIx64
1667 ", size %d, val 0x%"PRIx64, addr, size, val);
1668 vtd_set_long(s, addr, val);
1669 vtd_handle_gcmd_write(s);
1670 break;
1671
1672 /* Context Command Register, 64-bit */
1673 case DMAR_CCMD_REG:
1674 VTD_DPRINTF(CSR, "DMAR_CCMD_REG write addr 0x%"PRIx64
1675 ", size %d, val 0x%"PRIx64, addr, size, val);
1676 if (size == 4) {
1677 vtd_set_long(s, addr, val);
1678 } else {
1679 vtd_set_quad(s, addr, val);
1680 vtd_handle_ccmd_write(s);
1681 }
1682 break;
1683
1684 case DMAR_CCMD_REG_HI:
1685 VTD_DPRINTF(CSR, "DMAR_CCMD_REG_HI write addr 0x%"PRIx64
1686 ", size %d, val 0x%"PRIx64, addr, size, val);
1687 assert(size == 4);
1688 vtd_set_long(s, addr, val);
1689 vtd_handle_ccmd_write(s);
1690 break;
1691
1692 /* IOTLB Invalidation Register, 64-bit */
1693 case DMAR_IOTLB_REG:
1694 VTD_DPRINTF(INV, "DMAR_IOTLB_REG write addr 0x%"PRIx64
1695 ", size %d, val 0x%"PRIx64, addr, size, val);
1696 if (size == 4) {
1697 vtd_set_long(s, addr, val);
1698 } else {
1699 vtd_set_quad(s, addr, val);
1700 vtd_handle_iotlb_write(s);
1701 }
1702 break;
1703
1704 case DMAR_IOTLB_REG_HI:
1705 VTD_DPRINTF(INV, "DMAR_IOTLB_REG_HI write addr 0x%"PRIx64
1706 ", size %d, val 0x%"PRIx64, addr, size, val);
1707 assert(size == 4);
1708 vtd_set_long(s, addr, val);
1709 vtd_handle_iotlb_write(s);
1710 break;
1711
b5a280c0
LT
1712 /* Invalidate Address Register, 64-bit */
1713 case DMAR_IVA_REG:
1714 VTD_DPRINTF(INV, "DMAR_IVA_REG write addr 0x%"PRIx64
1715 ", size %d, val 0x%"PRIx64, addr, size, val);
1716 if (size == 4) {
1717 vtd_set_long(s, addr, val);
1718 } else {
1719 vtd_set_quad(s, addr, val);
1720 }
1721 break;
1722
1723 case DMAR_IVA_REG_HI:
1724 VTD_DPRINTF(INV, "DMAR_IVA_REG_HI write addr 0x%"PRIx64
1725 ", size %d, val 0x%"PRIx64, addr, size, val);
1726 assert(size == 4);
1727 vtd_set_long(s, addr, val);
1728 break;
1729
1da12ec4
LT
1730 /* Fault Status Register, 32-bit */
1731 case DMAR_FSTS_REG:
1732 VTD_DPRINTF(FLOG, "DMAR_FSTS_REG write addr 0x%"PRIx64
1733 ", size %d, val 0x%"PRIx64, addr, size, val);
1734 assert(size == 4);
1735 vtd_set_long(s, addr, val);
1736 vtd_handle_fsts_write(s);
1737 break;
1738
1739 /* Fault Event Control Register, 32-bit */
1740 case DMAR_FECTL_REG:
1741 VTD_DPRINTF(FLOG, "DMAR_FECTL_REG write addr 0x%"PRIx64
1742 ", size %d, val 0x%"PRIx64, addr, size, val);
1743 assert(size == 4);
1744 vtd_set_long(s, addr, val);
1745 vtd_handle_fectl_write(s);
1746 break;
1747
1748 /* Fault Event Data Register, 32-bit */
1749 case DMAR_FEDATA_REG:
1750 VTD_DPRINTF(FLOG, "DMAR_FEDATA_REG write addr 0x%"PRIx64
1751 ", size %d, val 0x%"PRIx64, addr, size, val);
1752 assert(size == 4);
1753 vtd_set_long(s, addr, val);
1754 break;
1755
1756 /* Fault Event Address Register, 32-bit */
1757 case DMAR_FEADDR_REG:
1758 VTD_DPRINTF(FLOG, "DMAR_FEADDR_REG write addr 0x%"PRIx64
1759 ", size %d, val 0x%"PRIx64, addr, size, val);
1760 assert(size == 4);
1761 vtd_set_long(s, addr, val);
1762 break;
1763
1764 /* Fault Event Upper Address Register, 32-bit */
1765 case DMAR_FEUADDR_REG:
1766 VTD_DPRINTF(FLOG, "DMAR_FEUADDR_REG write addr 0x%"PRIx64
1767 ", size %d, val 0x%"PRIx64, addr, size, val);
1768 assert(size == 4);
1769 vtd_set_long(s, addr, val);
1770 break;
1771
1772 /* Protected Memory Enable Register, 32-bit */
1773 case DMAR_PMEN_REG:
1774 VTD_DPRINTF(CSR, "DMAR_PMEN_REG write addr 0x%"PRIx64
1775 ", size %d, val 0x%"PRIx64, addr, size, val);
1776 assert(size == 4);
1777 vtd_set_long(s, addr, val);
1778 break;
1779
1780 /* Root Table Address Register, 64-bit */
1781 case DMAR_RTADDR_REG:
1782 VTD_DPRINTF(CSR, "DMAR_RTADDR_REG write addr 0x%"PRIx64
1783 ", size %d, val 0x%"PRIx64, addr, size, val);
1784 if (size == 4) {
1785 vtd_set_long(s, addr, val);
1786 } else {
1787 vtd_set_quad(s, addr, val);
1788 }
1789 break;
1790
1791 case DMAR_RTADDR_REG_HI:
1792 VTD_DPRINTF(CSR, "DMAR_RTADDR_REG_HI write addr 0x%"PRIx64
1793 ", size %d, val 0x%"PRIx64, addr, size, val);
1794 assert(size == 4);
1795 vtd_set_long(s, addr, val);
1796 break;
1797
ed7b8fbc
LT
1798 /* Invalidation Queue Tail Register, 64-bit */
1799 case DMAR_IQT_REG:
1800 VTD_DPRINTF(INV, "DMAR_IQT_REG write addr 0x%"PRIx64
1801 ", size %d, val 0x%"PRIx64, addr, size, val);
1802 if (size == 4) {
1803 vtd_set_long(s, addr, val);
1804 } else {
1805 vtd_set_quad(s, addr, val);
1806 }
1807 vtd_handle_iqt_write(s);
1808 break;
1809
1810 case DMAR_IQT_REG_HI:
1811 VTD_DPRINTF(INV, "DMAR_IQT_REG_HI write addr 0x%"PRIx64
1812 ", size %d, val 0x%"PRIx64, addr, size, val);
1813 assert(size == 4);
1814 vtd_set_long(s, addr, val);
1815 /* Bits 63:19 of IQT_REG are RsvdZ; nothing to do here */
1816 break;
1817
1818 /* Invalidation Queue Address Register, 64-bit */
1819 case DMAR_IQA_REG:
1820 VTD_DPRINTF(INV, "DMAR_IQA_REG write addr 0x%"PRIx64
1821 ", size %d, val 0x%"PRIx64, addr, size, val);
1822 if (size == 4) {
1823 vtd_set_long(s, addr, val);
1824 } else {
1825 vtd_set_quad(s, addr, val);
1826 }
1827 break;
1828
1829 case DMAR_IQA_REG_HI:
1830 VTD_DPRINTF(INV, "DMAR_IQA_REG_HI write addr 0x%"PRIx64
1831 ", size %d, val 0x%"PRIx64, addr, size, val);
1832 assert(size == 4);
1833 vtd_set_long(s, addr, val);
1834 break;
1835
1836 /* Invalidation Completion Status Register, 32-bit */
1837 case DMAR_ICS_REG:
1838 VTD_DPRINTF(INV, "DMAR_ICS_REG write addr 0x%"PRIx64
1839 ", size %d, val 0x%"PRIx64, addr, size, val);
1840 assert(size == 4);
1841 vtd_set_long(s, addr, val);
1842 vtd_handle_ics_write(s);
1843 break;
1844
1845 /* Invalidation Event Control Register, 32-bit */
1846 case DMAR_IECTL_REG:
1847 VTD_DPRINTF(INV, "DMAR_IECTL_REG write addr 0x%"PRIx64
1848 ", size %d, val 0x%"PRIx64, addr, size, val);
1849 assert(size == 4);
1850 vtd_set_long(s, addr, val);
1851 vtd_handle_iectl_write(s);
1852 break;
1853
1854 /* Invalidation Event Data Register, 32-bit */
1855 case DMAR_IEDATA_REG:
1856 VTD_DPRINTF(INV, "DMAR_IEDATA_REG write addr 0x%"PRIx64
1857 ", size %d, val 0x%"PRIx64, addr, size, val);
1858 assert(size == 4);
1859 vtd_set_long(s, addr, val);
1860 break;
1861
1862 /* Invalidation Event Address Register, 32-bit */
1863 case DMAR_IEADDR_REG:
1864 VTD_DPRINTF(INV, "DMAR_IEADDR_REG write addr 0x%"PRIx64
1865 ", size %d, val 0x%"PRIx64, addr, size, val);
1866 assert(size == 4);
1867 vtd_set_long(s, addr, val);
1868 break;
1869
1870 /* Invalidation Event Upper Address Register, 32-bit */
1871 case DMAR_IEUADDR_REG:
1872 VTD_DPRINTF(INV, "DMAR_IEUADDR_REG write addr 0x%"PRIx64
1873 ", size %d, val 0x%"PRIx64, addr, size, val);
1874 assert(size == 4);
1875 vtd_set_long(s, addr, val);
1876 break;
1877
1da12ec4
LT
1878 /* Fault Recording Registers, 128-bit */
1879 case DMAR_FRCD_REG_0_0:
1880 VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_0 write addr 0x%"PRIx64
1881 ", size %d, val 0x%"PRIx64, addr, size, val);
1882 if (size == 4) {
1883 vtd_set_long(s, addr, val);
1884 } else {
1885 vtd_set_quad(s, addr, val);
1886 }
1887 break;
1888
1889 case DMAR_FRCD_REG_0_1:
1890 VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_1 write addr 0x%"PRIx64
1891 ", size %d, val 0x%"PRIx64, addr, size, val);
1892 assert(size == 4);
1893 vtd_set_long(s, addr, val);
1894 break;
1895
1896 case DMAR_FRCD_REG_0_2:
1897 VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_2 write addr 0x%"PRIx64
1898 ", size %d, val 0x%"PRIx64, addr, size, val);
1899 if (size == 4) {
1900 vtd_set_long(s, addr, val);
1901 } else {
1902 vtd_set_quad(s, addr, val);
1903 /* May clear bit 127 (Fault), update PPF */
1904 vtd_update_fsts_ppf(s);
1905 }
1906 break;
1907
1908 case DMAR_FRCD_REG_0_3:
1909 VTD_DPRINTF(FLOG, "DMAR_FRCD_REG_0_3 write addr 0x%"PRIx64
1910 ", size %d, val 0x%"PRIx64, addr, size, val);
1911 assert(size == 4);
1912 vtd_set_long(s, addr, val);
1913 /* May clear bit 127 (Fault), update PPF */
1914 vtd_update_fsts_ppf(s);
1915 break;
1916
a5861439
PX
1917 case DMAR_IRTA_REG:
1918 VTD_DPRINTF(IR, "DMAR_IRTA_REG write addr 0x%"PRIx64
1919 ", size %d, val 0x%"PRIx64, addr, size, val);
1920 if (size == 4) {
1921 vtd_set_long(s, addr, val);
1922 } else {
1923 vtd_set_quad(s, addr, val);
1924 }
1925 break;
1926
1927 case DMAR_IRTA_REG_HI:
1928 VTD_DPRINTF(IR, "DMAR_IRTA_REG_HI write addr 0x%"PRIx64
1929 ", size %d, val 0x%"PRIx64, addr, size, val);
1930 assert(size == 4);
1931 vtd_set_long(s, addr, val);
1932 break;
1933
1da12ec4
LT
1934 default:
1935 VTD_DPRINTF(GENERAL, "error: unhandled reg write addr 0x%"PRIx64
1936 ", size %d, val 0x%"PRIx64, addr, size, val);
1937 if (size == 4) {
1938 vtd_set_long(s, addr, val);
1939 } else {
1940 vtd_set_quad(s, addr, val);
1941 }
1942 }
1943}
1944
1945static IOMMUTLBEntry vtd_iommu_translate(MemoryRegion *iommu, hwaddr addr,
1946 bool is_write)
1947{
1948 VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
1949 IntelIOMMUState *s = vtd_as->iommu_state;
1da12ec4
LT
1950 IOMMUTLBEntry ret = {
1951 .target_as = &address_space_memory,
1952 .iova = addr,
1953 .translated_addr = 0,
1954 .addr_mask = ~(hwaddr)0,
1955 .perm = IOMMU_NONE,
1956 };
1957
1958 if (!s->dmar_enabled) {
1959 /* DMAR disabled, passthrough, use 4k page */
1960 ret.iova = addr & VTD_PAGE_MASK_4K;
1961 ret.translated_addr = addr & VTD_PAGE_MASK_4K;
1962 ret.addr_mask = ~VTD_PAGE_MASK_4K;
1963 ret.perm = IOMMU_RW;
1964 return ret;
1965 }
1966
7df953bd 1967 vtd_do_iommu_translate(vtd_as, vtd_as->bus, vtd_as->devfn, addr,
d92fa2dc 1968 is_write, &ret);
1da12ec4
LT
1969 VTD_DPRINTF(MMU,
1970 "bus %"PRIu8 " slot %"PRIu8 " func %"PRIu8 " devfn %"PRIu8
7df953bd 1971 " gpa 0x%"PRIx64 " hpa 0x%"PRIx64, pci_bus_num(vtd_as->bus),
d92fa2dc
LT
1972 VTD_PCI_SLOT(vtd_as->devfn), VTD_PCI_FUNC(vtd_as->devfn),
1973 vtd_as->devfn, addr, ret.translated_addr);
1da12ec4
LT
1974 return ret;
1975}
1976
5bf3d319
PX
1977static void vtd_iommu_notify_flag_changed(MemoryRegion *iommu,
1978 IOMMUNotifierFlag old,
1979 IOMMUNotifierFlag new)
3cb3b154
AW
1980{
1981 VTDAddressSpace *vtd_as = container_of(iommu, VTDAddressSpace, iommu);
1982
a3276f78
PX
1983 if (new & IOMMU_NOTIFIER_MAP) {
1984 error_report("Device at bus %s addr %02x.%d requires an iommu "
1985 "notifier, which is currently not supported by "
1986 "intel-iommu emulation",
1987 vtd_as->bus->qbus.name, PCI_SLOT(vtd_as->devfn),
1988 PCI_FUNC(vtd_as->devfn));
1989 exit(1);
1990 }
3cb3b154
AW
1991}
1992
1da12ec4
LT
1993static const VMStateDescription vtd_vmstate = {
1994 .name = "iommu-intel",
1995 .unmigratable = 1,
1996};
1997
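/* All CSR accesses go through these callbacks; only 4- and 8-byte accesses
 * are accepted, and .impl guarantees the callbacks never see other sizes.
 */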
1998static const MemoryRegionOps vtd_mem_ops = {
1999 .read = vtd_mem_read,
2000 .write = vtd_mem_write,
2001 .endianness = DEVICE_LITTLE_ENDIAN,
2002 .impl = {
2003 .min_access_size = 4,
2004 .max_access_size = 8,
2005 },
2006 .valid = {
2007 .min_access_size = 4,
2008 .max_access_size = 8,
2009 },
2010};
2011
2012static Property vtd_properties[] = {
2013 DEFINE_PROP_UINT32("version", IntelIOMMUState, version, 0),
2014 DEFINE_PROP_END_OF_LIST(),
2015};
2016
651e4cef
PX
2017/* Read IRTE entry with specific index */
2018static int vtd_irte_get(IntelIOMMUState *iommu, uint16_t index,
bc38ee10 2019 VTD_IR_TableEntry *entry, uint16_t sid)
651e4cef 2020{
ede9c94a
PX
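 /* Per-SQ masks used for source-id validation: larger SQ values ignore more
 * low-order bits of the requester's function number when comparing it
 * against the IRTE source_id field.
 */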
2021 static const uint16_t vtd_svt_mask[VTD_SQ_MAX] = \
2022 {0xffff, 0xfffb, 0xfff9, 0xfff8};
651e4cef 2023 dma_addr_t addr = 0x00;
ede9c94a
PX
2024 uint16_t mask, source_id;
2025 uint8_t bus, bus_max, bus_min;
651e4cef
PX
2026
2027 addr = iommu->intr_root + index * sizeof(*entry);
2028 if (dma_memory_read(&address_space_memory, addr, entry,
2029 sizeof(*entry))) {
2030 VTD_DPRINTF(GENERAL, "error: failed to access IR root at 0x%"PRIx64
2031 " + %"PRIu16, iommu->intr_root, index);
2032 return -VTD_FR_IR_ROOT_INVAL;
2033 }
2034
bc38ee10 2035 if (!entry->irte.present) {
651e4cef
PX
2036 VTD_DPRINTF(GENERAL, "error: present flag not set in IRTE"
2037 " entry index %u value 0x%"PRIx64 " 0x%"PRIx64,
2038 index, le64_to_cpu(entry->data[1]),
2039 le64_to_cpu(entry->data[0]));
2040 return -VTD_FR_IR_ENTRY_P;
2041 }
2042
bc38ee10
MT
2043 if (entry->irte.__reserved_0 || entry->irte.__reserved_1 ||
2044 entry->irte.__reserved_2) {
651e4cef
PX
2045 VTD_DPRINTF(GENERAL, "error: IRTE entry index %"PRIu16
2046 " reserved fields non-zero: 0x%"PRIx64 " 0x%"PRIx64,
2047 index, le64_to_cpu(entry->data[1]),
2048 le64_to_cpu(entry->data[0]));
2049 return -VTD_FR_IR_IRTE_RSVD;
2050 }
2051
ede9c94a
PX
2052 if (sid != X86_IOMMU_SID_INVALID) {
2053 /* Validate IRTE SID */
bc38ee10
MT
2054 source_id = le32_to_cpu(entry->irte.source_id);
2055 switch (entry->irte.sid_vtype) {
ede9c94a
PX
2056 case VTD_SVT_NONE:
2057 VTD_DPRINTF(IR, "No SID validation for IRTE index %d", index);
2058 break;
2059
2060 case VTD_SVT_ALL:
bc38ee10 2061 mask = vtd_svt_mask[entry->irte.sid_q];
ede9c94a
PX
2062 if ((source_id & mask) != (sid & mask)) {
2063 VTD_DPRINTF(GENERAL, "SID validation for IRTE index "
2064 "%d failed (reqid 0x%04x sid 0x%04x)", index,
2065 sid, source_id);
2066 return -VTD_FR_IR_SID_ERR;
2067 }
2068 break;
2069
2070 case VTD_SVT_BUS:
2071 bus_max = source_id >> 8;
2072 bus_min = source_id & 0xff;
2073 bus = sid >> 8;
2074 if (bus > bus_max || bus < bus_min) {
2075 VTD_DPRINTF(GENERAL, "SID validation for IRTE index %d "
2076 "failed (bus %d outside %d-%d)", index, bus,
2077 bus_min, bus_max);
2078 return -VTD_FR_IR_SID_ERR;
2079 }
2080 break;
2081
2082 default:
2083 VTD_DPRINTF(GENERAL, "Invalid SVT bits (0x%x) in IRTE index "
bc38ee10 2084 "%d", entry->irte.sid_vtype, index);
ede9c94a
PX
2085 /* Take this as verification failure. */
2086 return -VTD_FR_IR_SID_ERR;
2087 break;
2088 }
2089 }
651e4cef
PX
2090
2091 return 0;
2092}
2093
2094/* Fetch IRQ information of specific IR index */
ede9c94a
PX
2095static int vtd_remap_irq_get(IntelIOMMUState *iommu, uint16_t index,
2096 VTDIrq *irq, uint16_t sid)
651e4cef 2097{
bc38ee10 2098 VTD_IR_TableEntry irte = {};
651e4cef
PX
2099 int ret = 0;
2100
ede9c94a 2101 ret = vtd_irte_get(iommu, index, &irte, sid);
651e4cef
PX
2102 if (ret) {
2103 return ret;
2104 }
2105
bc38ee10
MT
2106 irq->trigger_mode = irte.irte.trigger_mode;
2107 irq->vector = irte.irte.vector;
2108 irq->delivery_mode = irte.irte.delivery_mode;
2109 irq->dest = le32_to_cpu(irte.irte.dest_id);
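 /* Without extended interrupt mode (EIME clear, i.e. xAPIC), only bits
 * 15:8 of the destination field carry the 8-bit APIC ID.
 */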
28589311 2110 if (!iommu->intr_eime) {
651e4cef
PX
2111#define VTD_IR_APIC_DEST_MASK (0xff00ULL)
2112#define VTD_IR_APIC_DEST_SHIFT (8)
28589311
JK
2113 irq->dest = (irq->dest & VTD_IR_APIC_DEST_MASK) >>
2114 VTD_IR_APIC_DEST_SHIFT;
2115 }
bc38ee10
MT
2116 irq->dest_mode = irte.irte.dest_mode;
2117 irq->redir_hint = irte.irte.redir_hint;
651e4cef
PX
2118
2119 VTD_DPRINTF(IR, "remapping interrupt index %d: trig:%u,vec:%u,"
2120 "deliver:%u,dest:%u,dest_mode:%u", index,
2121 irq->trigger_mode, irq->vector, irq->delivery_mode,
2122 irq->dest, irq->dest_mode);
2123
2124 return 0;
2125}
2126
2127/* Generate one MSI message from VTDIrq info */
2128static void vtd_generate_msi_message(VTDIrq *irq, MSIMessage *msg_out)
2129{
2130 VTD_MSIMessage msg = {};
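 /* VTD_MSIMessage overlays these bit-fields on the raw msi_addr/msi_data
 * words, so the assembled message can be copied out directly below.
 */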
2131
2132 /* Generate address bits */
2133 msg.dest_mode = irq->dest_mode;
2134 msg.redir_hint = irq->redir_hint;
2135 msg.dest = irq->dest;
2136 msg.__addr_head = cpu_to_le32(0xfee);
2137 /* Keep this from original MSI address bits */
2138 msg.__not_used = irq->msi_addr_last_bits;
2139
2140 /* Generate data bits */
2141 msg.vector = irq->vector;
2142 msg.delivery_mode = irq->delivery_mode;
2143 msg.level = 1;
2144 msg.trigger_mode = irq->trigger_mode;
2145
2146 msg_out->address = msg.msi_addr;
2147 msg_out->data = msg.msi_data;
2148}
2149
2150/* Interrupt remapping for MSI/MSI-X entry */
2151static int vtd_interrupt_remap_msi(IntelIOMMUState *iommu,
2152 MSIMessage *origin,
ede9c94a
PX
2153 MSIMessage *translated,
2154 uint16_t sid)
651e4cef
PX
2155{
2156 int ret = 0;
2157 VTD_IR_MSIAddress addr;
2158 uint16_t index;
09cd058a 2159 VTDIrq irq = {};
651e4cef
PX
2160
2161 assert(origin && translated);
2162
2163 if (!iommu || !iommu->intr_enabled) {
2164 goto do_not_translate;
2165 }
2166
2167 if (origin->address & VTD_MSI_ADDR_HI_MASK) {
2168 VTD_DPRINTF(GENERAL, "error: MSI addr high 32 bits nonzero"
2169 " during interrupt remapping: 0x%"PRIx32,
2170 (uint32_t)((origin->address & VTD_MSI_ADDR_HI_MASK) >> \
2171 VTD_MSI_ADDR_HI_SHIFT));
2172 return -VTD_FR_IR_REQ_RSVD;
2173 }
2174
2175 addr.data = origin->address & VTD_MSI_ADDR_LO_MASK;
bc38ee10 2176 if (le16_to_cpu(addr.addr.__head) != 0xfee) {
651e4cef
PX
2177 VTD_DPRINTF(GENERAL, "error: MSI addr low 32 bits invalid: "
2178 "0x%"PRIx32, addr.data);
2179 return -VTD_FR_IR_REQ_RSVD;
2180 }
2181
2182 /* This is compatible mode. */
bc38ee10 2183 if (addr.addr.int_mode != VTD_IR_INT_FORMAT_REMAP) {
651e4cef
PX
2184 goto do_not_translate;
2185 }
2186
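 /* In the remappable MSI address format the 16-bit interrupt index is
 * split: index_h carries bit 15 and index_l carries bits 14:0.
 */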
bc38ee10 2187 index = addr.addr.index_h << 15 | le16_to_cpu(addr.addr.index_l);
651e4cef
PX
2188
2189#define VTD_IR_MSI_DATA_SUBHANDLE (0x0000ffff)
2190#define VTD_IR_MSI_DATA_RESERVED (0xffff0000)
2191
bc38ee10 2192 if (addr.addr.sub_valid) {
651e4cef
PX
2193 /* See VT-d spec 5.1.2.2 and 5.1.3 on subhandle */
2194 index += origin->data & VTD_IR_MSI_DATA_SUBHANDLE;
2195 }
2196
ede9c94a 2197 ret = vtd_remap_irq_get(iommu, index, &irq, sid);
651e4cef
PX
2198 if (ret) {
2199 return ret;
2200 }
2201
bc38ee10 2202 if (addr.addr.sub_valid) {
651e4cef
PX
2203 VTD_DPRINTF(IR, "received MSI interrupt");
2204 if (origin->data & VTD_IR_MSI_DATA_RESERVED) {
2205 VTD_DPRINTF(GENERAL, "error: MSI data bits non-zero for "
2206 "interrupt remappable entry: 0x%"PRIx32,
2207 origin->data);
2208 return -VTD_FR_IR_REQ_RSVD;
2209 }
2210 } else {
2211 uint8_t vector = origin->data & 0xff;
2212 VTD_DPRINTF(IR, "received IOAPIC interrupt");
2213 /* IOAPIC entry vector should be aligned with IRTE vector
2214 * (see vt-d spec 5.1.5.1). */
2215 if (vector != irq.vector) {
2216 VTD_DPRINTF(GENERAL, "IOAPIC vector inconsistent: "
2217 "entry: %d, IRTE: %d, index: %d",
2218 vector, irq.vector, index);
2219 }
2220 }
2221
2222 /*
2223 * Keep the last two bits of the original address, since the guest OS
2224 * might rely on them. Preserving them does not hurt in any case.
2225 */
bc38ee10 2226 irq.msi_addr_last_bits = addr.addr.__not_care;
651e4cef
PX
2227
2228 /* Translate VTDIrq to MSI message */
2229 vtd_generate_msi_message(&irq, translated);
2230
2231 VTD_DPRINTF(IR, "mapping MSI 0x%"PRIx64":0x%"PRIx32 " -> "
2232 "0x%"PRIx64":0x%"PRIx32, origin->address, origin->data,
2233 translated->address, translated->data);
2234 return 0;
2235
2236do_not_translate:
2237 memcpy(translated, origin, sizeof(*origin));
2238 return 0;
2239}
2240
8b5ed7df
PX
2241static int vtd_int_remap(X86IOMMUState *iommu, MSIMessage *src,
2242 MSIMessage *dst, uint16_t sid)
2243{
ede9c94a
PX
2244 return vtd_interrupt_remap_msi(INTEL_IOMMU_DEVICE(iommu),
2245 src, dst, sid);
8b5ed7df
PX
2246}
2247
651e4cef
PX
2248static MemTxResult vtd_mem_ir_read(void *opaque, hwaddr addr,
2249 uint64_t *data, unsigned size,
2250 MemTxAttrs attrs)
2251{
2252 return MEMTX_OK;
2253}
2254
2255static MemTxResult vtd_mem_ir_write(void *opaque, hwaddr addr,
2256 uint64_t value, unsigned size,
2257 MemTxAttrs attrs)
2258{
2259 int ret = 0;
09cd058a 2260 MSIMessage from = {}, to = {};
ede9c94a 2261 uint16_t sid = X86_IOMMU_SID_INVALID;
651e4cef
PX
2262
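 /* addr is the offset into the per-device interrupt window, so add the
 * window base back to reconstruct the original 0xfeexxxxx MSI address.
 */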
2263 from.address = (uint64_t) addr + VTD_INTERRUPT_ADDR_FIRST;
2264 from.data = (uint32_t) value;
2265
ede9c94a
PX
2266 if (!attrs.unspecified) {
2267 /* We have explicit Source ID */
2268 sid = attrs.requester_id;
2269 }
2270
2271 ret = vtd_interrupt_remap_msi(opaque, &from, &to, sid);
651e4cef
PX
2272 if (ret) {
2273 /* TODO: report error */
2274 VTD_DPRINTF(GENERAL, "int remap fail for addr 0x%"PRIx64
2275 " data 0x%"PRIx32, from.address, from.data);
2276 /* Drop this interrupt */
2277 return MEMTX_ERROR;
2278 }
2279
2280 VTD_DPRINTF(IR, "delivering MSI 0x%"PRIx64":0x%"PRIx32
2281 " for device sid 0x%04x",
2282 to.address, to.data, sid);
2283
2284 if (dma_memory_write(&address_space_memory, to.address,
2285 &to.data, size)) {
2286 VTD_DPRINTF(GENERAL, "error: failed to write 0x%"PRIx64
2287 " value 0x%"PRIx32, to.address, to.data);
2288 }
2289
2290 return MEMTX_OK;
2291}
2292
2293static const MemoryRegionOps vtd_mem_ir_ops = {
2294 .read_with_attrs = vtd_mem_ir_read,
2295 .write_with_attrs = vtd_mem_ir_write,
2296 .endianness = DEVICE_LITTLE_ENDIAN,
2297 .impl = {
2298 .min_access_size = 4,
2299 .max_access_size = 4,
2300 },
2301 .valid = {
2302 .min_access_size = 4,
2303 .max_access_size = 4,
2304 },
2305};
7df953bd
KO
2306
2307VTDAddressSpace *vtd_find_add_as(IntelIOMMUState *s, PCIBus *bus, int devfn)
2308{
2309 uintptr_t key = (uintptr_t)bus;
2310 VTDBus *vtd_bus = g_hash_table_lookup(s->vtd_as_by_busptr, &key);
2311 VTDAddressSpace *vtd_dev_as;
2312
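 /* The per-bus and per-devfn address space structures are created lazily
 * on first use and kept for the lifetime of the IOMMU.
 */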
2313 if (!vtd_bus) {
2314 /* No corresponding free() */
04af0e18
PX
2315 vtd_bus = g_malloc0(sizeof(VTDBus) + sizeof(VTDAddressSpace *) * \
2316 X86_IOMMU_PCI_DEVFN_MAX);
7df953bd
KO
2317 vtd_bus->bus = bus;
2318 key = (uintptr_t)bus;
2319 g_hash_table_insert(s->vtd_as_by_busptr, &key, vtd_bus);
2320 }
2321
2322 vtd_dev_as = vtd_bus->dev_as[devfn];
2323
2324 if (!vtd_dev_as) {
2325 vtd_bus->dev_as[devfn] = vtd_dev_as = g_malloc0(sizeof(VTDAddressSpace));
2326
2327 vtd_dev_as->bus = bus;
2328 vtd_dev_as->devfn = (uint8_t)devfn;
2329 vtd_dev_as->iommu_state = s;
2330 vtd_dev_as->context_cache_entry.context_cache_gen = 0;
2331 memory_region_init_iommu(&vtd_dev_as->iommu, OBJECT(s),
2332 &s->iommu_ops, "intel_iommu", UINT64_MAX);
651e4cef
PX
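 /* Layer the interrupt-remapping window on top of the translation region
 * so that MSI writes from this device are caught by vtd_mem_ir_ops
 * instead of being DMA-translated.
 */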
2333 memory_region_init_io(&vtd_dev_as->iommu_ir, OBJECT(s),
2334 &vtd_mem_ir_ops, s, "intel_iommu_ir",
2335 VTD_INTERRUPT_ADDR_SIZE);
2336 memory_region_add_subregion(&vtd_dev_as->iommu, VTD_INTERRUPT_ADDR_FIRST,
2337 &vtd_dev_as->iommu_ir);
7df953bd
KO
2338 address_space_init(&vtd_dev_as->as,
2339 &vtd_dev_as->iommu, "intel_iommu");
2340 }
2341 return vtd_dev_as;
2342}
2343
1da12ec4
LT
2344/* Do the initialization. It is also called on reset, so pay attention
2345 * when adding new initialization code.
2346 */
2347static void vtd_init(IntelIOMMUState *s)
2348{
d54bd7f8
PX
2349 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(s);
2350
1da12ec4
LT
2351 memset(s->csr, 0, DMAR_REG_SIZE);
2352 memset(s->wmask, 0, DMAR_REG_SIZE);
2353 memset(s->w1cmask, 0, DMAR_REG_SIZE);
2354 memset(s->womask, 0, DMAR_REG_SIZE);
2355
2356 s->iommu_ops.translate = vtd_iommu_translate;
5bf3d319 2357 s->iommu_ops.notify_flag_changed = vtd_iommu_notify_flag_changed;
1da12ec4
LT
2358 s->root = 0;
2359 s->root_extended = false;
2360 s->dmar_enabled = false;
2361 s->iq_head = 0;
2362 s->iq_tail = 0;
2363 s->iq = 0;
2364 s->iq_size = 0;
2365 s->qi_enabled = false;
2366 s->iq_last_desc_type = VTD_INV_DESC_NONE;
2367 s->next_frcd_reg = 0;
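 /* Baseline capability/extended-capability bits; interrupt remapping bits
 * are OR-ed in below only when the user enabled IR support.
 */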
2368 s->cap = VTD_CAP_FRO | VTD_CAP_NFR | VTD_CAP_ND | VTD_CAP_MGAW |
d66b969b 2369 VTD_CAP_SAGAW | VTD_CAP_MAMV | VTD_CAP_PSI | VTD_CAP_SLLPS;
ed7b8fbc 2370 s->ecap = VTD_ECAP_QI | VTD_ECAP_IRO;
1da12ec4 2371
d54bd7f8 2372 if (x86_iommu->intr_supported) {
a3f409cb 2373 s->ecap |= VTD_ECAP_IR | VTD_ECAP_EIM | VTD_ECAP_MHMV;
d54bd7f8
PX
2374 }
2375
d92fa2dc 2376 vtd_reset_context_cache(s);
b5a280c0 2377 vtd_reset_iotlb(s);
d92fa2dc 2378
1da12ec4
LT
2379 /* Define registers with default values and bit semantics */
2380 vtd_define_long(s, DMAR_VER_REG, 0x10UL, 0, 0);
2381 vtd_define_quad(s, DMAR_CAP_REG, s->cap, 0, 0);
2382 vtd_define_quad(s, DMAR_ECAP_REG, s->ecap, 0, 0);
2383 vtd_define_long(s, DMAR_GCMD_REG, 0, 0xff800000UL, 0);
2384 vtd_define_long_wo(s, DMAR_GCMD_REG, 0xff800000UL);
2385 vtd_define_long(s, DMAR_GSTS_REG, 0, 0, 0);
2386 vtd_define_quad(s, DMAR_RTADDR_REG, 0, 0xfffffffffffff000ULL, 0);
2387 vtd_define_quad(s, DMAR_CCMD_REG, 0, 0xe0000003ffffffffULL, 0);
2388 vtd_define_quad_wo(s, DMAR_CCMD_REG, 0x3ffff0000ULL);
2389
2390 /* Advanced Fault Logging not supported */
2391 vtd_define_long(s, DMAR_FSTS_REG, 0, 0, 0x11UL);
2392 vtd_define_long(s, DMAR_FECTL_REG, 0x80000000UL, 0x80000000UL, 0);
2393 vtd_define_long(s, DMAR_FEDATA_REG, 0, 0x0000ffffUL, 0);
2394 vtd_define_long(s, DMAR_FEADDR_REG, 0, 0xfffffffcUL, 0);
2395
2396 /* Treated as RsvdZ when EIM in ECAP_REG is not supported
2397 * vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0xffffffffUL, 0);
2398 */
2399 vtd_define_long(s, DMAR_FEUADDR_REG, 0, 0, 0);
2400
2401 /* Treated as RO for implementations that report the PLMR and PHMR fields
2402 * as Clear in the CAP_REG.
2403 * vtd_define_long(s, DMAR_PMEN_REG, 0, 0x80000000UL, 0);
2404 */
2405 vtd_define_long(s, DMAR_PMEN_REG, 0, 0, 0);
2406
ed7b8fbc
LT
2407 vtd_define_quad(s, DMAR_IQH_REG, 0, 0, 0);
2408 vtd_define_quad(s, DMAR_IQT_REG, 0, 0x7fff0ULL, 0);
2409 vtd_define_quad(s, DMAR_IQA_REG, 0, 0xfffffffffffff007ULL, 0);
2410 vtd_define_long(s, DMAR_ICS_REG, 0, 0, 0x1UL);
2411 vtd_define_long(s, DMAR_IECTL_REG, 0x80000000UL, 0x80000000UL, 0);
2412 vtd_define_long(s, DMAR_IEDATA_REG, 0, 0xffffffffUL, 0);
2413 vtd_define_long(s, DMAR_IEADDR_REG, 0, 0xfffffffcUL, 0);
2414 /* Treated as RsvdZ when EIM in ECAP_REG is not supported */
2415 vtd_define_long(s, DMAR_IEUADDR_REG, 0, 0, 0);
2416
1da12ec4
LT
2417 /* IOTLB registers */
2418 vtd_define_quad(s, DMAR_IOTLB_REG, 0, 0xb003ffff00000000ULL, 0);
2419 vtd_define_quad(s, DMAR_IVA_REG, 0, 0xfffffffffffff07fULL, 0);
2420 vtd_define_quad_wo(s, DMAR_IVA_REG, 0xfffffffffffff07fULL);
2421
2422 /* Fault Recording Registers, 128-bit */
2423 vtd_define_quad(s, DMAR_FRCD_REG_0_0, 0, 0, 0);
2424 vtd_define_quad(s, DMAR_FRCD_REG_0_2, 0, 0, 0x8000000000000000ULL);
a5861439
PX
2425
2426 /*
28589311 2427 * Interrupt remapping registers.
a5861439 2428 */
28589311 2429 vtd_define_quad(s, DMAR_IRTA_REG, 0, 0xfffffffffffff80fULL, 0);
1da12ec4
LT
2430}
2431
2432/* The address spaces should not be reset here, because devices keep using
2433 * the address space they obtained at first (they will not ask the bus again).
2434 */
2435static void vtd_reset(DeviceState *dev)
2436{
2437 IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
2438
2439 VTD_DPRINTF(GENERAL, "");
2440 vtd_init(s);
2441}
2442
621d983a
MA
2443static AddressSpace *vtd_host_dma_iommu(PCIBus *bus, void *opaque, int devfn)
2444{
2445 IntelIOMMUState *s = opaque;
2446 VTDAddressSpace *vtd_as;
2447
04af0e18 2448 assert(0 <= devfn && devfn < X86_IOMMU_PCI_DEVFN_MAX);
621d983a
MA
2449
2450 vtd_as = vtd_find_add_as(s, bus, devfn);
2451 return &vtd_as->as;
2452}
2453
1da12ec4
LT
2454static void vtd_realize(DeviceState *dev, Error **errp)
2455{
cb135f59
PX
2456 PCMachineState *pcms = PC_MACHINE(qdev_get_machine());
2457 PCIBus *bus = pcms->bus;
1da12ec4 2458 IntelIOMMUState *s = INTEL_IOMMU_DEVICE(dev);
4684a204 2459 X86IOMMUState *x86_iommu = X86_IOMMU_DEVICE(dev);
1da12ec4
LT
2460
2461 VTD_DPRINTF(GENERAL, "");
fb9f5926 2462 x86_iommu->type = TYPE_INTEL;
7df953bd 2463 memset(s->vtd_as_by_bus_num, 0, sizeof(s->vtd_as_by_bus_num));
1da12ec4
LT
2464 memory_region_init_io(&s->csrmem, OBJECT(s), &vtd_mem_ops, s,
2465 "intel_iommu", DMAR_REG_SIZE);
2466 sysbus_init_mmio(SYS_BUS_DEVICE(s), &s->csrmem);
b5a280c0
LT
2467 /* No corresponding destroy */
2468 s->iotlb = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
2469 g_free, g_free);
7df953bd
KO
2470 s->vtd_as_by_busptr = g_hash_table_new_full(vtd_uint64_hash, vtd_uint64_equal,
2471 g_free, g_free);
1da12ec4 2472 vtd_init(s);
621d983a
MA
2473 sysbus_mmio_map(SYS_BUS_DEVICE(s), 0, Q35_HOST_BRIDGE_IOMMU_ADDR);
2474 pci_setup_iommu(bus, vtd_host_dma_iommu, dev);
cb135f59
PX
2475 /* Pseudo address space under root PCI bus. */
2476 pcms->ioapic_as = vtd_host_dma_iommu(bus, s, Q35_PSEUDO_DEVFN_IOAPIC);
4684a204
PX
2477
2478 /* Currently Intel IOMMU IR only supports "kernel-irqchip={off|split}" */
2479 if (x86_iommu->intr_supported && kvm_irqchip_in_kernel() &&
2480 !kvm_irqchip_is_split()) {
2481 error_report("Intel Interrupt Remapping cannot work with "
2482 "kernel-irqchip=on, please use 'split|off'.");
2483 exit(1);
2484 }
1da12ec4
LT
2485}
2486
2487static void vtd_class_init(ObjectClass *klass, void *data)
2488{
2489 DeviceClass *dc = DEVICE_CLASS(klass);
1c7955c4 2490 X86IOMMUClass *x86_class = X86_IOMMU_CLASS(klass);
1da12ec4
LT
2491
2492 dc->reset = vtd_reset;
1da12ec4
LT
2493 dc->vmsd = &vtd_vmstate;
2494 dc->props = vtd_properties;
621d983a 2495 dc->hotpluggable = false;
1c7955c4 2496 x86_class->realize = vtd_realize;
8b5ed7df 2497 x86_class->int_remap = vtd_int_remap;
1da12ec4
LT
2498}
2499
2500static const TypeInfo vtd_info = {
2501 .name = TYPE_INTEL_IOMMU_DEVICE,
1c7955c4 2502 .parent = TYPE_X86_IOMMU_DEVICE,
1da12ec4
LT
2503 .instance_size = sizeof(IntelIOMMUState),
2504 .class_init = vtd_class_init,
2505};
2506
2507static void vtd_register_types(void)
2508{
2509 VTD_DPRINTF(GENERAL, "");
2510 type_register_static(&vtd_info);
2511}
2512
2513type_init(vtd_register_types)