/*
 * virtio-iommu device
 *
 * Copyright (c) 2020 Red Hat, Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2 or later, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program.  If not, see <http://www.gnu.org/licenses/>.
 */

20 #include "qemu/osdep.h"
23 #include "exec/target_page.h"
24 #include "hw/qdev-properties.h"
25 #include "hw/virtio/virtio.h"
26 #include "sysemu/kvm.h"
27 #include "sysemu/reset.h"
28 #include "sysemu/sysemu.h"
29 #include "qapi/error.h"
30 #include "qemu/error-report.h"
33 #include "standard-headers/linux/virtio_ids.h"
35 #include "hw/virtio/virtio-bus.h"
36 #include "hw/virtio/virtio-iommu.h"
37 #include "hw/pci/pci_bus.h"
38 #include "hw/pci/pci.h"
41 #define VIOMMU_DEFAULT_QUEUE_SIZE 256
42 #define VIOMMU_PROBE_SIZE 512
typedef struct VirtIOIOMMUDomain {
    uint32_t id;
    bool bypass;
    GTree *mappings;
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list;
} VirtIOIOMMUDomain;

typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;
    VirtIOIOMMUDomain *domain;
    IOMMUMemoryRegion *iommu_mr;
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;

static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}

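/*
 * Editor's note (not in the original source): the endpoint ID ("sid") used
 * throughout this file is simply the PCI requester ID. For a device on bus
 * 0x02, slot 2, function 0 (devfn 0x10):
 *
 *     sid = PCI_BUILD_BDF(0x02, 0x10) = (0x02 << 8) | 0x10 = 0x0210
 *
 * All endpoint lookups in s->endpoints are keyed by this 16-bit value.
 */
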
static bool virtio_iommu_device_bypassed(IOMMUDevice *sdev)
{
    uint32_t sid;
    bool bypassed;
    VirtIOIOMMU *s = sdev->viommu;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_rec_mutex_lock(&s->mutex);
    /* need to check bypass before system reset */
    if (!s->endpoints) {
        bypassed = s->config.bypass;
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        bypassed = s->config.bypass;
    } else {
        bypassed = ep->domain->bypass;
    }

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
    return bypassed;
}

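/*
 * Editor's summary of the decision above (derived from the code, not part
 * of the original file):
 *
 *     endpoint attached to a domain  ->  bypassed = ep->domain->bypass
 *     otherwise                      ->  bypassed = s->config.bypass
 *
 * i.e. a domain's bypass attribute overrides the global config.bypass for
 * the endpoints attached to it.
 */
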
/* Return whether the device is using IOMMU translation. */
static bool virtio_iommu_switch_address_space(IOMMUDevice *sdev)
{
    bool use_remapping;

    assert(sdev->viommu);

    use_remapping = !virtio_iommu_device_bypassed(sdev);

    trace_virtio_iommu_switch_address_space(pci_bus_num(sdev->bus),
                                            PCI_SLOT(sdev->devfn),
                                            PCI_FUNC(sdev->devfn),
                                            use_remapping);

    /* Turn off first then on the other */
    if (use_remapping) {
        memory_region_set_enabled(&sdev->bypass_mr, false);
        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), true);
    } else {
        memory_region_set_enabled(MEMORY_REGION(&sdev->iommu_mr), false);
        memory_region_set_enabled(&sdev->bypass_mr, true);
    }

    return use_remapping;
}

static void virtio_iommu_switch_address_space_all(VirtIOIOMMU *s)
{
    GHashTableIter iter;
    IOMMUPciBus *iommu_pci_bus;
    int i;

    g_hash_table_iter_init(&iter, s->as_by_busptr);
    while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
        for (i = 0; i < PCI_DEVFN_MAX; i++) {
            if (!iommu_pci_bus->pbdev[i]) {
                continue;
            }
            virtio_iommu_switch_address_space(iommu_pci_bus->pbdev[i]);
        }
    }
}

/*
 * The bus number is used for lookup when SID based operations occur.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
 * numbers may not always be initialized yet.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}

static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
{
    uint8_t bus_n, devfn;
    IOMMUPciBus *iommu_pci_bus;
    IOMMUDevice *dev;

    bus_n = PCI_BUS_NUM(sid);
    iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
    if (iommu_pci_bus) {
        devfn = sid & (PCI_DEVFN_MAX - 1);
        dev = iommu_pci_bus->pbdev[devfn];
        if (dev) {
            return &dev->iommu_mr;
        }
    }
    return NULL;
}

static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
    VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;

    if (inta->high < intb->low) {
        return -1;
    } else if (intb->high < inta->low) {
        return 1;
    } else {
        return 0;
    }
}

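/*
 * Illustrative sketch (editor's addition): interval_cmp() treats any two
 * overlapping intervals as equal, so a degenerate probe interval finds the
 * mapping that covers a given address:
 *
 *     VirtIOIOMMUInterval probe = { .low = addr, .high = addr + 1 };
 *     VirtIOIOMMUMapping *m = g_tree_lookup(domain->mappings, &probe);
 *
 * 'm' is the mapping whose [low, high] range contains 'addr', or NULL.
 * virtio_iommu_translate() below relies on exactly this property.
 */
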
static void virtio_iommu_notify_map_unmap(IOMMUMemoryRegion *mr,
                                          IOMMUTLBEvent *event,
                                          hwaddr virt_start, hwaddr virt_end)
{
    uint64_t delta = virt_end - virt_start;

    event->entry.iova = virt_start;
    event->entry.addr_mask = delta;

    if (delta == UINT64_MAX) {
        memory_region_notify_iommu(mr, 0, *event);
    }

    while (virt_start != virt_end + 1) {
        uint64_t mask = dma_aligned_pow2_mask(virt_start, virt_end, 64);

        event->entry.addr_mask = mask;
        event->entry.iova = virt_start;
        memory_region_notify_iommu(mr, 0, *event);
        virt_start += mask + 1;
        if (event->entry.perm != IOMMU_NONE) {
            event->entry.translated_addr += mask + 1;
        }
    }
}

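/*
 * Worked example (editor's addition, assuming dma_aligned_pow2_mask()
 * returns the largest mask such that the chunk starting at virt_start is
 * power-of-2 sized, aligned, and does not exceed virt_end): notifying
 * [0x1000, 0x4fff] emits three power-of-2 events:
 *
 *     iova 0x1000, addr_mask 0x0fff   (4K chunk)
 *     iova 0x2000, addr_mask 0x1fff   (8K chunk)
 *     iova 0x4000, addr_mask 0x0fff   (4K chunk)
 *
 * Notifiers such as VFIO require these power-of-2 aligned ranges.
 */
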
static void virtio_iommu_notify_map(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                    hwaddr virt_end, hwaddr paddr,
                                    uint32_t flags)
{
    IOMMUTLBEvent event;
    IOMMUAccessFlags perm = IOMMU_ACCESS_FLAG(flags & VIRTIO_IOMMU_MAP_F_READ,
                                              flags & VIRTIO_IOMMU_MAP_F_WRITE);

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_MAP) ||
        (flags & VIRTIO_IOMMU_MAP_F_MMIO) || !perm) {
        return;
    }

    trace_virtio_iommu_notify_map(mr->parent_obj.name, virt_start, virt_end,
                                  paddr, perm);

    event.type = IOMMU_NOTIFIER_MAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = perm;
    event.entry.translated_addr = paddr;

    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
}

static void virtio_iommu_notify_unmap(IOMMUMemoryRegion *mr, hwaddr virt_start,
                                      hwaddr virt_end)
{
    IOMMUTLBEvent event;

    if (!(mr->iommu_notify_flags & IOMMU_NOTIFIER_UNMAP)) {
        return;
    }

    trace_virtio_iommu_notify_unmap(mr->parent_obj.name, virt_start, virt_end);

    event.type = IOMMU_NOTIFIER_UNMAP;
    event.entry.target_as = &address_space_memory;
    event.entry.perm = IOMMU_NONE;
    event.entry.translated_addr = 0;

    virtio_iommu_notify_map_unmap(mr, &event, virt_start, virt_end);
}

static gboolean virtio_iommu_notify_unmap_cb(gpointer key, gpointer value,
                                             gpointer data)
{
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_unmap(mr, interval->low, interval->high);

    return false;
}

static gboolean virtio_iommu_notify_map_cb(gpointer key, gpointer value,
                                           gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);

    return false;
}

static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
{
    VirtIOIOMMUDomain *domain = ep->domain;
    IOMMUDevice *sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);

    if (!ep->domain) {
        return;
    }
    g_tree_foreach(domain->mappings, virtio_iommu_notify_unmap_cb,
                   ep->iommu_mr);
    QLIST_REMOVE(ep, next);
    ep->domain = NULL;
    virtio_iommu_switch_address_space(sdev);
}

static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}

static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}

static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id,
                                                  bool bypass)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        if (domain->bypass != bypass) {
            return NULL;
        }
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, (GDestroyNotify)g_free,
                                       (GDestroyNotify)g_free);
    domain->bypass = bypass;
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}

static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}

static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
                                              int devfn)
{
    VirtIOIOMMU *s = opaque;
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    static uint32_t mr_index;
    IOMMUDevice *sdev;

    if (!sbus) {
        sbus = g_malloc0(sizeof(IOMMUPciBus) +
                         sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->as_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d",
                                     TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                     mr_index++, devfn);
        sdev = sbus->pbdev[devfn] = g_new0(IOMMUDevice, 1);

        sdev->viommu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        trace_virtio_iommu_init_iommu_mr(name);

        memory_region_init(&sdev->root, OBJECT(s), name, UINT64_MAX);
        address_space_init(&sdev->as, &sdev->root, TYPE_VIRTIO_IOMMU);

        /*
         * Build the IOMMU disabled container with aliases to the
         * shared MRs. Aliasing to a shared memory region helps the
         * memory API detect identical FlatViews, so devices can share
         * the same FlatView when in bypass mode (either because the
         * virtio-iommu driver is not configured or because of
         * "iommu=pt"). This greatly reduces the total number of
         * FlatViews in the system, so the VM runs faster.
         */
        memory_region_init_alias(&sdev->bypass_mr, OBJECT(s),
                                 "system", get_system_memory(), 0,
                                 memory_region_size(get_system_memory()));

        memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                 OBJECT(s), name,
                                 UINT64_MAX);

        /*
         * Hook both containers under the root container; we switch
         * between the iommu and bypass MRs by enabling/disabling the
         * corresponding sub-containers.
         */
        memory_region_add_subregion_overlap(&sdev->root, 0,
                                            MEMORY_REGION(&sdev->iommu_mr),
                                            0);
        memory_region_add_subregion_overlap(&sdev->root, 0,
                                            &sdev->bypass_mr, 0);

        virtio_iommu_switch_address_space(sdev);
        g_free(name);
    }

    return &sdev->as;
}

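/*
 * Editor's sketch of the memory region layout built above (derived from
 * the code, not part of the original file):
 *
 *     sdev->root (container, size UINT64_MAX)
 *       +-- MEMORY_REGION(&sdev->iommu_mr)  enabled while translating
 *       +-- sdev->bypass_mr                 alias of system memory,
 *                                           enabled while bypassed
 *
 * The two subregions overlap at offset 0; exactly one is enabled at any
 * time, and virtio_iommu_switch_address_space() flips between them.
 */
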
static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    IOMMUDevice *sdev;

    trace_virtio_iommu_attach(domain_id, ep_id);

    if (flags & ~VIRTIO_IOMMU_ATTACH_F_BYPASS) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * The device is already attached to a domain,
         * detach it first.
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id,
                                     flags & VIRTIO_IOMMU_ATTACH_F_BYPASS);
    if (!domain) {
        /* Incompatible bypass flag */
        return VIRTIO_IOMMU_S_INVAL;
    }
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;
    sdev = container_of(ep->iommu_mr, IOMMUDevice, iommu_mr);
    virtio_iommu_switch_address_space(sdev);

    /* Replay domain mappings on the associated memory region */
    g_tree_foreach(domain->mappings, virtio_iommu_notify_map_cb,
                   ep->iommu_mr);

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;
    VirtIOIOMMUEndpoint *ep;

    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    g_tree_insert(domain->mappings, interval, mapping);

    QLIST_FOREACH(ep, &domain->endpoint_list, next) {
        virtio_iommu_notify_map(ep->iommu_mr, virt_start, virt_end, phys_start,
                                flags);
    }

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (domain->bypass) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    interval.low = virt_start;
    interval.high = virt_end;

    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            QLIST_FOREACH(ep, &domain->endpoint_list, next) {
                virtio_iommu_notify_unmap(ep->iommu_mr, current_low,
                                          current_high);
            }
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}

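/*
 * Worked example (editor's addition): given mappings [0x0000, 0x1fff] and
 * [0x2000, 0x2fff], UNMAP of [0x0000, 0x2fff] removes both, since each
 * mapping is fully contained in the request. UNMAP of [0x1000, 0x2fff]
 * instead ends with VIRTIO_IOMMU_S_RANGE when it reaches the partially
 * covered mapping [0x0000, 0x1fff]: splitting a mapping is not allowed,
 * although mappings fully covered by the request may already have been
 * removed by then.
 */
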
static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    int i;

    total = size * s->nb_reserved_regions;

    if (total > free) {
        return -ENOSPC;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        unsigned subtype = s->reserved_regions[i].type;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(s->reserved_regions[i].low);
        prop.end = cpu_to_le64(s->reserved_regions[i].high);

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}

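/*
 * Editor's sketch of the bytes written above (assuming the
 * struct virtio_iommu_probe_resv_mem layout from the virtio spec headers):
 *
 *     head.type   = VIRTIO_IOMMU_PROBE_T_RESV_MEM    (le16)
 *     head.length = sizeof(prop) - sizeof(prop.head) (le16)
 *     subtype     = RESERVED or MSI                  (u8 + padding)
 *     start, end  = region bounds                    (le64, le64)
 *
 * One such property is emitted per reserved region, back to back in 'buf'.
 */
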
/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    size_t free = VIOMMU_PROBE_SIZE;
    ssize_t count;

    if (!virtio_iommu_mr(s, ep_id)) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    buf += count;
    free -= count;

    return VIRTIO_IOMMU_S_OK;
}

static int virtio_iommu_iov_to_req(struct iovec *iov,
                                   unsigned int iov_cnt,
                                   void *req, size_t payload_sz)
{
    size_t sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);

    if (unlikely(sz != payload_sz)) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    return 0;
}

#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,               \
                    sizeof(req) - sizeof(struct virtio_iommu_req_tail));\
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)

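/*
 * Illustrative expansion (editor's addition):
 * virtio_iommu_handle_req(attach) generates:
 *
 *     static int virtio_iommu_handle_attach(VirtIOIOMMU *s,
 *                                           struct iovec *iov,
 *                                           unsigned int iov_cnt)
 *     {
 *         struct virtio_iommu_req_attach req;
 *         int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req,
 *                         sizeof(req) - sizeof(struct virtio_iommu_req_tail));
 *
 *         return ret ? ret : virtio_iommu_attach(s, &req);
 *     }
 *
 * The tail is excluded from the copy because the status tail is written
 * back to the guest separately by virtio_iommu_handle_command().
 */
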
static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}

static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    size_t output_size = sizeof(tail), sz;
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    void *buf = NULL;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return;
        }

        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        qemu_rec_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_PROBE:
        {
            struct virtio_iommu_req_tail *ptail;

            output_size = s->config.probe_size + sizeof(tail);
            buf = g_malloc0(output_size);

            ptail = buf + s->config.probe_size;
            ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf);
            break;
        }
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_rec_mutex_unlock(&s->mutex);

out:
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          buf ? buf : &tail, output_size);
        assert(sz == output_size);

        virtqueue_push(vq, elem, sz);
        virtio_notify(vdev, vq);
        g_free(elem);
        g_free(buf);
        buf = NULL;
    }
}

static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);
}

static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
                                            IOMMUAccessFlags flag,
                                            int iommu_idx)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMUInterval interval, *mapping_key;
    VirtIOIOMMUMapping *mapping_value;
    VirtIOIOMMU *s = sdev->viommu;
    bool read_fault, write_fault;
    VirtIOIOMMUEndpoint *ep;
    uint32_t sid, flags;
    bool bypass_allowed;
    bool found;
    int i;

    interval.low = addr;
    interval.high = addr + 1;

    IOMMUTLBEntry entry = {
        .target_as = &address_space_memory,
        .iova = addr,
        .translated_addr = addr,
        .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1,
        .perm = IOMMU_NONE,
    };

    bypass_allowed = s->config.bypass;

    sid = virtio_iommu_get_bdf(sdev);

    trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
    qemu_rec_mutex_lock(&s->mutex);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));

    if (bypass_allowed) {
        assert(ep && ep->domain && !ep->domain->bypass);
    }

    if (!ep) {
        if (!bypass_allowed) {
            error_report_once("%s sid=%d is not known!!", __func__, sid);
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        ReservedRegion *reg = &s->reserved_regions[i];

        if (addr >= reg->low && addr <= reg->high) {
            switch (reg->type) {
            case VIRTIO_IOMMU_RESV_MEM_T_MSI:
                entry.perm = flag;
                break;
            case VIRTIO_IOMMU_RESV_MEM_T_RESERVED:
            default:
                virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                          VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                          sid, addr);
                break;
            }
            goto unlock;
        }
    }

    if (!ep->domain) {
        if (!bypass_allowed) {
            error_report_once("%s %02x:%02x.%01x not attached to any domain",
                              __func__, PCI_BUS_NUM(sid),
                              PCI_SLOT(sid), PCI_FUNC(sid));
            virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN,
                                      VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                      sid, addr);
        } else {
            entry.perm = flag;
        }
        goto unlock;
    } else if (ep->domain->bypass) {
        entry.perm = flag;
        goto unlock;
    }

    found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval),
                                   (void **)&mapping_key,
                                   (void **)&mapping_value);
    if (!found) {
        error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d",
                          __func__, addr, sid);
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }

    read_fault = (flag & IOMMU_RO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ);
    write_fault = (flag & IOMMU_WO) &&
                    !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE);

    flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0;
    flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0;
    if (flags) {
        error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d",
                          __func__, addr, flag, mapping_value->flags);
        flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS;
        virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING,
                                  flags | VIRTIO_IOMMU_FAULT_F_ADDRESS,
                                  sid, addr);
        goto unlock;
    }
    entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr;
    entry.perm = flag;
    trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid);

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
    return entry;
}

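/*
 * Worked example (editor's addition): with a 4K page mask and a mapping
 * { low = 0x8000, high = 0x8fff, phys_addr = 0x40000000 }, translating
 * iova 0x8010 yields:
 *
 *     entry.translated_addr = 0x8010 - 0x8000 + 0x40000000 = 0x40000010
 *     entry.addr_mask       = 0xfff
 *
 * so the returned entry covers the whole page containing the iova.
 */
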
static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    struct virtio_iommu_config *out_config = (void *)config_data;

    out_config->page_size_mask = cpu_to_le64(dev_config->page_size_mask);
    out_config->input_range.start = cpu_to_le64(dev_config->input_range.start);
    out_config->input_range.end = cpu_to_le64(dev_config->input_range.end);
    out_config->domain_range.start = cpu_to_le32(dev_config->domain_range.start);
    out_config->domain_range.end = cpu_to_le32(dev_config->domain_range.end);
    out_config->probe_size = cpu_to_le32(dev_config->probe_size);
    out_config->bypass = dev_config->bypass;

    trace_virtio_iommu_get_config(dev_config->page_size_mask,
                                  dev_config->input_range.start,
                                  dev_config->input_range.end,
                                  dev_config->domain_range.start,
                                  dev_config->domain_range.end,
                                  dev_config->probe_size,
                                  dev_config->bypass);
}

static void virtio_iommu_set_config(VirtIODevice *vdev,
                                    const uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *dev_config = &dev->config;
    const struct virtio_iommu_config *in_config = (void *)config_data;

    if (in_config->bypass != dev_config->bypass) {
        if (!virtio_vdev_has_feature(vdev, VIRTIO_IOMMU_F_BYPASS_CONFIG)) {
            virtio_error(vdev, "cannot set config.bypass");
            return;
        } else if (in_config->bypass != 0 && in_config->bypass != 1) {
            virtio_error(vdev, "invalid config.bypass value '%u'",
                         in_config->bypass);
            return;
        }
        dev_config->bypass = in_config->bypass;
        virtio_iommu_switch_address_space_all(dev);
    }

    trace_virtio_iommu_set_config(in_config->bypass);
}

static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
                                          Error **errp)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);

    f |= dev->features;
    trace_virtio_iommu_get_features(f);
    return f;
}

static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
{
    guint ua = GPOINTER_TO_UINT(a);
    guint ub = GPOINTER_TO_UINT(b);
    return (ua > ub) - (ua < ub);
}

static gboolean virtio_iommu_remap(gpointer key, gpointer value, gpointer data)
{
    VirtIOIOMMUMapping *mapping = (VirtIOIOMMUMapping *) value;
    VirtIOIOMMUInterval *interval = (VirtIOIOMMUInterval *) key;
    IOMMUMemoryRegion *mr = (IOMMUMemoryRegion *) data;

    trace_virtio_iommu_remap(mr->parent_obj.name, interval->low,
                             interval->high, mapping->phys_addr);
    virtio_iommu_notify_map(mr, interval->low, interval->high,
                            mapping->phys_addr, mapping->flags);
    return false;
}

static void virtio_iommu_replay(IOMMUMemoryRegion *mr, IOMMUNotifier *n)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint32_t sid;
    VirtIOIOMMUEndpoint *ep;

    sid = virtio_iommu_get_bdf(sdev);

    qemu_rec_mutex_lock(&s->mutex);

    if (!s->endpoints) {
        goto unlock;
    }

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid));
    if (!ep || !ep->domain) {
        goto unlock;
    }

    g_tree_foreach(ep->domain->mappings, virtio_iommu_remap, mr);

unlock:
    qemu_rec_mutex_unlock(&s->mutex);
}

static int virtio_iommu_notify_flag_changed(IOMMUMemoryRegion *iommu_mr,
                                            IOMMUNotifierFlag old,
                                            IOMMUNotifierFlag new,
                                            Error **errp)
{
    if (new & IOMMU_NOTIFIER_DEVIOTLB_UNMAP) {
        error_setg(errp, "Virtio-iommu does not support dev-iotlb yet");
        return -EINVAL;
    }

    if (old == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_add(iommu_mr->parent_obj.name);
    } else if (new == IOMMU_NOTIFIER_NONE) {
        trace_virtio_iommu_notify_flag_del(iommu_mr->parent_obj.name);
    }
    return 0;
}

/*
 * The default mask (TARGET_PAGE_MASK) is the smallest supported guest granule,
 * for example 0xfffffffffffff000. When an assigned device has page size
 * restrictions due to the hardware IOMMU configuration, apply this restriction
 * to the mask.
 */
static int virtio_iommu_set_page_size_mask(IOMMUMemoryRegion *mr,
                                           uint64_t new_mask,
                                           Error **errp)
{
    IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
    VirtIOIOMMU *s = sdev->viommu;
    uint64_t cur_mask = s->config.page_size_mask;

    trace_virtio_iommu_set_page_size_mask(mr->parent_obj.name, cur_mask,
                                          new_mask);

    if ((cur_mask & new_mask) == 0) {
        error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                   " is incompatible with mask 0x%"PRIx64, cur_mask, new_mask);
        return -1;
    }

    /*
     * Once the granule is frozen we can't change the mask anymore. If by
     * chance the hotplugged device supports the same granule, we can still
     * accept it. Having different masks is possible but the guest will use
     * sub-optimal block sizes, so warn about it.
     */
    if (s->granule_frozen) {
        int new_granule = ctz64(new_mask);
        int cur_granule = ctz64(cur_mask);

        if (new_granule != cur_granule) {
            error_setg(errp, "virtio-iommu page mask 0x%"PRIx64
                       " is incompatible with mask 0x%"PRIx64, cur_mask,
                       new_mask);
            return -1;
        } else if (new_mask != cur_mask) {
            warn_report("virtio-iommu page mask 0x%"PRIx64
                        " does not match 0x%"PRIx64, cur_mask, new_mask);
        }
        return 0;
    }

    s->config.page_size_mask &= new_mask;
    return 0;
}

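/*
 * Illustrative example (editor's addition): a host IOMMU that only supports
 * 64K pages would hand in new_mask = 0xffffffffffff0000. Intersecting it
 * with the default 4K mask 0xfffffffffffff000 keeps only the granules both
 * sides support, so s->config.page_size_mask becomes 0xffffffffffff0000 and
 * the guest must use 64K or larger mappings.
 */
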
static void virtio_iommu_system_reset(void *opaque)
{
    VirtIOIOMMU *s = opaque;

    trace_virtio_iommu_system_reset();

    /*
     * config.bypass is sticky across device reset, but should be restored
     * on system reset.
     */
    s->config.bypass = s->boot_bypass;
    virtio_iommu_switch_address_space_all(s);
}

static void virtio_iommu_freeze_granule(Notifier *notifier, void *data)
{
    VirtIOIOMMU *s = container_of(notifier, VirtIOIOMMU, machine_done);
    int granule;

    if (likely(s->config.bypass)) {
        /*
         * Transient IOMMU MR enable to collect page_size_mask requirements
         * through memory_region_iommu_set_page_size_mask() called by
         * VFIO region_add() callback
         */
        s->config.bypass = false;
        virtio_iommu_switch_address_space_all(s);
        /* restore default */
        s->config.bypass = true;
        virtio_iommu_switch_address_space_all(s);
    }
    s->granule_frozen = true;
    granule = ctz64(s->config.page_size_mask);
    trace_virtio_iommu_freeze_granule(BIT(granule));
}

static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    virtio_init(vdev, VIRTIO_ID_IOMMU, sizeof(struct virtio_iommu_config));

    memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));

    s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
                                 virtio_iommu_handle_command);
    s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);

    /*
     * config.bypass is needed to get initial address space early, such as
     * in vfio realize
     */
    s->config.bypass = s->boot_bypass;
    s->config.page_size_mask = qemu_target_page_mask();
    s->config.input_range.end = UINT64_MAX;
    s->config.domain_range.end = UINT32_MAX;
    s->config.probe_size = VIOMMU_PROBE_SIZE;

    virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
    virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
    virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE);
    virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS_CONFIG);

    qemu_rec_mutex_init(&s->mutex);

    s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);

    if (s->primary_bus) {
        pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s);
    } else {
        error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
    }

    s->machine_done.notify = virtio_iommu_freeze_granule;
    qemu_add_machine_init_done_notifier(&s->machine_done);

    qemu_register_reset(virtio_iommu_system_reset, s);
}

static void virtio_iommu_device_unrealize(DeviceState *dev)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    qemu_unregister_reset(virtio_iommu_system_reset, s);
    qemu_remove_machine_init_done_notifier(&s->machine_done);

    g_hash_table_destroy(s->as_by_busptr);
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }

    qemu_rec_mutex_destroy(&s->mutex);

    virtio_delete_queue(s->req_vq);
    virtio_delete_queue(s->event_vq);
    virtio_cleanup(vdev);
}

static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}

static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}

static void virtio_iommu_instance_init(Object *obj)
{
}

#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (VMStateField[]) {                       \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (VMStateField[]) {                      \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};

static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}

static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};

static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 2,
    .minimum_version_id = 2,
    .pre_load = domain_preload,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_BOOL_V(bypass, VirtIOIOMMUDomain, 2),
        VMSTATE_END_OF_LIST()
    }
};

static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}

static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);

    /*
     * Memory regions are dynamically turned on/off depending on
     * 'config.bypass' and the type of the attached domain, if any.
     * After migration, we need to make sure the memory regions are
     * still in the correct state.
     */
    virtio_iommu_switch_address_space_all(s);
    return 0;
}

static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .version_id = 2,
    .minimum_version_id = 2,
    .post_load = iommu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 2,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_UINT8_V(config.bypass, VirtIOIOMMU, 2),
        VMSTATE_END_OF_LIST()
    },
};

static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .version_id = 2,
    .minimum_version_id = 2,
    .priority = MIG_PRI_IOMMU,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};

static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus,
                     TYPE_PCI_BUS, PCIBus *),
    DEFINE_PROP_BOOL("boot-bypass", VirtIOIOMMU, boot_bypass, true),
    DEFINE_PROP_END_OF_LIST(),
};

static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->set_config = virtio_iommu_set_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    vdc->vmsd = &vmstate_virtio_iommu_device;
}

static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
    imrc->replay = virtio_iommu_replay;
    imrc->notify_flag_changed = virtio_iommu_notify_flag_changed;
    imrc->iommu_set_page_size_mask = virtio_iommu_set_page_size_mask;
}

static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};

static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};

static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)