]> git.proxmox.com Git - mirror_qemu.git/blame - hw/virtio/virtio-iommu.c
virtio-iommu: Implement map/unmap
[mirror_qemu.git] / hw / virtio / virtio-iommu.c
CommitLineData
22c37a10
EA
1/*
2 * virtio-iommu device
3 *
4 * Copyright (c) 2020 Red Hat, Inc.
5 *
6 * This program is free software; you can redistribute it and/or modify it
7 * under the terms and conditions of the GNU General Public License,
8 * version 2 or later, as published by the Free Software Foundation.
9 *
10 * This program is distributed in the hope it will be useful, but WITHOUT
11 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
12 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
13 * more details.
14 *
15 * You should have received a copy of the GNU General Public License along with
16 * this program. If not, see <http://www.gnu.org/licenses/>.
17 *
18 */
19
20#include "qemu/osdep.h"
fe2cacae 21#include "qemu/log.h"
22c37a10
EA
22#include "qemu/iov.h"
23#include "qemu-common.h"
24#include "hw/qdev-properties.h"
25#include "hw/virtio/virtio.h"
26#include "sysemu/kvm.h"
cfb42188
EA
27#include "qapi/error.h"
28#include "qemu/error-report.h"
22c37a10
EA
29#include "trace.h"
30
31#include "standard-headers/linux/virtio_ids.h"
32
33#include "hw/virtio/virtio-bus.h"
34#include "hw/virtio/virtio-access.h"
35#include "hw/virtio/virtio-iommu.h"
cfb42188
EA
36#include "hw/pci/pci_bus.h"
37#include "hw/pci/pci.h"
22c37a10
EA
38
/* Max size of both the request and the event virtqueue */
#define VIOMMU_DEFAULT_QUEUE_SIZE 256

/*
 * A domain groups endpoints sharing one set of IOVA mappings.
 * @mappings is a GTree keyed by VirtIOIOMMUInterval with
 * VirtIOIOMMUMapping values; overlapping keys compare equal
 * (see interval_cmp()).
 */
typedef struct VirtIOIOMMUDomain {
    uint32_t id;                 /* domain id chosen by the driver */
    GTree *mappings;             /* IOVA interval -> VirtIOIOMMUMapping */
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list; /* attached endpoints */
} VirtIOIOMMUDomain;

/* A DMA initiator, identified by its PCI requester id (SID). */
typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;                 /* SID (bus number << 8 | devfn) */
    VirtIOIOMMUDomain *domain;   /* domain attached to, NULL when detached */
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

/* Inclusive IOVA range [low, high]; key type of a domain's mapping tree. */
typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

/* Value of a mapping: guest-physical base plus VIRTIO_IOMMU_MAP_F_* flags. */
typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;
} VirtIOIOMMUMapping;
63
cfb42188
EA
64static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
65{
66 return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
67}
68
69/**
70 * The bus number is used for lookup when SID based operations occur.
71 * In that case we lazily populate the IOMMUPciBus array from the bus hash
72 * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
73 * numbers may not be always initialized yet.
74 */
75static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
76{
77 IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];
78
79 if (!iommu_pci_bus) {
80 GHashTableIter iter;
81
82 g_hash_table_iter_init(&iter, s->as_by_busptr);
83 while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
84 if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
85 s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
86 return iommu_pci_bus;
87 }
88 }
89 return NULL;
90 }
91 return iommu_pci_bus;
92}
93
94static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid)
95{
96 uint8_t bus_n, devfn;
97 IOMMUPciBus *iommu_pci_bus;
98 IOMMUDevice *dev;
99
100 bus_n = PCI_BUS_NUM(sid);
101 iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n);
102 if (iommu_pci_bus) {
103 devfn = sid & PCI_DEVFN_MAX;
104 dev = iommu_pci_bus->pbdev[devfn];
105 if (dev) {
106 return &dev->iommu_mr;
107 }
108 }
109 return NULL;
110}
111
112static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
113{
114 VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a;
115 VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b;
116
117 if (inta->high < intb->low) {
118 return -1;
119 } else if (intb->high < inta->low) {
120 return 1;
121 } else {
122 return 0;
123 }
124}
125
126static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep)
127{
128 if (!ep->domain) {
129 return;
130 }
131 QLIST_REMOVE(ep, next);
132 ep->domain = NULL;
133}
134
135static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
136 uint32_t ep_id)
137{
138 VirtIOIOMMUEndpoint *ep;
139
140 ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
141 if (ep) {
142 return ep;
143 }
144 if (!virtio_iommu_mr(s, ep_id)) {
145 return NULL;
146 }
147 ep = g_malloc0(sizeof(*ep));
148 ep->id = ep_id;
149 trace_virtio_iommu_get_endpoint(ep_id);
150 g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
151 return ep;
152}
153
154static void virtio_iommu_put_endpoint(gpointer data)
155{
156 VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;
157
158 if (ep->domain) {
159 virtio_iommu_detach_endpoint_from_domain(ep);
160 }
161
162 trace_virtio_iommu_put_endpoint(ep->id);
163 g_free(ep);
164}
165
166static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
167 uint32_t domain_id)
168{
169 VirtIOIOMMUDomain *domain;
170
171 domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
172 if (domain) {
173 return domain;
174 }
175 domain = g_malloc0(sizeof(*domain));
176 domain->id = domain_id;
177 domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
178 NULL, (GDestroyNotify)g_free,
179 (GDestroyNotify)g_free);
180 g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
181 QLIST_INIT(&domain->endpoint_list);
182 trace_virtio_iommu_get_domain(domain_id);
183 return domain;
184}
185
186static void virtio_iommu_put_domain(gpointer data)
187{
188 VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
189 VirtIOIOMMUEndpoint *iter, *tmp;
190
191 QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
192 virtio_iommu_detach_endpoint_from_domain(iter);
193 }
194 g_tree_destroy(domain->mappings);
195 trace_virtio_iommu_put_domain(domain->id);
196 g_free(domain);
197}
198
199static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
200 int devfn)
201{
202 VirtIOIOMMU *s = opaque;
203 IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
204 static uint32_t mr_index;
205 IOMMUDevice *sdev;
206
207 if (!sbus) {
208 sbus = g_malloc0(sizeof(IOMMUPciBus) +
209 sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
210 sbus->bus = bus;
211 g_hash_table_insert(s->as_by_busptr, bus, sbus);
212 }
213
214 sdev = sbus->pbdev[devfn];
215 if (!sdev) {
216 char *name = g_strdup_printf("%s-%d-%d",
217 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
218 mr_index++, devfn);
219 sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice));
220
221 sdev->viommu = s;
222 sdev->bus = bus;
223 sdev->devfn = devfn;
224
225 trace_virtio_iommu_init_iommu_mr(name);
226
227 memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
228 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
229 OBJECT(s), name,
230 UINT64_MAX);
231 address_space_init(&sdev->as,
232 MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
233 g_free(name);
234 }
235 return &sdev->as;
236}
237
5442b854
EA
238static int virtio_iommu_attach(VirtIOIOMMU *s,
239 struct virtio_iommu_req_attach *req)
22c37a10 240{
5442b854
EA
241 uint32_t domain_id = le32_to_cpu(req->domain);
242 uint32_t ep_id = le32_to_cpu(req->endpoint);
cfb42188
EA
243 VirtIOIOMMUDomain *domain;
244 VirtIOIOMMUEndpoint *ep;
5442b854
EA
245
246 trace_virtio_iommu_attach(domain_id, ep_id);
247
cfb42188
EA
248 ep = virtio_iommu_get_endpoint(s, ep_id);
249 if (!ep) {
250 return VIRTIO_IOMMU_S_NOENT;
251 }
252
253 if (ep->domain) {
254 VirtIOIOMMUDomain *previous_domain = ep->domain;
255 /*
256 * the device is already attached to a domain,
257 * detach it first
258 */
259 virtio_iommu_detach_endpoint_from_domain(ep);
260 if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
261 g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
262 }
263 }
264
265 domain = virtio_iommu_get_domain(s, domain_id);
266 QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);
267
268 ep->domain = domain;
269
270 return VIRTIO_IOMMU_S_OK;
22c37a10 271}
5442b854
EA
272
273static int virtio_iommu_detach(VirtIOIOMMU *s,
274 struct virtio_iommu_req_detach *req)
22c37a10 275{
5442b854
EA
276 uint32_t domain_id = le32_to_cpu(req->domain);
277 uint32_t ep_id = le32_to_cpu(req->endpoint);
cfb42188
EA
278 VirtIOIOMMUDomain *domain;
279 VirtIOIOMMUEndpoint *ep;
5442b854
EA
280
281 trace_virtio_iommu_detach(domain_id, ep_id);
282
cfb42188
EA
283 ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
284 if (!ep) {
285 return VIRTIO_IOMMU_S_NOENT;
286 }
287
288 domain = ep->domain;
289
290 if (!domain || domain->id != domain_id) {
291 return VIRTIO_IOMMU_S_INVAL;
292 }
293
294 virtio_iommu_detach_endpoint_from_domain(ep);
295
296 if (QLIST_EMPTY(&domain->endpoint_list)) {
297 g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
298 }
299 return VIRTIO_IOMMU_S_OK;
22c37a10 300}
5442b854
EA
301
302static int virtio_iommu_map(VirtIOIOMMU *s,
303 struct virtio_iommu_req_map *req)
22c37a10 304{
5442b854
EA
305 uint32_t domain_id = le32_to_cpu(req->domain);
306 uint64_t phys_start = le64_to_cpu(req->phys_start);
307 uint64_t virt_start = le64_to_cpu(req->virt_start);
308 uint64_t virt_end = le64_to_cpu(req->virt_end);
309 uint32_t flags = le32_to_cpu(req->flags);
fe2cacae
EA
310 VirtIOIOMMUDomain *domain;
311 VirtIOIOMMUInterval *interval;
312 VirtIOIOMMUMapping *mapping;
313
314 if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
315 return VIRTIO_IOMMU_S_INVAL;
316 }
317
318 domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
319 if (!domain) {
320 return VIRTIO_IOMMU_S_NOENT;
321 }
322
323 interval = g_malloc0(sizeof(*interval));
324
325 interval->low = virt_start;
326 interval->high = virt_end;
327
328 mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
329 if (mapping) {
330 g_free(interval);
331 return VIRTIO_IOMMU_S_INVAL;
332 }
5442b854
EA
333
334 trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);
335
fe2cacae
EA
336 mapping = g_malloc0(sizeof(*mapping));
337 mapping->phys_addr = phys_start;
338 mapping->flags = flags;
339
340 g_tree_insert(domain->mappings, interval, mapping);
341
342 return VIRTIO_IOMMU_S_OK;
22c37a10 343}
5442b854
EA
344
345static int virtio_iommu_unmap(VirtIOIOMMU *s,
346 struct virtio_iommu_req_unmap *req)
22c37a10 347{
5442b854
EA
348 uint32_t domain_id = le32_to_cpu(req->domain);
349 uint64_t virt_start = le64_to_cpu(req->virt_start);
350 uint64_t virt_end = le64_to_cpu(req->virt_end);
fe2cacae
EA
351 VirtIOIOMMUMapping *iter_val;
352 VirtIOIOMMUInterval interval, *iter_key;
353 VirtIOIOMMUDomain *domain;
354 int ret = VIRTIO_IOMMU_S_OK;
5442b854
EA
355
356 trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);
357
fe2cacae
EA
358 domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
359 if (!domain) {
360 return VIRTIO_IOMMU_S_NOENT;
361 }
362 interval.low = virt_start;
363 interval.high = virt_end;
364
365 while (g_tree_lookup_extended(domain->mappings, &interval,
366 (void **)&iter_key, (void**)&iter_val)) {
367 uint64_t current_low = iter_key->low;
368 uint64_t current_high = iter_key->high;
369
370 if (interval.low <= current_low && interval.high >= current_high) {
371 g_tree_remove(domain->mappings, iter_key);
372 trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
373 } else {
374 ret = VIRTIO_IOMMU_S_RANGE;
375 break;
376 }
377 }
378 return ret;
22c37a10
EA
379}
380
5442b854
EA
381static int virtio_iommu_iov_to_req(struct iovec *iov,
382 unsigned int iov_cnt,
383 void *req, size_t req_sz)
384{
385 size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail);
386
387 sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz);
388 if (unlikely(sz != payload_sz)) {
389 return VIRTIO_IOMMU_S_INVAL;
390 }
391 return 0;
392}
393
/*
 * Generate the virtio_iommu_handle_<req>() wrappers: each copies the
 * request payload out of the guest-supplied iovec into a local struct
 * and dispatches to the matching virtio_iommu_<req>() implementation,
 * returning its VIRTIO_IOMMU_S_* status.
 */
#define virtio_iommu_handle_req(__req) \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s, \
                                         struct iovec *iov, \
                                         unsigned int iov_cnt) \
{ \
    struct virtio_iommu_req_ ## __req req; \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
 \
    return ret ? ret : virtio_iommu_ ## __req(s, &req); \
}

/* one wrapper per request type handled by the command queue */
virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)
409
22c37a10
EA
/*
 * Request virtqueue handler: pop each queued element, validate the
 * buffer sizes, decode the request head, dispatch on its type, then
 * write the status tail back into the device-writable buffer and push
 * the element as used.
 */
static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_req_head head;
    struct virtio_iommu_req_tail tail = {};
    VirtQueueElement *elem;
    unsigned int iov_cnt;
    struct iovec *iov;
    size_t sz;

    for (;;) {
        elem = virtqueue_pop(vq, sizeof(VirtQueueElement));
        if (!elem) {
            return; /* queue drained */
        }

        /*
         * The device-writable part must hold at least the tail and the
         * driver-readable part at least the head; otherwise the
         * descriptor is malformed and the device is marked broken.
         */
        if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) ||
            iov_size(elem->out_sg, elem->out_num) < sizeof(head)) {
            virtio_error(vdev, "virtio-iommu bad head/tail size");
            virtqueue_detach_element(vq, elem, 0);
            g_free(elem);
            break;
        }

        iov_cnt = elem->out_num;
        iov = elem->out_sg;
        sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head));
        if (unlikely(sz != sizeof(head))) {
            /* short read of the head: report a device error in the tail */
            tail.status = VIRTIO_IOMMU_S_DEVERR;
            goto out;
        }
        /* the mutex protects the domain/endpoint trees */
        qemu_mutex_lock(&s->mutex);
        switch (head.type) {
        case VIRTIO_IOMMU_T_ATTACH:
            tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_DETACH:
            tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_MAP:
            tail.status = virtio_iommu_handle_map(s, iov, iov_cnt);
            break;
        case VIRTIO_IOMMU_T_UNMAP:
            tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt);
            break;
        default:
            tail.status = VIRTIO_IOMMU_S_UNSUPP;
        }
        qemu_mutex_unlock(&s->mutex);

out:
        /* write the status tail and complete the request */
        sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                          &tail, sizeof(tail));
        assert(sz == sizeof(tail));

        virtqueue_push(vq, elem, sizeof(tail));
        virtio_notify(vdev, vq);
        g_free(elem);
    }
}
470
cfb42188
EA
471static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr,
472 IOMMUAccessFlags flag,
473 int iommu_idx)
474{
475 IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr);
476 uint32_t sid;
477
478 IOMMUTLBEntry entry = {
479 .target_as = &address_space_memory,
480 .iova = addr,
481 .translated_addr = addr,
482 .addr_mask = ~(hwaddr)0,
483 .perm = IOMMU_NONE,
484 };
485
486 sid = virtio_iommu_get_bdf(sdev);
487
488 trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag);
489 return entry;
490}
491
22c37a10
EA
492static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
493{
494 VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
495 struct virtio_iommu_config *config = &dev->config;
496
497 trace_virtio_iommu_get_config(config->page_size_mask,
498 config->input_range.start,
499 config->input_range.end,
500 config->domain_range.end,
501 config->probe_size);
502 memcpy(config_data, &dev->config, sizeof(struct virtio_iommu_config));
503}
504
505static void virtio_iommu_set_config(VirtIODevice *vdev,
506 const uint8_t *config_data)
507{
508 struct virtio_iommu_config config;
509
510 memcpy(&config, config_data, sizeof(struct virtio_iommu_config));
511 trace_virtio_iommu_set_config(config.page_size_mask,
512 config.input_range.start,
513 config.input_range.end,
514 config.domain_range.end,
515 config.probe_size);
516}
517
518static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f,
519 Error **errp)
520{
521 VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
522
523 f |= dev->features;
524 trace_virtio_iommu_get_features(f);
525 return f;
526}
527
/*
 * Migration is not yet supported: most of the state consists
 * of balanced binary trees which are not yet ready for getting
 * migrated
 */
static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .unmigratable = 1,
};
537
cfb42188
EA
538static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data)
539{
540 guint ua = GPOINTER_TO_UINT(a);
541 guint ub = GPOINTER_TO_UINT(b);
542 return (ua > ub) - (ua < ub);
543}
544
22c37a10
EA
545static void virtio_iommu_device_realize(DeviceState *dev, Error **errp)
546{
547 VirtIODevice *vdev = VIRTIO_DEVICE(dev);
548 VirtIOIOMMU *s = VIRTIO_IOMMU(dev);
549
550 virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU,
551 sizeof(struct virtio_iommu_config));
552
cfb42188
EA
553 memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num));
554
22c37a10
EA
555 s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE,
556 virtio_iommu_handle_command);
557 s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL);
558
559 s->config.page_size_mask = TARGET_PAGE_MASK;
560 s->config.input_range.end = -1UL;
561 s->config.domain_range.end = 32;
562
563 virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX);
564 virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC);
565 virtio_add_feature(&s->features, VIRTIO_F_VERSION_1);
566 virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE);
567 virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE);
568 virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP);
569 virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS);
570 virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO);
571
572 qemu_mutex_init(&s->mutex);
cfb42188
EA
573
574 s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free);
575
576 if (s->primary_bus) {
577 pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s);
578 } else {
579 error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!");
580 }
22c37a10
EA
581}
582
/*
 * Unrealize: destroy the domain and endpoint trees (their GDestroyNotify
 * hooks free any remaining domains/endpoints) and tear down the virtio
 * device.
 */
static void virtio_iommu_device_unrealize(DeviceState *dev, Error **errp)
{
    VirtIODevice *vdev = VIRTIO_DEVICE(dev);
    VirtIOIOMMU *s = VIRTIO_IOMMU(dev);

    /*
     * NOTE(review): s->as_by_busptr allocated in realize is not destroyed
     * here — presumably the address spaces must outlive the device, but
     * verify this is not a leak on hot-unplug.
     */
    g_tree_destroy(s->domains);
    g_tree_destroy(s->endpoints);

    virtio_cleanup(vdev);
}
593
/*
 * Device reset: drop all domains and endpoints (the put_* destroy
 * notifiers detach and free them) and start over with empty trees.
 */
static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    /* the trees may not exist yet on the very first reset */
    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}
611
/* Driver status change: only traced, no device state depends on it yet. */
static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}
616
/* No per-instance initialization is needed yet. */
static void virtio_iommu_instance_init(Object *obj)
{
}
620
/*
 * Outer migration section; the per-device section registered via
 * vdc->vmsd (vmstate_virtio_iommu_device) is marked unmigratable.
 */
static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 1,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
630
/* "primary-bus" links the IOMMU to the PCI bus whose devices it protects. */
static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};
635
/* QOM class init: wire properties, vmstate and VirtioDeviceClass callbacks. */
static void virtio_iommu_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_iommu_properties);
    dc->vmsd = &vmstate_virtio_iommu;

    set_bit(DEVICE_CATEGORY_MISC, dc->categories);
    vdc->realize = virtio_iommu_device_realize;
    vdc->unrealize = virtio_iommu_device_unrealize;
    vdc->reset = virtio_iommu_device_reset;
    vdc->get_config = virtio_iommu_get_config;
    vdc->set_config = virtio_iommu_set_config;
    vdc->get_features = virtio_iommu_get_features;
    vdc->set_status = virtio_iommu_set_status;
    /* device-specific (unmigratable) state section */
    vdc->vmsd = &vmstate_virtio_iommu_device;
}
654
cfb42188
EA
/* Install the translate callback on the IOMMU memory region class. */
static void virtio_iommu_memory_region_class_init(ObjectClass *klass,
                                                  void *data)
{
    IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass);

    imrc->translate = virtio_iommu_translate;
}
662
22c37a10
EA
/* QOM type registration data for the virtio-iommu device. */
static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};
670
cfb42188
EA
/* QOM type registration data for the per-endpoint IOMMU memory region. */
static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};
676
22c37a10
EA
/* Register both QOM types with the type system at module init time. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}
682
683type_init(virtio_register_types)