]>
Commit | Line | Data |
---|---|---|
22c37a10 EA |
1 | /* |
2 | * virtio-iommu device | |
3 | * | |
4 | * Copyright (c) 2020 Red Hat, Inc. | |
5 | * | |
6 | * This program is free software; you can redistribute it and/or modify it | |
7 | * under the terms and conditions of the GNU General Public License, | |
8 | * version 2 or later, as published by the Free Software Foundation. | |
9 | * | |
10 | * This program is distributed in the hope it will be useful, but WITHOUT | |
11 | * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or | |
12 | * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for | |
13 | * more details. | |
14 | * | |
15 | * You should have received a copy of the GNU General Public License along with | |
16 | * this program. If not, see <http://www.gnu.org/licenses/>. | |
17 | * | |
18 | */ | |
19 | ||
20 | #include "qemu/osdep.h" | |
fe2cacae | 21 | #include "qemu/log.h" |
22c37a10 EA |
22 | #include "qemu/iov.h" |
23 | #include "qemu-common.h" | |
24 | #include "hw/qdev-properties.h" | |
25 | #include "hw/virtio/virtio.h" | |
26 | #include "sysemu/kvm.h" | |
cfb42188 EA |
27 | #include "qapi/error.h" |
28 | #include "qemu/error-report.h" | |
22c37a10 EA |
29 | #include "trace.h" |
30 | ||
31 | #include "standard-headers/linux/virtio_ids.h" | |
32 | ||
33 | #include "hw/virtio/virtio-bus.h" | |
34 | #include "hw/virtio/virtio-access.h" | |
35 | #include "hw/virtio/virtio-iommu.h" | |
cfb42188 EA |
36 | #include "hw/pci/pci_bus.h" |
37 | #include "hw/pci/pci.h" | |
22c37a10 EA |
38 | |
39 | /* Max size */ | |
40 | #define VIOMMU_DEFAULT_QUEUE_SIZE 256 | |
1733eebb | 41 | #define VIOMMU_PROBE_SIZE 512 |
22c37a10 | 42 | |
cfb42188 EA |
/*
 * A translation domain: groups the IOVA->PA mappings shared by all the
 * endpoints attached to it.
 */
typedef struct VirtIOIOMMUDomain {
    uint32_t id;                 /* domain id chosen by the guest driver */
    GTree *mappings;             /* VirtIOIOMMUInterval -> VirtIOIOMMUMapping */
    QLIST_HEAD(, VirtIOIOMMUEndpoint) endpoint_list; /* attached endpoints */
} VirtIOIOMMUDomain;

/* One endpoint (PCI function) known to the IOMMU, keyed by its sid (BDF). */
typedef struct VirtIOIOMMUEndpoint {
    uint32_t id;                 /* endpoint id == PCI requester id */
    VirtIOIOMMUDomain *domain;   /* domain currently attached to, or NULL */
    IOMMUMemoryRegion *iommu_mr; /* region backing this endpoint's AS */
    QLIST_ENTRY(VirtIOIOMMUEndpoint) next;
} VirtIOIOMMUEndpoint;

/* Inclusive [low, high] IOVA range; key type of a domain's mapping tree. */
typedef struct VirtIOIOMMUInterval {
    uint64_t low;
    uint64_t high;
} VirtIOIOMMUInterval;

/* Value stored per interval: target physical address and permissions. */
typedef struct VirtIOIOMMUMapping {
    uint64_t phys_addr;
    uint32_t flags;              /* VIRTIO_IOMMU_MAP_F_* permission bits */
} VirtIOIOMMUMapping;
65 | ||
cfb42188 EA |
/* Return the PCI requester id (bus number << 8 | devfn) of @dev. */
static inline uint16_t virtio_iommu_get_bdf(IOMMUDevice *dev)
{
    return PCI_BUILD_BDF(pci_bus_num(dev->bus), dev->devfn);
}
70 | ||
/**
 * The bus number is used for lookup when SID based operations occur.
 * In that case we lazily populate the IOMMUPciBus array from the bus hash
 * table. At the time the IOMMUPciBus is created (iommu_find_add_as), the bus
 * numbers may not be always initialized yet.
 */
static IOMMUPciBus *iommu_find_iommu_pcibus(VirtIOIOMMU *s, uint8_t bus_num)
{
    IOMMUPciBus *iommu_pci_bus = s->iommu_pcibus_by_bus_num[bus_num];

    if (!iommu_pci_bus) {
        GHashTableIter iter;

        /*
         * Cache miss: scan the bus hash table for a bus whose number is
         * now known, and memoize it in iommu_pcibus_by_bus_num.
         */
        g_hash_table_iter_init(&iter, s->as_by_busptr);
        while (g_hash_table_iter_next(&iter, NULL, (void **)&iommu_pci_bus)) {
            if (pci_bus_num(iommu_pci_bus->bus) == bus_num) {
                s->iommu_pcibus_by_bus_num[bus_num] = iommu_pci_bus;
                return iommu_pci_bus;
            }
        }
        return NULL;
    }
    return iommu_pci_bus;
}
95 | ||
96 | static IOMMUMemoryRegion *virtio_iommu_mr(VirtIOIOMMU *s, uint32_t sid) | |
97 | { | |
98 | uint8_t bus_n, devfn; | |
99 | IOMMUPciBus *iommu_pci_bus; | |
100 | IOMMUDevice *dev; | |
101 | ||
102 | bus_n = PCI_BUS_NUM(sid); | |
103 | iommu_pci_bus = iommu_find_iommu_pcibus(s, bus_n); | |
104 | if (iommu_pci_bus) { | |
bfe7a961 | 105 | devfn = sid & (PCI_DEVFN_MAX - 1); |
cfb42188 EA |
106 | dev = iommu_pci_bus->pbdev[devfn]; |
107 | if (dev) { | |
108 | return &dev->iommu_mr; | |
109 | } | |
110 | } | |
111 | return NULL; | |
112 | } | |
113 | ||
114 | static gint interval_cmp(gconstpointer a, gconstpointer b, gpointer user_data) | |
115 | { | |
116 | VirtIOIOMMUInterval *inta = (VirtIOIOMMUInterval *)a; | |
117 | VirtIOIOMMUInterval *intb = (VirtIOIOMMUInterval *)b; | |
118 | ||
119 | if (inta->high < intb->low) { | |
120 | return -1; | |
121 | } else if (intb->high < inta->low) { | |
122 | return 1; | |
123 | } else { | |
124 | return 0; | |
125 | } | |
126 | } | |
127 | ||
128 | static void virtio_iommu_detach_endpoint_from_domain(VirtIOIOMMUEndpoint *ep) | |
129 | { | |
130 | if (!ep->domain) { | |
131 | return; | |
132 | } | |
133 | QLIST_REMOVE(ep, next); | |
134 | ep->domain = NULL; | |
135 | } | |
136 | ||
/*
 * Look up the endpoint with id @ep_id, creating and registering it on
 * first use. Returns NULL if no memory region exists for that sid,
 * i.e. the endpoint is not a device managed by this IOMMU.
 */
static VirtIOIOMMUEndpoint *virtio_iommu_get_endpoint(VirtIOIOMMU *s,
                                                      uint32_t ep_id)
{
    VirtIOIOMMUEndpoint *ep;
    IOMMUMemoryRegion *mr;

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (ep) {
        return ep;
    }
    /* Only create endpoints that actually sit behind this IOMMU */
    mr = virtio_iommu_mr(s, ep_id);
    if (!mr) {
        return NULL;
    }
    ep = g_malloc0(sizeof(*ep));
    ep->id = ep_id;
    ep->iommu_mr = mr;
    trace_virtio_iommu_get_endpoint(ep_id);
    /* ownership transfers to the tree; freed by virtio_iommu_put_endpoint */
    g_tree_insert(s->endpoints, GUINT_TO_POINTER(ep_id), ep);
    return ep;
}
158 | ||
/*
 * GDestroyNotify for s->endpoints: detach the endpoint from its domain
 * (if any) and free it.
 */
static void virtio_iommu_put_endpoint(gpointer data)
{
    VirtIOIOMMUEndpoint *ep = (VirtIOIOMMUEndpoint *)data;

    if (ep->domain) {
        virtio_iommu_detach_endpoint_from_domain(ep);
    }

    trace_virtio_iommu_put_endpoint(ep->id);
    g_free(ep);
}
170 | ||
/*
 * Look up the domain with id @domain_id, creating and registering it
 * (with an empty mapping tree and endpoint list) on first use.
 */
static VirtIOIOMMUDomain *virtio_iommu_get_domain(VirtIOIOMMU *s,
                                                  uint32_t domain_id)
{
    VirtIOIOMMUDomain *domain;

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (domain) {
        return domain;
    }
    domain = g_malloc0(sizeof(*domain));
    domain->id = domain_id;
    /* both keys (intervals) and values (mappings) are heap-owned by the tree */
    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, (GDestroyNotify)g_free,
                                       (GDestroyNotify)g_free);
    g_tree_insert(s->domains, GUINT_TO_POINTER(domain_id), domain);
    QLIST_INIT(&domain->endpoint_list);
    trace_virtio_iommu_get_domain(domain_id);
    return domain;
}
190 | ||
/*
 * GDestroyNotify for s->domains: detach every endpoint still attached,
 * destroy the mapping tree (freeing all intervals/mappings) and free
 * the domain itself.
 */
static void virtio_iommu_put_domain(gpointer data)
{
    VirtIOIOMMUDomain *domain = (VirtIOIOMMUDomain *)data;
    VirtIOIOMMUEndpoint *iter, *tmp;

    /* _SAFE: detaching removes iter from the list we are walking */
    QLIST_FOREACH_SAFE(iter, &domain->endpoint_list, next, tmp) {
        virtio_iommu_detach_endpoint_from_domain(iter);
    }
    g_tree_destroy(domain->mappings);
    trace_virtio_iommu_put_domain(domain->id);
    g_free(domain);
}
203 | ||
/*
 * pci_setup_iommu() callback: return (creating on first use) the
 * per-device address space for @devfn on @bus. The IOMMUPciBus and
 * IOMMUDevice records built here are later found by sid lookups.
 */
static AddressSpace *virtio_iommu_find_add_as(PCIBus *bus, void *opaque,
                                              int devfn)
{
    VirtIOIOMMU *s = opaque;
    IOMMUPciBus *sbus = g_hash_table_lookup(s->as_by_busptr, bus);
    /* monotonic counter used only to give each region a unique name */
    static uint32_t mr_index;
    IOMMUDevice *sdev;

    if (!sbus) {
        /* flexible-array style allocation: one IOMMUDevice* slot per devfn */
        sbus = g_malloc0(sizeof(IOMMUPciBus) +
                         sizeof(IOMMUDevice *) * PCI_DEVFN_MAX);
        sbus->bus = bus;
        g_hash_table_insert(s->as_by_busptr, bus, sbus);
    }

    sdev = sbus->pbdev[devfn];
    if (!sdev) {
        char *name = g_strdup_printf("%s-%d-%d",
                                     TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                     mr_index++, devfn);
        sdev = sbus->pbdev[devfn] = g_malloc0(sizeof(IOMMUDevice));

        sdev->viommu = s;
        sdev->bus = bus;
        sdev->devfn = devfn;

        trace_virtio_iommu_init_iommu_mr(name);

        /* region spans the whole 64-bit space; translate() enforces limits */
        memory_region_init_iommu(&sdev->iommu_mr, sizeof(sdev->iommu_mr),
                                 TYPE_VIRTIO_IOMMU_MEMORY_REGION,
                                 OBJECT(s), name,
                                 UINT64_MAX);
        address_space_init(&sdev->as,
                           MEMORY_REGION(&sdev->iommu_mr), TYPE_VIRTIO_IOMMU);
        g_free(name);
    }
    return &sdev->as;
}
242 | ||
5442b854 EA |
/*
 * Handle an ATTACH request: attach endpoint @req->endpoint to domain
 * @req->domain, implicitly detaching it from a previous domain first.
 * Returns a VIRTIO_IOMMU_S_* status code.
 */
static int virtio_iommu_attach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_attach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_attach(domain_id, ep_id);

    ep = virtio_iommu_get_endpoint(s, ep_id);
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    if (ep->domain) {
        VirtIOIOMMUDomain *previous_domain = ep->domain;
        /*
         * the device is already attached to a domain,
         * detach it first
         */
        virtio_iommu_detach_endpoint_from_domain(ep);
        /* reclaim the old domain once its last endpoint is gone */
        if (QLIST_EMPTY(&previous_domain->endpoint_list)) {
            g_tree_remove(s->domains, GUINT_TO_POINTER(previous_domain->id));
        }
    }

    domain = virtio_iommu_get_domain(s, domain_id);
    QLIST_INSERT_HEAD(&domain->endpoint_list, ep, next);

    ep->domain = domain;

    return VIRTIO_IOMMU_S_OK;
}
5442b854 EA |
277 | |
/*
 * Handle a DETACH request: detach endpoint @req->endpoint from domain
 * @req->domain. The domain is destroyed once it has no endpoint left.
 * Returns a VIRTIO_IOMMU_S_* status code.
 */
static int virtio_iommu_detach(VirtIOIOMMU *s,
                               struct virtio_iommu_req_detach *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUEndpoint *ep;

    trace_virtio_iommu_detach(domain_id, ep_id);

    ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(ep_id));
    if (!ep) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    domain = ep->domain;

    /* the endpoint must currently be attached to the requested domain */
    if (!domain || domain->id != domain_id) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    virtio_iommu_detach_endpoint_from_domain(ep);

    if (QLIST_EMPTY(&domain->endpoint_list)) {
        g_tree_remove(s->domains, GUINT_TO_POINTER(domain->id));
    }
    return VIRTIO_IOMMU_S_OK;
}
5442b854 EA |
306 | |
/*
 * Handle a MAP request: record the [virt_start, virt_end] -> phys_start
 * mapping with @req->flags permissions in the target domain's tree.
 * Overlapping an existing mapping is rejected with S_INVAL.
 * Returns a VIRTIO_IOMMU_S_* status code.
 */
static int virtio_iommu_map(VirtIOIOMMU *s,
                            struct virtio_iommu_req_map *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t phys_start = le64_to_cpu(req->phys_start);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    uint32_t flags = le32_to_cpu(req->flags);
    VirtIOIOMMUDomain *domain;
    VirtIOIOMMUInterval *interval;
    VirtIOIOMMUMapping *mapping;

    /* reject unknown permission bits */
    if (flags & ~VIRTIO_IOMMU_MAP_F_MASK) {
        return VIRTIO_IOMMU_S_INVAL;
    }

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    interval = g_malloc0(sizeof(*interval));

    interval->low = virt_start;
    interval->high = virt_end;

    /* interval_cmp treats overlapping ranges as equal, so this detects any
     * overlap with an existing mapping, not just an exact duplicate */
    mapping = g_tree_lookup(domain->mappings, (gpointer)interval);
    if (mapping) {
        g_free(interval);
        return VIRTIO_IOMMU_S_INVAL;
    }

    trace_virtio_iommu_map(domain_id, virt_start, virt_end, phys_start, flags);

    mapping = g_malloc0(sizeof(*mapping));
    mapping->phys_addr = phys_start;
    mapping->flags = flags;

    /* the tree takes ownership of both interval and mapping */
    g_tree_insert(domain->mappings, interval, mapping);

    return VIRTIO_IOMMU_S_OK;
}
5442b854 EA |
349 | |
/*
 * Handle an UNMAP request: remove every mapping fully contained in
 * [virt_start, virt_end] from the domain. A mapping that only partially
 * overlaps the range aborts the operation with S_RANGE (per the spec,
 * partial unmap of a mapping is not allowed).
 * Returns a VIRTIO_IOMMU_S_* status code.
 */
static int virtio_iommu_unmap(VirtIOIOMMU *s,
                              struct virtio_iommu_req_unmap *req)
{
    uint32_t domain_id = le32_to_cpu(req->domain);
    uint64_t virt_start = le64_to_cpu(req->virt_start);
    uint64_t virt_end = le64_to_cpu(req->virt_end);
    VirtIOIOMMUMapping *iter_val;
    VirtIOIOMMUInterval interval, *iter_key;
    VirtIOIOMMUDomain *domain;
    int ret = VIRTIO_IOMMU_S_OK;

    trace_virtio_iommu_unmap(domain_id, virt_start, virt_end);

    domain = g_tree_lookup(s->domains, GUINT_TO_POINTER(domain_id));
    if (!domain) {
        return VIRTIO_IOMMU_S_NOENT;
    }
    interval.low = virt_start;
    interval.high = virt_end;

    /*
     * interval_cmp matches on overlap, so each lookup returns some
     * mapping intersecting the range; removing it lets the loop find
     * the next one until no overlap remains.
     */
    while (g_tree_lookup_extended(domain->mappings, &interval,
                                  (void **)&iter_key, (void **)&iter_val)) {
        uint64_t current_low = iter_key->low;
        uint64_t current_high = iter_key->high;

        if (interval.low <= current_low && interval.high >= current_high) {
            g_tree_remove(domain->mappings, iter_key);
            trace_virtio_iommu_unmap_done(domain_id, current_low, current_high);
        } else {
            /* partial overlap: refuse to split the mapping */
            ret = VIRTIO_IOMMU_S_RANGE;
            break;
        }
    }
    return ret;
}
385 | ||
1733eebb EA |
/*
 * Serialize all reserved-region properties into @buf (at most @free
 * bytes) for a PROBE reply to endpoint @ep.
 * Returns the number of bytes written, or -ENOSPC if they do not fit.
 */
static ssize_t virtio_iommu_fill_resv_mem_prop(VirtIOIOMMU *s, uint32_t ep,
                                               uint8_t *buf, size_t free)
{
    struct virtio_iommu_probe_resv_mem prop = {};
    /* head.length covers the payload only, not the property header */
    size_t size = sizeof(prop), length = size - sizeof(prop.head), total;
    int i;

    total = size * s->nb_reserved_regions;

    if (total > free) {
        return -ENOSPC;
    }

    for (i = 0; i < s->nb_reserved_regions; i++) {
        unsigned subtype = s->reserved_regions[i].type;

        assert(subtype == VIRTIO_IOMMU_RESV_MEM_T_RESERVED ||
               subtype == VIRTIO_IOMMU_RESV_MEM_T_MSI);
        prop.head.type = cpu_to_le16(VIRTIO_IOMMU_PROBE_T_RESV_MEM);
        prop.head.length = cpu_to_le16(length);
        prop.subtype = subtype;
        prop.start = cpu_to_le64(s->reserved_regions[i].low);
        prop.end = cpu_to_le64(s->reserved_regions[i].high);

        memcpy(buf, &prop, size);

        trace_virtio_iommu_fill_resv_property(ep, prop.subtype,
                                              prop.start, prop.end);
        buf += size;
    }
    return total;
}
418 | ||
/**
 * virtio_iommu_probe - Fill the probe request buffer with
 * the properties the device is able to return
 *
 * @s: the virtio-iommu device
 * @req: PROBE request identifying the endpoint
 * @buf: output property buffer, VIOMMU_PROBE_SIZE bytes
 *
 * Returns a VIRTIO_IOMMU_S_* status code.
 */
static int virtio_iommu_probe(VirtIOIOMMU *s,
                              struct virtio_iommu_req_probe *req,
                              uint8_t *buf)
{
    uint32_t ep_id = le32_to_cpu(req->endpoint);
    size_t free = VIOMMU_PROBE_SIZE;
    ssize_t count;

    /* only endpoints behind this IOMMU can be probed */
    if (!virtio_iommu_mr(s, ep_id)) {
        return VIRTIO_IOMMU_S_NOENT;
    }

    count = virtio_iommu_fill_resv_mem_prop(s, ep_id, buf, free);
    if (count < 0) {
        return VIRTIO_IOMMU_S_INVAL;
    }
    /* keep buf/free current for further property types added later */
    buf += count;
    free -= count;

    return VIRTIO_IOMMU_S_OK;
}
444 | ||
5442b854 EA |
445 | static int virtio_iommu_iov_to_req(struct iovec *iov, |
446 | unsigned int iov_cnt, | |
447 | void *req, size_t req_sz) | |
448 | { | |
449 | size_t sz, payload_sz = req_sz - sizeof(struct virtio_iommu_req_tail); | |
450 | ||
451 | sz = iov_to_buf(iov, iov_cnt, 0, req, payload_sz); | |
452 | if (unlikely(sz != payload_sz)) { | |
453 | return VIRTIO_IOMMU_S_INVAL; | |
454 | } | |
455 | return 0; | |
456 | } | |
457 | ||
/*
 * Generate virtio_iommu_handle_<req>(): decode the request struct from
 * the out iovec and dispatch it to virtio_iommu_<req>(). One handler is
 * instantiated per request type below.
 */
#define virtio_iommu_handle_req(__req)                                  \
static int virtio_iommu_handle_ ## __req(VirtIOIOMMU *s,                \
                                         struct iovec *iov,             \
                                         unsigned int iov_cnt)          \
{                                                                       \
    struct virtio_iommu_req_ ## __req req;                              \
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req)); \
                                                                        \
    return ret ? ret : virtio_iommu_ ## __req(s, &req);                 \
}

virtio_iommu_handle_req(attach)
virtio_iommu_handle_req(detach)
virtio_iommu_handle_req(map)
virtio_iommu_handle_req(unmap)
473 | ||
1733eebb EA |
/*
 * PROBE needs an extra output buffer for the property list, so it
 * cannot use the virtio_iommu_handle_req() template above.
 */
static int virtio_iommu_handle_probe(VirtIOIOMMU *s,
                                     struct iovec *iov,
                                     unsigned int iov_cnt,
                                     uint8_t *buf)
{
    struct virtio_iommu_req_probe req;
    int ret = virtio_iommu_iov_to_req(iov, iov_cnt, &req, sizeof(req));

    return ret ? ret : virtio_iommu_probe(s, &req, buf);
}
484 | ||
22c37a10 EA |
485 | static void virtio_iommu_handle_command(VirtIODevice *vdev, VirtQueue *vq) |
486 | { | |
487 | VirtIOIOMMU *s = VIRTIO_IOMMU(vdev); | |
488 | struct virtio_iommu_req_head head; | |
489 | struct virtio_iommu_req_tail tail = {}; | |
1733eebb | 490 | size_t output_size = sizeof(tail), sz; |
22c37a10 EA |
491 | VirtQueueElement *elem; |
492 | unsigned int iov_cnt; | |
493 | struct iovec *iov; | |
1733eebb | 494 | void *buf = NULL; |
22c37a10 EA |
495 | |
496 | for (;;) { | |
497 | elem = virtqueue_pop(vq, sizeof(VirtQueueElement)); | |
498 | if (!elem) { | |
499 | return; | |
500 | } | |
501 | ||
502 | if (iov_size(elem->in_sg, elem->in_num) < sizeof(tail) || | |
503 | iov_size(elem->out_sg, elem->out_num) < sizeof(head)) { | |
504 | virtio_error(vdev, "virtio-iommu bad head/tail size"); | |
505 | virtqueue_detach_element(vq, elem, 0); | |
506 | g_free(elem); | |
507 | break; | |
508 | } | |
509 | ||
510 | iov_cnt = elem->out_num; | |
511 | iov = elem->out_sg; | |
512 | sz = iov_to_buf(iov, iov_cnt, 0, &head, sizeof(head)); | |
513 | if (unlikely(sz != sizeof(head))) { | |
514 | tail.status = VIRTIO_IOMMU_S_DEVERR; | |
515 | goto out; | |
516 | } | |
517 | qemu_mutex_lock(&s->mutex); | |
518 | switch (head.type) { | |
519 | case VIRTIO_IOMMU_T_ATTACH: | |
520 | tail.status = virtio_iommu_handle_attach(s, iov, iov_cnt); | |
521 | break; | |
522 | case VIRTIO_IOMMU_T_DETACH: | |
523 | tail.status = virtio_iommu_handle_detach(s, iov, iov_cnt); | |
524 | break; | |
525 | case VIRTIO_IOMMU_T_MAP: | |
526 | tail.status = virtio_iommu_handle_map(s, iov, iov_cnt); | |
527 | break; | |
528 | case VIRTIO_IOMMU_T_UNMAP: | |
529 | tail.status = virtio_iommu_handle_unmap(s, iov, iov_cnt); | |
530 | break; | |
1733eebb EA |
531 | case VIRTIO_IOMMU_T_PROBE: |
532 | { | |
533 | struct virtio_iommu_req_tail *ptail; | |
534 | ||
535 | output_size = s->config.probe_size + sizeof(tail); | |
536 | buf = g_malloc0(output_size); | |
537 | ||
538 | ptail = (struct virtio_iommu_req_tail *) | |
539 | (buf + s->config.probe_size); | |
540 | ptail->status = virtio_iommu_handle_probe(s, iov, iov_cnt, buf); | |
e95e05da | 541 | break; |
1733eebb | 542 | } |
22c37a10 EA |
543 | default: |
544 | tail.status = VIRTIO_IOMMU_S_UNSUPP; | |
545 | } | |
546 | qemu_mutex_unlock(&s->mutex); | |
547 | ||
548 | out: | |
549 | sz = iov_from_buf(elem->in_sg, elem->in_num, 0, | |
1733eebb EA |
550 | buf ? buf : &tail, output_size); |
551 | assert(sz == output_size); | |
22c37a10 | 552 | |
1733eebb | 553 | virtqueue_push(vq, elem, sz); |
22c37a10 EA |
554 | virtio_notify(vdev, vq); |
555 | g_free(elem); | |
1733eebb | 556 | g_free(buf); |
22c37a10 EA |
557 | } |
558 | } | |
559 | ||
a7c1da8a EA |
/*
 * Push a virtio_iommu_fault event (@reason, @flags, @endpoint, @address)
 * onto the event virtqueue. Silently drops the event (with a one-shot
 * error report) if the guest supplied no event buffer.
 */
static void virtio_iommu_report_fault(VirtIOIOMMU *viommu, uint8_t reason,
                                      int flags, uint32_t endpoint,
                                      uint64_t address)
{
    VirtIODevice *vdev = &viommu->parent_obj;
    VirtQueue *vq = viommu->event_vq;
    struct virtio_iommu_fault fault;
    VirtQueueElement *elem;
    size_t sz;

    memset(&fault, 0, sizeof(fault));
    fault.reason = reason;
    fault.flags = cpu_to_le32(flags);
    fault.endpoint = cpu_to_le32(endpoint);
    fault.address = cpu_to_le64(address);

    elem = virtqueue_pop(vq, sizeof(VirtQueueElement));

    if (!elem) {
        error_report_once(
            "no buffer available in event queue to report event");
        return;
    }

    if (iov_size(elem->in_sg, elem->in_num) < sizeof(fault)) {
        virtio_error(vdev, "error buffer of wrong size");
        virtqueue_detach_element(vq, elem, 0);
        g_free(elem);
        return;
    }

    sz = iov_from_buf(elem->in_sg, elem->in_num, 0,
                      &fault, sizeof(fault));
    assert(sz == sizeof(fault));

    trace_virtio_iommu_report_fault(reason, flags, endpoint, address);
    virtqueue_push(vq, elem, sz);
    virtio_notify(vdev, vq);
    g_free(elem);

}
601 | ||
cfb42188 EA |
602 | static IOMMUTLBEntry virtio_iommu_translate(IOMMUMemoryRegion *mr, hwaddr addr, |
603 | IOMMUAccessFlags flag, | |
604 | int iommu_idx) | |
605 | { | |
606 | IOMMUDevice *sdev = container_of(mr, IOMMUDevice, iommu_mr); | |
ed8449b3 EA |
607 | VirtIOIOMMUInterval interval, *mapping_key; |
608 | VirtIOIOMMUMapping *mapping_value; | |
609 | VirtIOIOMMU *s = sdev->viommu; | |
a7c1da8a | 610 | bool read_fault, write_fault; |
ed8449b3 | 611 | VirtIOIOMMUEndpoint *ep; |
a7c1da8a | 612 | uint32_t sid, flags; |
ed8449b3 | 613 | bool bypass_allowed; |
ed8449b3 | 614 | bool found; |
0f5a3092 | 615 | int i; |
ed8449b3 EA |
616 | |
617 | interval.low = addr; | |
618 | interval.high = addr + 1; | |
cfb42188 EA |
619 | |
620 | IOMMUTLBEntry entry = { | |
621 | .target_as = &address_space_memory, | |
622 | .iova = addr, | |
623 | .translated_addr = addr, | |
ed8449b3 | 624 | .addr_mask = (1 << ctz32(s->config.page_size_mask)) - 1, |
cfb42188 EA |
625 | .perm = IOMMU_NONE, |
626 | }; | |
627 | ||
ed8449b3 EA |
628 | bypass_allowed = virtio_vdev_has_feature(&s->parent_obj, |
629 | VIRTIO_IOMMU_F_BYPASS); | |
630 | ||
cfb42188 EA |
631 | sid = virtio_iommu_get_bdf(sdev); |
632 | ||
633 | trace_virtio_iommu_translate(mr->parent_obj.name, sid, addr, flag); | |
ed8449b3 EA |
634 | qemu_mutex_lock(&s->mutex); |
635 | ||
636 | ep = g_tree_lookup(s->endpoints, GUINT_TO_POINTER(sid)); | |
637 | if (!ep) { | |
638 | if (!bypass_allowed) { | |
639 | error_report_once("%s sid=%d is not known!!", __func__, sid); | |
a7c1da8a EA |
640 | virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_UNKNOWN, |
641 | VIRTIO_IOMMU_FAULT_F_ADDRESS, | |
642 | sid, addr); | |
ed8449b3 EA |
643 | } else { |
644 | entry.perm = flag; | |
645 | } | |
646 | goto unlock; | |
647 | } | |
648 | ||
0f5a3092 EA |
649 | for (i = 0; i < s->nb_reserved_regions; i++) { |
650 | ReservedRegion *reg = &s->reserved_regions[i]; | |
651 | ||
652 | if (addr >= reg->low && addr <= reg->high) { | |
653 | switch (reg->type) { | |
654 | case VIRTIO_IOMMU_RESV_MEM_T_MSI: | |
655 | entry.perm = flag; | |
656 | break; | |
657 | case VIRTIO_IOMMU_RESV_MEM_T_RESERVED: | |
658 | default: | |
659 | virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING, | |
660 | VIRTIO_IOMMU_FAULT_F_ADDRESS, | |
661 | sid, addr); | |
662 | break; | |
663 | } | |
664 | goto unlock; | |
665 | } | |
666 | } | |
667 | ||
ed8449b3 EA |
668 | if (!ep->domain) { |
669 | if (!bypass_allowed) { | |
670 | error_report_once("%s %02x:%02x.%01x not attached to any domain", | |
671 | __func__, PCI_BUS_NUM(sid), | |
672 | PCI_SLOT(sid), PCI_FUNC(sid)); | |
a7c1da8a EA |
673 | virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_DOMAIN, |
674 | VIRTIO_IOMMU_FAULT_F_ADDRESS, | |
675 | sid, addr); | |
ed8449b3 EA |
676 | } else { |
677 | entry.perm = flag; | |
678 | } | |
679 | goto unlock; | |
680 | } | |
681 | ||
682 | found = g_tree_lookup_extended(ep->domain->mappings, (gpointer)(&interval), | |
683 | (void **)&mapping_key, | |
684 | (void **)&mapping_value); | |
685 | if (!found) { | |
686 | error_report_once("%s no mapping for 0x%"PRIx64" for sid=%d", | |
687 | __func__, addr, sid); | |
a7c1da8a EA |
688 | virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING, |
689 | VIRTIO_IOMMU_FAULT_F_ADDRESS, | |
690 | sid, addr); | |
ed8449b3 EA |
691 | goto unlock; |
692 | } | |
693 | ||
a7c1da8a EA |
694 | read_fault = (flag & IOMMU_RO) && |
695 | !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_READ); | |
696 | write_fault = (flag & IOMMU_WO) && | |
697 | !(mapping_value->flags & VIRTIO_IOMMU_MAP_F_WRITE); | |
698 | ||
699 | flags = read_fault ? VIRTIO_IOMMU_FAULT_F_READ : 0; | |
700 | flags |= write_fault ? VIRTIO_IOMMU_FAULT_F_WRITE : 0; | |
701 | if (flags) { | |
ed8449b3 EA |
702 | error_report_once("%s permission error on 0x%"PRIx64"(%d): allowed=%d", |
703 | __func__, addr, flag, mapping_value->flags); | |
a7c1da8a EA |
704 | flags |= VIRTIO_IOMMU_FAULT_F_ADDRESS; |
705 | virtio_iommu_report_fault(s, VIRTIO_IOMMU_FAULT_R_MAPPING, | |
706 | flags | VIRTIO_IOMMU_FAULT_F_ADDRESS, | |
707 | sid, addr); | |
ed8449b3 EA |
708 | goto unlock; |
709 | } | |
710 | entry.translated_addr = addr - mapping_key->low + mapping_value->phys_addr; | |
711 | entry.perm = flag; | |
712 | trace_virtio_iommu_translate_out(addr, entry.translated_addr, sid); | |
713 | ||
714 | unlock: | |
715 | qemu_mutex_unlock(&s->mutex); | |
cfb42188 EA |
716 | return entry; |
717 | } | |
718 | ||
22c37a10 EA |
/* Copy the device config space (page sizes, ranges, probe size) to the guest. */
static void virtio_iommu_get_config(VirtIODevice *vdev, uint8_t *config_data)
{
    VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev);
    struct virtio_iommu_config *config = &dev->config;

    trace_virtio_iommu_get_config(config->page_size_mask,
                                  config->input_range.start,
                                  config->input_range.end,
                                  config->domain_range.end,
                                  config->probe_size);
    memcpy(config_data, &dev->config, sizeof(struct virtio_iommu_config));
}
731 | ||
/*
 * Config-space write callback. All fields are read-only for the driver,
 * so the written values are only traced, never applied.
 */
static void virtio_iommu_set_config(VirtIODevice *vdev,
                                    const uint8_t *config_data)
{
    struct virtio_iommu_config config;

    memcpy(&config, config_data, sizeof(struct virtio_iommu_config));
    trace_virtio_iommu_set_config(config.page_size_mask,
                                  config.input_range.start,
                                  config.input_range.end,
                                  config.domain_range.end,
                                  config.probe_size);
}
744 | ||
745 | static uint64_t virtio_iommu_get_features(VirtIODevice *vdev, uint64_t f, | |
746 | Error **errp) | |
747 | { | |
748 | VirtIOIOMMU *dev = VIRTIO_IOMMU(vdev); | |
749 | ||
750 | f |= dev->features; | |
751 | trace_virtio_iommu_get_features(f); | |
752 | return f; | |
753 | } | |
754 | ||
cfb42188 EA |
755 | static gint int_cmp(gconstpointer a, gconstpointer b, gpointer user_data) |
756 | { | |
757 | guint ua = GPOINTER_TO_UINT(a); | |
758 | guint ub = GPOINTER_TO_UINT(b); | |
759 | return (ua > ub) - (ua < ub); | |
760 | } | |
761 | ||
22c37a10 EA |
762 | static void virtio_iommu_device_realize(DeviceState *dev, Error **errp) |
763 | { | |
764 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); | |
765 | VirtIOIOMMU *s = VIRTIO_IOMMU(dev); | |
766 | ||
767 | virtio_init(vdev, "virtio-iommu", VIRTIO_ID_IOMMU, | |
768 | sizeof(struct virtio_iommu_config)); | |
769 | ||
cfb42188 EA |
770 | memset(s->iommu_pcibus_by_bus_num, 0, sizeof(s->iommu_pcibus_by_bus_num)); |
771 | ||
22c37a10 EA |
772 | s->req_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, |
773 | virtio_iommu_handle_command); | |
774 | s->event_vq = virtio_add_queue(vdev, VIOMMU_DEFAULT_QUEUE_SIZE, NULL); | |
775 | ||
776 | s->config.page_size_mask = TARGET_PAGE_MASK; | |
777 | s->config.input_range.end = -1UL; | |
778 | s->config.domain_range.end = 32; | |
1733eebb | 779 | s->config.probe_size = VIOMMU_PROBE_SIZE; |
22c37a10 EA |
780 | |
781 | virtio_add_feature(&s->features, VIRTIO_RING_F_EVENT_IDX); | |
782 | virtio_add_feature(&s->features, VIRTIO_RING_F_INDIRECT_DESC); | |
783 | virtio_add_feature(&s->features, VIRTIO_F_VERSION_1); | |
784 | virtio_add_feature(&s->features, VIRTIO_IOMMU_F_INPUT_RANGE); | |
785 | virtio_add_feature(&s->features, VIRTIO_IOMMU_F_DOMAIN_RANGE); | |
786 | virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MAP_UNMAP); | |
787 | virtio_add_feature(&s->features, VIRTIO_IOMMU_F_BYPASS); | |
788 | virtio_add_feature(&s->features, VIRTIO_IOMMU_F_MMIO); | |
1733eebb | 789 | virtio_add_feature(&s->features, VIRTIO_IOMMU_F_PROBE); |
22c37a10 EA |
790 | |
791 | qemu_mutex_init(&s->mutex); | |
cfb42188 EA |
792 | |
793 | s->as_by_busptr = g_hash_table_new_full(NULL, NULL, NULL, g_free); | |
794 | ||
795 | if (s->primary_bus) { | |
796 | pci_setup_iommu(s->primary_bus, virtio_iommu_find_add_as, s); | |
797 | } else { | |
798 | error_setg(errp, "VIRTIO-IOMMU is not attached to any PCI bus!"); | |
799 | } | |
22c37a10 EA |
800 | } |
801 | ||
b69c3c21 | 802 | static void virtio_iommu_device_unrealize(DeviceState *dev) |
22c37a10 EA |
803 | { |
804 | VirtIODevice *vdev = VIRTIO_DEVICE(dev); | |
cfb42188 EA |
805 | VirtIOIOMMU *s = VIRTIO_IOMMU(dev); |
806 | ||
de38ed30 | 807 | g_hash_table_destroy(s->as_by_busptr); |
59bf980d EA |
808 | if (s->domains) { |
809 | g_tree_destroy(s->domains); | |
810 | } | |
811 | if (s->endpoints) { | |
812 | g_tree_destroy(s->endpoints); | |
813 | } | |
22c37a10 | 814 | |
de38ed30 PN |
815 | virtio_delete_queue(s->req_vq); |
816 | virtio_delete_queue(s->event_vq); | |
22c37a10 EA |
817 | virtio_cleanup(vdev); |
818 | } | |
819 | ||
/*
 * Device reset: drop all domains, mappings and endpoints and recreate
 * empty trees. The put_* destroy-notify callbacks free the entries.
 */
static void virtio_iommu_device_reset(VirtIODevice *vdev)
{
    VirtIOIOMMU *s = VIRTIO_IOMMU(vdev);

    trace_virtio_iommu_device_reset();

    if (s->domains) {
        g_tree_destroy(s->domains);
    }
    if (s->endpoints) {
        g_tree_destroy(s->endpoints);
    }
    s->domains = g_tree_new_full((GCompareDataFunc)int_cmp,
                                 NULL, NULL, virtio_iommu_put_domain);
    s->endpoints = g_tree_new_full((GCompareDataFunc)int_cmp,
                                   NULL, NULL, virtio_iommu_put_endpoint);
}
837 | ||
/* Status transitions need no action; only trace them. */
static void virtio_iommu_set_status(VirtIODevice *vdev, uint8_t status)
{
    trace_virtio_iommu_device_status(status);
}
842 | ||
/* No per-instance initialization needed beyond the QOM defaults. */
static void virtio_iommu_instance_init(Object *obj)
{
}
846 | ||
bd0ab870 EA |
/* VMState of one mapping-tree key: the [low, high] IOVA interval. */
#define VMSTATE_INTERVAL                               \
{                                                      \
    .name = "interval",                                \
    .version_id = 1,                                   \
    .minimum_version_id = 1,                           \
    .fields = (VMStateField[]) {                       \
        VMSTATE_UINT64(low, VirtIOIOMMUInterval),      \
        VMSTATE_UINT64(high, VirtIOIOMMUInterval),     \
        VMSTATE_END_OF_LIST()                          \
    }                                                  \
}

/* VMState of one mapping-tree value: target PA and permission flags. */
#define VMSTATE_MAPPING                               \
{                                                     \
    .name = "mapping",                                \
    .version_id = 1,                                  \
    .minimum_version_id = 1,                          \
    .fields = (VMStateField[]) {                      \
        VMSTATE_UINT64(phys_addr, VirtIOIOMMUMapping),\
        VMSTATE_UINT32(flags, VirtIOIOMMUMapping),    \
        VMSTATE_END_OF_LIST()                         \
    },                                                \
}

/* Pair consumed by VMSTATE_GTREE_V: value description first, then key. */
static const VMStateDescription vmstate_interval_mapping[2] = {
    VMSTATE_MAPPING,   /* value */
    VMSTATE_INTERVAL   /* key   */
};
875 | ||
/*
 * vmstate pre_load hook: give the incoming domain an empty mapping tree
 * for VMSTATE_GTREE_V to populate.
 */
static int domain_preload(void *opaque)
{
    VirtIOIOMMUDomain *domain = opaque;

    domain->mappings = g_tree_new_full((GCompareDataFunc)interval_cmp,
                                       NULL, g_free, g_free);
    return 0;
}
884 | ||
/*
 * Only the endpoint id is migrated; domain and iommu_mr pointers are
 * rebuilt by iommu_post_load()/reconstruct_endpoints().
 */
static const VMStateDescription vmstate_endpoint = {
    .name = "endpoint",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUEndpoint),
        VMSTATE_END_OF_LIST()
    }
};
894 | ||
/*
 * Migration description of one domain: its ID, the GTree of
 * interval->mapping pairs (tree allocated by the domain_preload hook
 * before the nodes are restored) and the list of attached endpoints.
 */
static const VMStateDescription vmstate_domain = {
    .name = "domain",
    .version_id = 1,
    .minimum_version_id = 1,
    .pre_load = domain_preload,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(id, VirtIOIOMMUDomain),
        VMSTATE_GTREE_V(mappings, VirtIOIOMMUDomain, 1,
                        vmstate_interval_mapping,
                        VirtIOIOMMUInterval, VirtIOIOMMUMapping),
        VMSTATE_QLIST_V(endpoint_list, VirtIOIOMMUDomain, 1,
                        vmstate_endpoint, VirtIOIOMMUEndpoint, next),
        VMSTATE_END_OF_LIST()
    }
};
910 | ||
/*
 * g_tree_foreach callback run on each migrated domain: rebuild the
 * runtime links that are not part of the migration stream. Every
 * endpoint on the domain's list gets its domain back-pointer and its
 * IOMMU memory region restored, and is re-inserted into the global
 * s->endpoints tree keyed by endpoint ID.
 */
static gboolean reconstruct_endpoints(gpointer key, gpointer value,
                                      gpointer data)
{
    VirtIOIOMMU *s = (VirtIOIOMMU *)data;
    VirtIOIOMMUDomain *d = (VirtIOIOMMUDomain *)value;
    VirtIOIOMMUEndpoint *iter;
    IOMMUMemoryRegion *mr;

    QLIST_FOREACH(iter, &d->endpoint_list, next) {
        /* the corresponding memory region must exist on the destination */
        mr = virtio_iommu_mr(s, iter->id);
        assert(mr);

        iter->domain = d;
        iter->iommu_mr = mr;
        g_tree_insert(s->endpoints, GUINT_TO_POINTER(iter->id), iter);
    }
    return false; /* continue the domain traversal */
}
929 | ||
/*
 * post_load hook of vmstate_virtio_iommu_device: walk all restored
 * domains and rebuild the endpoint links (see reconstruct_endpoints).
 */
static int iommu_post_load(void *opaque, int version_id)
{
    VirtIOIOMMU *s = opaque;

    g_tree_foreach(s->domains, reconstruct_endpoints, s);
    return 0;
}
937 | ||
/*
 * Device-level migration state: only the domains tree is transferred;
 * the endpoints tree is reconstructed from it in iommu_post_load().
 */
static const VMStateDescription vmstate_virtio_iommu_device = {
    .name = "virtio-iommu-device",
    .minimum_version_id = 1,
    .version_id = 1,
    .post_load = iommu_post_load,
    .fields = (VMStateField[]) {
        VMSTATE_GTREE_DIRECT_KEY_V(domains, VirtIOIOMMU, 1,
                                   &vmstate_domain, VirtIOIOMMUDomain),
        VMSTATE_END_OF_LIST()
    },
};
949 | ||
22c37a10 EA |
/*
 * Top-level migration state (generic virtio device payload).
 * MIG_PRI_IOMMU raises the restore priority so the IOMMU is brought up
 * early relative to devices that translate through it -- presumably
 * required by devices registering mappings at restore time; see the
 * MIG_PRI_* definitions for the exact ordering guarantees.
 */
static const VMStateDescription vmstate_virtio_iommu = {
    .name = "virtio-iommu",
    .minimum_version_id = 1,
    .priority = MIG_PRI_IOMMU,
    .version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_VIRTIO_DEVICE,
        VMSTATE_END_OF_LIST()
    },
};
960 | ||
/*
 * "primary-bus": link property pointing at the PCI bus this IOMMU is
 * attached to; set by the machine/board code before realize.
 */
static Property virtio_iommu_properties[] = {
    DEFINE_PROP_LINK("primary-bus", VirtIOIOMMU, primary_bus, "PCI", PCIBus *),
    DEFINE_PROP_END_OF_LIST(),
};
965 | ||
966 | static void virtio_iommu_class_init(ObjectClass *klass, void *data) | |
967 | { | |
968 | DeviceClass *dc = DEVICE_CLASS(klass); | |
969 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_CLASS(klass); | |
970 | ||
971 | device_class_set_props(dc, virtio_iommu_properties); | |
972 | dc->vmsd = &vmstate_virtio_iommu; | |
973 | ||
974 | set_bit(DEVICE_CATEGORY_MISC, dc->categories); | |
975 | vdc->realize = virtio_iommu_device_realize; | |
976 | vdc->unrealize = virtio_iommu_device_unrealize; | |
977 | vdc->reset = virtio_iommu_device_reset; | |
978 | vdc->get_config = virtio_iommu_get_config; | |
979 | vdc->set_config = virtio_iommu_set_config; | |
980 | vdc->get_features = virtio_iommu_get_features; | |
981 | vdc->set_status = virtio_iommu_set_status; | |
982 | vdc->vmsd = &vmstate_virtio_iommu_device; | |
983 | } | |
984 | ||
cfb42188 EA |
985 | static void virtio_iommu_memory_region_class_init(ObjectClass *klass, |
986 | void *data) | |
987 | { | |
988 | IOMMUMemoryRegionClass *imrc = IOMMU_MEMORY_REGION_CLASS(klass); | |
989 | ||
990 | imrc->translate = virtio_iommu_translate; | |
991 | } | |
992 | ||
22c37a10 EA |
/* QOM type registration data for the virtio-iommu device itself. */
static const TypeInfo virtio_iommu_info = {
    .name = TYPE_VIRTIO_IOMMU,
    .parent = TYPE_VIRTIO_DEVICE,
    .instance_size = sizeof(VirtIOIOMMU),
    .instance_init = virtio_iommu_instance_init,
    .class_init = virtio_iommu_class_init,
};
1000 | ||
cfb42188 EA |
/* QOM type registration data for the per-endpoint IOMMU memory region. */
static const TypeInfo virtio_iommu_memory_region_info = {
    .parent = TYPE_IOMMU_MEMORY_REGION,
    .name = TYPE_VIRTIO_IOMMU_MEMORY_REGION,
    .class_init = virtio_iommu_memory_region_class_init,
};
1006 | ||
22c37a10 EA |
/* Register both QOM types with the type system at module init time. */
static void virtio_register_types(void)
{
    type_register_static(&virtio_iommu_info);
    type_register_static(&virtio_iommu_memory_region_info);
}

type_init(virtio_register_types)