/*
 * Virtio PCI Bindings
 *
 * Copyright IBM, Corp. 2007
 * Copyright (c) 2009 CodeSourcery
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paul Brook        <paul@codesourcery.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
9b8bfe21 | 18 | #include "qemu/osdep.h" |
53c25cea | 19 | |
062c08d1 | 20 | #include "exec/memop.h" |
cbbe4f50 | 21 | #include "standard-headers/linux/virtio_pci.h" |
1436f32a | 22 | #include "hw/boards.h" |
0d09e41a | 23 | #include "hw/virtio/virtio.h" |
ca77ee28 | 24 | #include "migration/qemu-file-types.h" |
83c9f4ca | 25 | #include "hw/pci/pci.h" |
b0e5196a | 26 | #include "hw/pci/pci_bus.h" |
a27bd6c7 | 27 | #include "hw/qdev-properties.h" |
da34e65c | 28 | #include "qapi/error.h" |
1de7afc9 | 29 | #include "qemu/error-report.h" |
a8218588 | 30 | #include "qemu/log.h" |
0b8fa32f | 31 | #include "qemu/module.h" |
83c9f4ca PB |
32 | #include "hw/pci/msi.h" |
33 | #include "hw/pci/msix.h" | |
34 | #include "hw/loader.h" | |
9c17d615 | 35 | #include "sysemu/kvm.h" |
47b43a1f | 36 | #include "virtio-pci.h" |
1de7afc9 | 37 | #include "qemu/range.h" |
0d09e41a | 38 | #include "hw/virtio/virtio-bus.h" |
24a6e7f4 | 39 | #include "qapi/visitor.h" |
3909c079 | 40 | #include "sysemu/replay.h" |
53c25cea | 41 | |
/* Size of the legacy I/O region up to the start of the device-specific
 * config: the MSI-X fields are present in the layout iff MSI-X itself is
 * present on the PCI device. */
#define VIRTIO_PCI_REGION_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_present(dev))

/* Shadow the kernel header's VIRTIO_PCI_CONFIG: it checks msix_enabled()
 * at access time, which is what VIRTIO_PCI_CONFIG_SIZE below encodes. */
#undef VIRTIO_PCI_CONFIG

/* The remaining space is defined by each driver as the per-driver
 * configuration space */
#define VIRTIO_PCI_CONFIG_SIZE(dev)     VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev))

/* Forward declarations; definitions live later in this file. */
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev);
static void virtio_pci_reset(DeviceState *qdev);
d51fcfac | 53 | |
/* virtio device */
/* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
{
    /* d is the embedded pci_dev.qdev; recover the containing proxy. */
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
53c25cea | 60 | |
/* DeviceState to VirtIOPCIProxy. Note: used on datapath,
 * be careful and test performance if you change this.
 */
static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
{
    /* Same computation as to_virtio_pci_proxy(), kept separate so the
     * hot-path variant can diverge (e.g. skip QOM casts) if ever needed. */
    return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
}
69 | static void virtio_pci_notify(DeviceState *d, uint16_t vector) | |
70 | { | |
71 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); | |
a3fc66d9 | 72 | |
aba800a3 MT |
73 | if (msix_enabled(&proxy->pci_dev)) |
74 | msix_notify(&proxy->pci_dev, vector); | |
a3fc66d9 PB |
75 | else { |
76 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
d73415a3 | 77 | pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); |
a3fc66d9 | 78 | } |
53c25cea PB |
79 | } |
80 | ||
d2a0ccc6 | 81 | static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) |
ff24bd58 | 82 | { |
d2a0ccc6 | 83 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
84 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
85 | ||
ff24bd58 MT |
86 | pci_device_save(&proxy->pci_dev, f); |
87 | msix_save(&proxy->pci_dev, f); | |
88 | if (msix_present(&proxy->pci_dev)) | |
a3fc66d9 | 89 | qemu_put_be16(f, vdev->config_vector); |
ff24bd58 MT |
90 | } |
91 | ||
/* Per-virtqueue modern (virtio-1) transport state: queue size, enable
 * flag and the guest-programmed desc/avail/used GPAs (as lo/hi pairs). */
static const VMStateDescription vmstate_virtio_pci_modern_queue_state = {
    .name = "virtio_pci/modern_queue_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT16(num, VirtIOPCIQueue),
        VMSTATE_UNUSED(1), /* enabled was stored as be16 */
        VMSTATE_BOOL(enabled, VirtIOPCIQueue),
        VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2),
        VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2),
        VMSTATE_END_OF_LIST()
    }
};
/* Subsection predicate: only migrate modern transport state when the
 * proxy actually exposes the virtio-1 (modern) interface. */
static bool virtio_pci_modern_state_needed(void *opaque)
{
    VirtIOPCIProxy *proxy = opaque;

    return virtio_pci_modern(proxy);
}
/* Optional subsection holding the modern transport registers: feature
 * select windows, negotiated guest features (lo/hi), and per-queue state. */
static const VMStateDescription vmstate_virtio_pci_modern_state_sub = {
    .name = "virtio_pci/modern_state",
    .version_id = 1,
    .minimum_version_id = 1,
    .needed = &virtio_pci_modern_state_needed,
    .fields = (VMStateField[]) {
        VMSTATE_UINT32(dfselect, VirtIOPCIProxy),
        VMSTATE_UINT32(gfselect, VirtIOPCIProxy),
        VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2),
        VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0,
                             vmstate_virtio_pci_modern_queue_state,
                             VirtIOPCIQueue),
        VMSTATE_END_OF_LIST()
    }
};
/* Top-level "extra state" container: intentionally has no mandatory
 * fields (for compatibility with streams that lack it) and carries the
 * modern transport state only as a subsection. */
static const VMStateDescription vmstate_virtio_pci = {
    .name = "virtio_pci",
    .version_id = 1,
    .minimum_version_id = 1,
    .minimum_version_id_old = 1,
    .fields = (VMStateField[]) {
        VMSTATE_END_OF_LIST()
    },
    .subsections = (const VMStateDescription*[]) {
        &vmstate_virtio_pci_modern_state_sub,
        NULL
    }
};
/* Whether this proxy migrates the vmstate_virtio_pci blob above;
 * controlled by the MIGRATE_EXTRA compatibility flag. */
static bool virtio_pci_has_extra_state(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA;
}
/* Serialize the transport's extra (modern) state into the stream. */
static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL);
}
/* Counterpart of virtio_pci_save_extra_state(); version pinned to 1.
 * Returns 0 on success, negative errno on parse failure. */
static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1);
}
d2a0ccc6 | 165 | static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) |
ff24bd58 | 166 | { |
d2a0ccc6 | 167 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
168 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
169 | ||
ff24bd58 | 170 | if (msix_present(&proxy->pci_dev)) |
a3fc66d9 | 171 | qemu_put_be16(f, virtio_queue_vector(vdev, n)); |
ff24bd58 MT |
172 | } |
173 | ||
/* Restore PCI + MSI-X transport state and the config vector.
 * Must mirror virtio_pci_save_config() field-for-field.
 * Returns 0 on success or a negative error from pci_device_load()/
 * msix_vector_use(). */
static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    int ret;
    ret = pci_device_load(&proxy->pci_dev, f);
    if (ret) {
        return ret;
    }
    /* Drop stale vector usage from before migration, then reload. */
    msix_unuse_all_vectors(&proxy->pci_dev);
    msix_load(&proxy->pci_dev, f);
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vdev->config_vector);
    } else {
        vdev->config_vector = VIRTIO_NO_VECTOR;
    }
    if (vdev->config_vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vdev->config_vector);
    }
    return 0;
}
/* Restore queue @n's MSI-X vector (saved by virtio_pci_save_queue) and
 * mark it in use. Returns 0 or a negative error from msix_vector_use(). */
static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    uint16_t vector;
    if (msix_present(&proxy->pci_dev)) {
        qemu_get_be16s(f, &vector);
    } else {
        /* Save side wrote nothing in this case; fall back to "no vector". */
        vector = VIRTIO_NO_VECTOR;
    }
    virtio_queue_set_vector(vdev, n, vector);
    if (vector != VIRTIO_NO_VECTOR) {
        return msix_vector_use(&proxy->pci_dev, vector);
    }

    return 0;
}
/* Whether ioeventfd-based queue notification is enabled for this proxy
 * (the "ioeventfd" device property). */
static bool virtio_pci_ioeventfd_enabled(DeviceState *d)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);

    return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0;
}
975acc0a JW |
223 | #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 |
224 | ||
d9997d89 MA |
225 | static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy) |
226 | { | |
227 | return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ? | |
228 | QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4; | |
229 | } | |
230 | ||
/* Wire (or unwire) @notifier as the kernel-side doorbell for queue @n.
 *
 * Depending on which interfaces the proxy exposes, the eventfd is bound
 * to the modern MMIO notify region (width 0 if the host supports
 * any-length ioeventfds, else exact 2-byte writes), the optional modern
 * PIO notify region, and/or the legacy I/O BAR's QUEUE_NOTIFY port.
 * The del path must mirror the add path exactly. Always returns 0. */
static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier,
                                       int n, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    bool legacy = virtio_pci_legacy(proxy);
    bool modern = virtio_pci_modern(proxy);
    bool fast_mmio = kvm_ioeventfd_any_length_enabled();
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    MemoryRegion *modern_mr = &proxy->notify.mr;
    MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr;
    MemoryRegion *legacy_mr = &proxy->bar;
    /* Modern: each queue has its own doorbell offset within the region. */
    hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) *
                         virtio_get_queue_index(vq);
    /* Legacy: one shared port, matched on the written value (queue index). */
    hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY;

    if (assign) {
        if (modern) {
            if (fast_mmio) {
                memory_region_add_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_add_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_add_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_add_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    } else {
        if (modern) {
            if (fast_mmio) {
                memory_region_del_eventfd(modern_mr, modern_addr, 0,
                                          false, n, notifier);
            } else {
                memory_region_del_eventfd(modern_mr, modern_addr, 2,
                                          false, n, notifier);
            }
            if (modern_pio) {
                memory_region_del_eventfd(modern_notify_mr, 0, 2,
                                          true, n, notifier);
            }
        }
        if (legacy) {
            memory_region_del_eventfd(legacy_mr, legacy_addr, 2,
                                      true, n, notifier);
        }
    }
    return 0;
}
/* Hand queue notifications over to ioeventfd (kernel fast path). */
static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_start_ioeventfd(&proxy->bus);
}
/* Revert queue notifications to the userspace (MMIO/PIO exit) path. */
static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
{
    virtio_bus_stop_ioeventfd(&proxy->bus);
}
/* Handle a guest write to the legacy (virtio 0.9) I/O port window.
 * @addr is the offset within the legacy register block, @val the value
 * written. Side effects include device reset (PFN=0 or status=0),
 * starting/stopping ioeventfd on DRIVER_OK transitions, and MSI-X
 * vector bookkeeping. */
static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    hwaddr pa;

    switch (addr) {
    case VIRTIO_PCI_GUEST_FEATURES:
        /* Guest does not negotiate properly? We have to assume nothing. */
        if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
            val = virtio_bus_get_vdev_bad_features(&proxy->bus);
        }
        virtio_set_features(vdev, val);
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        /* Writing PFN 0 is the legacy way to reset the device. */
        if (pa == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }
        else
            virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        if (val < VIRTIO_QUEUE_MAX)
            vdev->queue_sel = val;
        break;
    case VIRTIO_PCI_QUEUE_NOTIFY:
        if (val < VIRTIO_QUEUE_MAX) {
            virtio_queue_notify(vdev, val);
        }
        break;
    case VIRTIO_PCI_STATUS:
        /* Stop ioeventfd before the device sees DRIVER_OK cleared, and
         * only start it after the new status is in place. */
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        /* Linux before 2.6.34 drives the device without enabling
           the PCI device bus master bit. Enable it automatically
           for the guest. This is a PCI spec violation but so is
           initiating DMA with bus master bit clear. */
        if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        vdev->config_vector = val;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0)
            val = VIRTIO_NO_VECTOR;
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    default:
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: unexpected address 0x%x value 0x%x\n",
                      __func__, addr, val);
        break;
    }
}
/* Handle a guest read from the legacy I/O port window. Unknown offsets
 * read as all-ones. NOTE: reading ISR clears it and deasserts INTx. */
static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t ret = 0xFFFFFFFF;

    switch (addr) {
    case VIRTIO_PCI_HOST_FEATURES:
        ret = vdev->host_features;
        break;
    case VIRTIO_PCI_GUEST_FEATURES:
        ret = vdev->guest_features;
        break;
    case VIRTIO_PCI_QUEUE_PFN:
        ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
              >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
        break;
    case VIRTIO_PCI_QUEUE_NUM:
        ret = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_QUEUE_SEL:
        ret = vdev->queue_sel;
        break;
    case VIRTIO_PCI_STATUS:
        ret = vdev->status;
        break;
    case VIRTIO_PCI_ISR:
        /* reading from the ISR also clears it. */
        ret = qatomic_xchg(&vdev->isr, 0);
        pci_irq_deassert(&proxy->pci_dev);
        break;
    case VIRTIO_MSI_CONFIG_VECTOR:
        ret = vdev->config_vector;
        break;
    case VIRTIO_MSI_QUEUE_VECTOR:
        ret = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    default:
        break;
    }

    return ret;
}
/* MemoryRegionOps read handler for the legacy BAR: offsets below the
 * transport header go to virtio_ioport_read(); the remainder is the
 * device-specific config, which is target-endian (hence the bswaps on a
 * big-endian target device behind LE ops). */
static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    uint64_t val = 0;

    /* No backend attached (e.g. vhost-user disconnect): read as -1. */
    if (vdev == NULL) {
        return UINT64_MAX;
    }

    if (addr < config) {
        return virtio_ioport_read(proxy, addr);
    }
    addr -= config;

    switch (size) {
    case 1:
        val = virtio_config_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_readw(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        break;
    case 4:
        val = virtio_config_readl(vdev, addr);
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        break;
    }
    return val;
}
/* MemoryRegionOps write handler for the legacy BAR; the mirror image of
 * virtio_pci_config_read() above. */
static void virtio_pci_config_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /* No backend attached: silently drop the write. */
    if (vdev == NULL) {
        return;
    }

    if (addr < config) {
        virtio_ioport_write(proxy, addr, val);
        return;
    }
    addr -= config;
    /*
     * Virtio-PCI is odd. Ioports are LE but config space is target native
     * endian.
     */
    switch (size) {
    case 1:
        virtio_config_writeb(vdev, addr, val);
        break;
    case 2:
        if (virtio_is_big_endian(vdev)) {
            val = bswap16(val);
        }
        virtio_config_writew(vdev, addr, val);
        break;
    case 4:
        if (virtio_is_big_endian(vdev)) {
            val = bswap32(val);
        }
        virtio_config_writel(vdev, addr, val);
        break;
    }
}
/* Ops for the legacy BAR; accesses are split/combined into 1-4 byte
 * little-endian ops, with config-space endianness fixed up above. */
static const MemoryRegionOps virtio_pci_config_ops = {
    .read = virtio_pci_config_read,
    .write = virtio_pci_config_write,
    .impl = {
        .min_access_size = 1,
        .max_access_size = 4,
    },
    .endianness = DEVICE_LITTLE_ENDIAN,
};
/* Find which modern capability region fully contains [*off, *off+len).
 * On success, rebase *off to be region-relative and return the region's
 * MemoryRegion; return NULL if no region covers the whole access. */
static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy,
                                                 hwaddr *off, int len)
{
    int i;
    VirtIOPCIRegion *reg;

    for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) {
        reg = &proxy->regs[i];
        if (*off >= reg->offset &&
            *off + len <= reg->offset + reg->size) {
            *off -= reg->offset;
            return &reg->mr;
        }
    }

    return NULL;
}
/* Below are generic functions to do memcpy from/to an address space,
 * without byteswaps, with input validation.
 *
 * As regular address_space_* APIs all do some kind of byteswap at least for
 * some host/target combinations, we are forced to explicitly convert to a
 * known-endianness integer value.
 * It doesn't really matter which endian format to go through, so the code
 * below selects the endian that causes the least amount of work on the given
 * host.
 *
 * Note: host pointer must be aligned.
 */
static
void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr,
                                const uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        /* Access outside any capability region: drop it. */
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    switch (len) {
    case 1:
        val = pci_get_byte(buf);
        break;
    case 2:
        val = pci_get_word(buf);
        break;
    case 4:
        val = pci_get_long(buf);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        return;
    }
    memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE,
                                 MEMTXATTRS_UNSPECIFIED);
}
/* Read counterpart of virtio_address_space_write(): dispatch a 1/2/4-byte
 * LE read into the matching capability region and store the raw bytes
 * into @buf with no byteswap. Invalid address/length reads leave @buf
 * untouched. */
static void
virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr,
                          uint8_t *buf, int len)
{
    uint64_t val;
    MemoryRegion *mr;

    /* address_space_* APIs assume an aligned address.
     * As address is under guest control, handle illegal values.
     */
    addr &= ~(len - 1);

    mr = virtio_address_space_lookup(proxy, &addr, len);
    if (!mr) {
        return;
    }

    /* Make sure caller aligned buf properly */
    assert(!(((uintptr_t)buf) & (len - 1)));

    memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE,
                                MEMTXATTRS_UNSPECIFIED);
    switch (len) {
    case 1:
        pci_set_byte(buf, val);
        break;
    case 2:
        pci_set_word(buf, val);
        break;
    case 4:
        pci_set_long(buf, val);
        break;
    default:
        /* As length is under guest control, handle illegal values. */
        break;
    }
}
/* PCI config-space write hook: after the default handling, propagate
 * FLR, react to bus-master enable/disable, and service writes to the
 * VIRTIO_PCI_CAP_PCI_CFG window (cfg access capability). */
static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
                                uint32_t val, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    struct virtio_pci_cfg_cap *cfg;

    pci_default_write_config(pci_dev, address, val, len);

    if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
        pcie_cap_flr_write_config(pci_dev, address, val, len);
    }

    /* Clearing bus master stops DMA: disable the device and drop
     * DRIVER_OK; re-enabling bus master lifts the disable. */
    if (range_covers_byte(address, len, PCI_COMMAND)) {
        if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            virtio_set_disabled(vdev, true);
            virtio_pci_stop_ioeventfd(proxy);
            virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
        } else {
            virtio_set_disabled(vdev, false);
        }
    }

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;   /* NOTE: intentionally shadows the parameter */

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        /* Guest-controlled length: only 1/2/4-byte accesses are legal. */
        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len);
        }
    }
}
/* PCI config-space read hook: refresh the PCI_CFG capability's data
 * window from the device before the default read, so the guest sees
 * current contents. */
static uint32_t virtio_read_config(PCIDevice *pci_dev,
                                   uint32_t address, int len)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    struct virtio_pci_cfg_cap *cfg;

    if (proxy->config_cap &&
        ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap,
                                                                  pci_cfg_data),
                       sizeof cfg->pci_cfg_data)) {
        uint32_t off;
        uint32_t len;   /* NOTE: intentionally shadows the parameter */

        cfg = (void *)(proxy->pci_dev.config + proxy->config_cap);
        off = le32_to_cpu(cfg->cap.offset);
        len = le32_to_cpu(cfg->cap.length);

        if (len == 1 || len == 2 || len == 4) {
            assert(len <= sizeof cfg->pci_cfg_data);
            virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len);
        }
    }

    return pci_default_read_config(pci_dev, address, len);
}
/* Take a reference on the KVM MSI route for @vector, allocating the
 * route on first use. Returns 0 or a negative error from
 * kvm_irqchip_add_msi_route(). queue_no is currently unused here. */
static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
                                        unsigned int queue_no,
                                        unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    if (irqfd->users == 0) {
        ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev);
        if (ret < 0) {
            return ret;
        }
        irqfd->virq = ret;
    }
    irqfd->users++;
    return 0;
}
/* Drop a reference on @vector's MSI route; free the virq on last use. */
static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
                                             unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    if (--irqfd->users == 0) {
        kvm_irqchip_release_virq(kvm_state, irqfd->virq);
    }
}
/* Bind queue @queue_no's guest notifier to @vector's virq so KVM
 * injects the MSI directly, bypassing userspace. */
static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
                                    unsigned int queue_no,
                                    unsigned int vector)
{
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq);
}
/* Unbind the irqfd set up by kvm_virtio_pci_irqfd_use(); removal is
 * expected to succeed, hence the assert. */
static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
                                         unsigned int queue_no,
                                         unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
    int ret;

    ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq);
    assert(ret == 0);
}
/* Set up MSI routes (and, when the device supports notifier masking,
 * irqfds) for the first @nvqs queues. On failure, unwinds everything
 * done so far via the undo: loop and returns the error. */
static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    unsigned int vector;
    int ret, queue_no;

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            /* Queues are allocated contiguously; first empty one ends it. */
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector);
        if (ret < 0) {
            goto undo;
        }
        /* If guest supports masking, set up irqfd now.
         * Otherwise, delay until unmasked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
            if (ret < 0) {
                kvm_virtio_pci_vq_vector_release(proxy, vector);
                goto undo;
            }
        }
    }
    return 0;

undo:
    while (--queue_no >= 0) {
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
    return ret;
}
/* Tear down what kvm_virtio_pci_vector_use() established for the first
 * @nvqs queues. */
static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
{
    PCIDevice *dev = &proxy->pci_dev;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    unsigned int vector;
    int queue_no;
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    for (queue_no = 0; queue_no < nvqs; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector >= msix_nr_vectors_allocated(dev)) {
            continue;
        }
        /* If guest supports masking, clean up irqfd now.
         * Otherwise, it was cleaned when masked in the frontend.
         */
        if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
            kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
        }
        kvm_virtio_pci_vq_vector_release(proxy, vector);
    }
}
/* MSI-X unmask handler for one queue: update the KVM route if the MSI
 * message changed while masked, then either unmask the frontend's
 * notifier (masking-capable devices) or attach the irqfd now. */
static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}
a38b2c49 | 844 | static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, |
7d37d351 JK |
845 | unsigned int queue_no, |
846 | unsigned int vector) | |
847 | { | |
a3fc66d9 PB |
848 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
849 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); | |
181103cd | 850 | |
f1d0f15a MT |
851 | /* If guest supports masking, keep irqfd but mask it. |
852 | * Otherwise, clean it up now. | |
853 | */ | |
5669655a | 854 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
a3fc66d9 | 855 | k->guest_notifier_mask(vdev, queue_no, true); |
f1d0f15a | 856 | } else { |
e387f99e | 857 | kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); |
f1d0f15a | 858 | } |
7d37d351 JK |
859 | } |
860 | ||
a38b2c49 MT |
/*
 * MSI-X unmask notifier: unmask every queue that shares @vector.  On a
 * partial failure, re-mask the queues unmasked so far (plus the failing
 * one) so the vector's state stays consistent, and return the error.
 */
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        /* A zero-sized queue ends the chain of configured queues. */
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        /* Only queues with guest notifiers assigned participate. */
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    /* Walk the chain again, masking what was unmasked (>= 0 also covers
     * the queue whose unmask failed part-way). */
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}
898 | ||
a38b2c49 | 899 | static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) |
7d37d351 JK |
900 | { |
901 | VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); | |
a3fc66d9 | 902 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
851c2a75 JW |
903 | VirtQueue *vq = virtio_vector_first_queue(vdev, vector); |
904 | int index; | |
7d37d351 | 905 | |
851c2a75 JW |
906 | while (vq) { |
907 | index = virtio_get_queue_index(vq); | |
908 | if (!virtio_queue_get_num(vdev, index)) { | |
7d37d351 JK |
909 | break; |
910 | } | |
6652d081 JW |
911 | if (index < proxy->nvqs_with_notifiers) { |
912 | virtio_pci_vq_vector_mask(proxy, index, vector); | |
913 | } | |
851c2a75 | 914 | vq = virtio_vector_next_queue(vq); |
7d37d351 JK |
915 | } |
916 | } | |
917 | ||
a38b2c49 MT |
/*
 * MSI-X poll notifier: for each queue whose vector lies in
 * [vector_start, vector_end) and is currently masked, latch the pending
 * bit if the device reports (or the guest notifier holds) an undelivered
 * event, so the interrupt fires on unmask.
 */
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        /* Prefer the device's own pending check; fall back to draining
         * the guest notifier eventfd. */
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
950 | ||
/*
 * Assign or deassign the guest notifier (eventfd) for queue @n, wiring its
 * fd handler for either irqfd or userspace delivery.  Returns 0 on success
 * or the negative error from event_notifier_init().
 */
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Without MSI-X there are no per-vector mask notifiers, so reflect the
     * assignment state straight into the device's notifier mask. */
    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
979 | ||
d2a0ccc6 | 980 | static bool virtio_pci_query_guest_notifiers(DeviceState *d) |
5430a28f | 981 | { |
d2a0ccc6 | 982 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
5430a28f MT |
983 | return msix_enabled(&proxy->pci_dev); |
984 | } | |
985 | ||
2d620f59 | 986 | static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) |
54dd9321 | 987 | { |
d2a0ccc6 | 988 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 | 989 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
181103cd | 990 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
54dd9321 | 991 | int r, n; |
89d62be9 MT |
992 | bool with_irqfd = msix_enabled(&proxy->pci_dev) && |
993 | kvm_msi_via_irqfd_enabled(); | |
54dd9321 | 994 | |
87b3bd1c | 995 | nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); |
2d620f59 MT |
996 | |
997 | /* When deassigning, pass a consistent nvqs value | |
998 | * to avoid leaking notifiers. | |
999 | */ | |
1000 | assert(assign || nvqs == proxy->nvqs_with_notifiers); | |
1001 | ||
1002 | proxy->nvqs_with_notifiers = nvqs; | |
1003 | ||
7d37d351 | 1004 | /* Must unset vector notifier while guest notifier is still assigned */ |
181103cd | 1005 | if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) { |
7d37d351 | 1006 | msix_unset_vector_notifiers(&proxy->pci_dev); |
a38b2c49 MT |
1007 | if (proxy->vector_irqfd) { |
1008 | kvm_virtio_pci_vector_release(proxy, nvqs); | |
1009 | g_free(proxy->vector_irqfd); | |
1010 | proxy->vector_irqfd = NULL; | |
1011 | } | |
7d37d351 JK |
1012 | } |
1013 | ||
2d620f59 | 1014 | for (n = 0; n < nvqs; n++) { |
54dd9321 MT |
1015 | if (!virtio_queue_get_num(vdev, n)) { |
1016 | break; | |
1017 | } | |
1018 | ||
23fe2b3f | 1019 | r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd); |
54dd9321 MT |
1020 | if (r < 0) { |
1021 | goto assign_error; | |
1022 | } | |
1023 | } | |
1024 | ||
7d37d351 | 1025 | /* Must set vector notifier after guest notifier has been assigned */ |
181103cd | 1026 | if ((with_irqfd || k->guest_notifier_mask) && assign) { |
a38b2c49 MT |
1027 | if (with_irqfd) { |
1028 | proxy->vector_irqfd = | |
1029 | g_malloc0(sizeof(*proxy->vector_irqfd) * | |
1030 | msix_nr_vectors_allocated(&proxy->pci_dev)); | |
1031 | r = kvm_virtio_pci_vector_use(proxy, nvqs); | |
1032 | if (r < 0) { | |
1033 | goto assign_error; | |
1034 | } | |
774345f9 | 1035 | } |
7d37d351 | 1036 | r = msix_set_vector_notifiers(&proxy->pci_dev, |
a38b2c49 MT |
1037 | virtio_pci_vector_unmask, |
1038 | virtio_pci_vector_mask, | |
1039 | virtio_pci_vector_poll); | |
7d37d351 | 1040 | if (r < 0) { |
774345f9 | 1041 | goto notifiers_error; |
7d37d351 JK |
1042 | } |
1043 | } | |
1044 | ||
54dd9321 MT |
1045 | return 0; |
1046 | ||
774345f9 | 1047 | notifiers_error: |
a38b2c49 MT |
1048 | if (with_irqfd) { |
1049 | assert(assign); | |
1050 | kvm_virtio_pci_vector_release(proxy, nvqs); | |
1051 | } | |
774345f9 | 1052 | |
54dd9321 MT |
1053 | assign_error: |
1054 | /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ | |
7d37d351 | 1055 | assert(assign); |
54dd9321 | 1056 | while (--n >= 0) { |
89d62be9 | 1057 | virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd); |
54dd9321 MT |
1058 | } |
1059 | return r; | |
1060 | } | |
1061 | ||
6f80e617 TB |
/*
 * Map (or unmap) a caller-provided host-notifier MemoryRegion over queue
 * @n's slot of the modern notify BAR region.  Only valid for modern-mode
 * proxies and regions exactly one notify-multiplier in size.
 * Returns 0 on success, -1 on invalid arguments.
 */
static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n,
                                           MemoryRegion *mr, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    int offset;

    if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) ||
        virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) {
        return -1;
    }

    if (assign) {
        /* Each queue owns a fixed-size slice of the notify region. */
        offset = virtio_pci_queue_mem_mult(proxy) * n;
        memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1);
    } else {
        memory_region_del_subregion(&proxy->notify.mr, mr);
    }

    return 0;
}
1082 | ||
d2a0ccc6 | 1083 | static void virtio_pci_vmstate_change(DeviceState *d, bool running) |
25db9ebe | 1084 | { |
d2a0ccc6 | 1085 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 | 1086 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
25db9ebe SH |
1087 | |
1088 | if (running) { | |
68a27b20 MT |
1089 | /* Old QEMU versions did not set bus master enable on status write. |
1090 | * Detect DRIVER set and enable it. | |
1091 | */ | |
1092 | if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) && | |
1093 | (vdev->status & VIRTIO_CONFIG_S_DRIVER) && | |
45363e46 | 1094 | !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { |
68a27b20 MT |
1095 | pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, |
1096 | proxy->pci_dev.config[PCI_COMMAND] | | |
1097 | PCI_COMMAND_MASTER, 1); | |
89c473fd | 1098 | } |
25db9ebe | 1099 | virtio_pci_start_ioeventfd(proxy); |
ade80dc8 | 1100 | } else { |
25db9ebe | 1101 | virtio_pci_stop_ioeventfd(proxy); |
ade80dc8 | 1102 | } |
ade80dc8 MT |
1103 | } |
1104 | ||
085bccb7 FK |
1105 | /* |
1106 | * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. | |
1107 | */ | |
1108 | ||
e0d686bf JW |
1109 | static int virtio_pci_query_nvectors(DeviceState *d) |
1110 | { | |
1111 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1112 | ||
1113 | return proxy->nvectors; | |
1114 | } | |
1115 | ||
8607f5c3 JW |
1116 | static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) |
1117 | { | |
1118 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1119 | PCIDevice *dev = &proxy->pci_dev; | |
1120 | ||
f0edf239 | 1121 | return pci_get_address_space(dev); |
8607f5c3 JW |
1122 | } |
1123 | ||
3d1e5d86 JW |
1124 | static bool virtio_pci_iommu_enabled(DeviceState *d) |
1125 | { | |
1126 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1127 | PCIDevice *dev = &proxy->pci_dev; | |
1128 | AddressSpace *dma_as = pci_device_iommu_address_space(dev); | |
1129 | ||
1130 | if (dma_as == &address_space_memory) { | |
1131 | return false; | |
1132 | } | |
1133 | ||
1134 | return true; | |
1135 | } | |
1136 | ||
f19bcdfe JW |
1137 | static bool virtio_pci_queue_enabled(DeviceState *d, int n) |
1138 | { | |
1139 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1140 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1141 | ||
1142 | if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { | |
a48aaf88 | 1143 | return proxy->vqs[n].enabled; |
f19bcdfe JW |
1144 | } |
1145 | ||
0c9753eb | 1146 | return virtio_queue_enabled_legacy(vdev, n); |
f19bcdfe JW |
1147 | } |
1148 | ||
ada434cd | 1149 | static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, |
dfb8e184 MT |
1150 | struct virtio_pci_cap *cap) |
1151 | { | |
1152 | PCIDevice *dev = &proxy->pci_dev; | |
1153 | int offset; | |
1154 | ||
9a7c2a59 MZ |
1155 | offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, |
1156 | cap->cap_len, &error_abort); | |
dfb8e184 MT |
1157 | |
1158 | assert(cap->cap_len >= sizeof *cap); | |
1159 | memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len, | |
1160 | cap->cap_len - PCI_CAP_FLAGS); | |
ada434cd MT |
1161 | |
1162 | return offset; | |
dfb8e184 MT |
1163 | } |
1164 | ||
dfb8e184 MT |
/*
 * MMIO read handler for the modern "common configuration" region
 * (VIRTIO 1.0 common cfg layout).  Returns all-ones when no backend
 * device is plugged, mirroring reads from a nonexistent device.
 */
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        /* Host features are 64-bit; dfselect picks the 32-bit half. */
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            /* Legacy-only feature bits are hidden from modern drivers. */
            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        /* Report the highest configured queue index + 1. */
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}
1253 | ||
/*
 * MMIO write handler for the modern "common configuration" region.
 * Writes are silently dropped when no backend device is plugged.
 * Statement order within the STATUS and Q_ENABLE cases is significant
 * (ioeventfd start/stop must bracket the status change; ring addresses
 * must be latched before enable).
 */
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        /* Writing 0 is a device reset request per the virtio spec. */
        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        if (val == 1) {
            /* Commit the previously latched size and ring addresses. */
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
1360 | ||
1361 | ||
/*
 * Reads from the notify region carry no information: return all-ones
 * when no device is plugged, zero otherwise.
 */
static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    if (virtio_bus_get_device(&proxy->bus) == NULL) {
        return UINT64_MAX;
    }

    return 0;
}
1372 | ||
/*
 * MMIO notify handler: the queue index is derived from the write offset
 * (one multiplier-sized slot per queue); the written value is ignored.
 */
static void virtio_pci_notify_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = addr / virtio_pci_queue_mem_mult(proxy);

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}
1385 | ||
9824d2a3 JW |
/*
 * PIO notify handler: unlike the MMIO variant, the queue index is the
 * written value itself, not the offset.
 */
static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr,
                                        uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    unsigned queue = val;

    if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) {
        virtio_queue_notify(vdev, queue);
    }
}
1398 | ||
dfb8e184 MT |
/*
 * ISR region read: read-to-clear.  Atomically fetch-and-zero the ISR and
 * deassert the legacy INTx line.  Returns all-ones when no device is
 * plugged.
 */
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}
1414 | ||
/* The ISR register is read-to-clear; writes are deliberately ignored. */
static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}
1419 | ||
/*
 * Read from the modern device-specific configuration region, dispatching
 * on access width (1/2/4 bytes).  Returns all-ones when no device is
 * plugged, 0 for unsupported widths.
 */
static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    default:
        val = 0;
        break;
    }
    return val;
}
1447 | ||
/*
 * Write to the modern device-specific configuration region, dispatching
 * on access width.  Writes of unsupported widths (or with no device
 * plugged) are silently dropped.
 */
static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}
1470 | ||
b74259e3 AB |
/*
 * Initialize the five MMIO/PIO MemoryRegions backing the modern virtio-pci
 * layout (common cfg, ISR, device cfg, MMIO notify, PIO notify), each named
 * after the backing device for debuggability.  All regions are
 * little-endian with 1-4 byte access, as the virtio 1.0 spec requires.
 */
static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy,
                                           const char *vdev_name)
{
    static const MemoryRegionOps common_ops = {
        .read = virtio_pci_common_read,
        .write = virtio_pci_common_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps isr_ops = {
        .read = virtio_pci_isr_read,
        .write = virtio_pci_isr_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps device_ops = {
        .read = virtio_pci_device_read,
        .write = virtio_pci_device_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    static const MemoryRegionOps notify_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    /* PIO notify shares the read handler but decodes the queue from the
     * written value rather than the offset. */
    static const MemoryRegionOps notify_pio_ops = {
        .read = virtio_pci_notify_read,
        .write = virtio_pci_notify_write_pio,
        .impl = {
            .min_access_size = 1,
            .max_access_size = 4,
        },
        .endianness = DEVICE_LITTLE_ENDIAN,
    };
    g_autoptr(GString) name = g_string_new(NULL);

    g_string_printf(name, "virtio-pci-common-%s", vdev_name);
    memory_region_init_io(&proxy->common.mr, OBJECT(proxy),
                          &common_ops,
                          proxy,
                          name->str,
                          proxy->common.size);

    g_string_printf(name, "virtio-pci-isr-%s", vdev_name);
    memory_region_init_io(&proxy->isr.mr, OBJECT(proxy),
                          &isr_ops,
                          proxy,
                          name->str,
                          proxy->isr.size);

    g_string_printf(name, "virtio-pci-device-%s", vdev_name);
    memory_region_init_io(&proxy->device.mr, OBJECT(proxy),
                          &device_ops,
                          proxy,
                          name->str,
                          proxy->device.size);

    g_string_printf(name, "virtio-pci-notify-%s", vdev_name);
    memory_region_init_io(&proxy->notify.mr, OBJECT(proxy),
                          &notify_ops,
                          proxy,
                          name->str,
                          proxy->notify.size);

    g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name);
    memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy),
                          &notify_pio_ops,
                          proxy,
                          name->str,
                          proxy->notify_pio.size);
}
1556 | ||
/*
 * Map a virtio-pci region as a subregion of @mr at its configured offset
 * and advertise it to the guest via a vendor capability pointing at @bar.
 */
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);
}
1572 | ||
/* Map a region into the modern memory BAR. */
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}
dfb8e184 | 1580 | |
9824d2a3 JW |
/* Map a region into the modern I/O (PIO) BAR. */
static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}
1588 | ||
/* Remove a region from the modern memory BAR. */
static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy,
                                               VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->modern_bar,
                                &region->mr);
}
1595 | ||
9824d2a3 JW |
/* Remove a region from the modern I/O (PIO) BAR. */
static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy,
                                              VirtIOPCIRegion *region)
{
    memory_region_del_subregion(&proxy->io_bar,
                                &region->mr);
}
1602 | ||
d1b4259f MC |
/*
 * Called by virtio-bus just before the device is plugged: advertise
 * VIRTIO_F_VERSION_1 when modern mode is active, and always advertise
 * VIRTIO_F_BAD_FEATURE.
 */
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
1614 | ||
/* This is called by virtio-bus just after the device is plugged. */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            /* Neither mode usable: modern just got disabled, legacy is off. */
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    /* Re-evaluate after the possible virtio_pci_disable_modern() above. */
    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        /* User-supplied PCI class code overrides the default. */
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            /* IOMMU_PLATFORM requires a modern-only (non-transitional) device. */
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return ;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0: device IDs start at 0x1040 + virtio device id */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;


    if (modern) {
        /* Virtio-1.0 capability templates; BAR/offset filled in when mapped. */
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        /* Map common/isr/device/notify regions into the modern memory BAR. */
        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            /* Optional extra notify capability backed by a PIO BAR. */
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        /* PCI_CFG capability: make its bar/offset/length/data guest-writable. */
        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            /* Fall back to INTx-only operation. */
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        /* Legacy I/O BAR: header + device config, rounded to a power of 2. */
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
1769 | ||
06a13073 PB |
1770 | static void virtio_pci_device_unplugged(DeviceState *d) |
1771 | { | |
06a13073 | 1772 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); |
9a4c0e22 | 1773 | bool modern = virtio_pci_modern(proxy); |
9824d2a3 | 1774 | bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; |
06a13073 PB |
1775 | |
1776 | virtio_pci_stop_ioeventfd(proxy); | |
27462695 MT |
1777 | |
1778 | if (modern) { | |
9824d2a3 JW |
1779 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); |
1780 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); | |
1781 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); | |
1782 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); | |
1783 | if (modern_pio) { | |
1784 | virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); | |
1785 | } | |
27462695 | 1786 | } |
06a13073 PB |
1787 | } |
1788 | ||
/*
 * PCIDeviceClass::realize for all virtio-pci proxies: lay out the BARs and
 * modern capability regions, decide legacy vs modern, add PCIe capabilities
 * when on a PCIe port, then create the virtio bus and call the subclass
 * realize hook.
 */
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    /* "PCIe port" here means plugged below a non-root express bus. */
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     * region 0 -- virtio legacy io bar
     * region 1 -- msi-x bar
     * region 2 -- virtio modern io bar (off by default)
     * region 4+5 -- virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx = 0;
    proxy->msix_bar_idx = 1;
    proxy->modern_io_bar_idx = 2;
    proxy->modern_mem_bar_idx = 4;

    /* Fixed layout of the modern memory BAR: 4K per capability region. */
    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    /* Notify region size depends on the per-queue multiplier (page-per-vq). */
    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    /* disable-legacy=auto resolves to on for PCIe ports, off otherwise. */
    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
1923 | ||
1924 | static void virtio_pci_exit(PCIDevice *pci_dev) | |
1925 | { | |
fdfa3b1d AM |
1926 | VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); |
1927 | bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && | |
1928 | !pci_bus_is_root(pci_get_bus(pci_dev)); | |
1929 | ||
8b81bb3b | 1930 | msix_uninit_exclusive_bar(pci_dev); |
fdfa3b1d AM |
1931 | if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port && |
1932 | pci_is_express(pci_dev)) { | |
1933 | pcie_aer_exit(pci_dev); | |
1934 | } | |
085bccb7 FK |
1935 | } |
1936 | ||
/*
 * DeviceClass::reset: stop ioeventfd, reset the virtio device on the bus,
 * release MSI-X vectors and clear all per-queue proxy state, then reset the
 * PCIe capability registers for express devices.
 */
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    /* Clear the cached modern-interface queue state (addresses are 32+32). */
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        /* Return the function to the D0 power state. */
        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}
1963 | ||
/* Properties common to every virtio-pci proxy device. */
static Property virtio_pci_properties[] = {
    /* Migration-compatibility knobs */
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    /* Modern-interface layout options */
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    /* PCIe capability selection (x- prefixed ones are compat-internal) */
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
1993 | ||
0560b0e9 SL |
1994 | static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) |
1995 | { | |
1996 | VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev); | |
1997 | VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); | |
1998 | PCIDevice *pci_dev = &proxy->pci_dev; | |
1999 | ||
2000 | if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && | |
9a4c0e22 | 2001 | virtio_pci_modern(proxy)) { |
0560b0e9 SL |
2002 | pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; |
2003 | } | |
2004 | ||
2005 | vpciklass->parent_dc_realize(qdev, errp); | |
2006 | } | |
2007 | ||
085bccb7 FK |
2008 | static void virtio_pci_class_init(ObjectClass *klass, void *data) |
2009 | { | |
2010 | DeviceClass *dc = DEVICE_CLASS(klass); | |
2011 | PCIDeviceClass *k = PCI_DEVICE_CLASS(klass); | |
0560b0e9 | 2012 | VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass); |
085bccb7 | 2013 | |
4f67d30b | 2014 | device_class_set_props(dc, virtio_pci_properties); |
fc079951 | 2015 | k->realize = virtio_pci_realize; |
085bccb7 FK |
2016 | k->exit = virtio_pci_exit; |
2017 | k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET; | |
2018 | k->revision = VIRTIO_PCI_ABI_VERSION; | |
2019 | k->class_id = PCI_CLASS_OTHERS; | |
bf853881 PMD |
2020 | device_class_set_parent_realize(dc, virtio_pci_dc_realize, |
2021 | &vpciklass->parent_dc_realize); | |
59ccd20a | 2022 | dc->reset = virtio_pci_reset; |
085bccb7 FK |
2023 | } |
2024 | ||
/* Abstract base type; concrete devices derive via virtio_pci_types_register(). */
static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};
2033 | ||
/*
 * Properties for the user-instantiable generic types: select whether the
 * legacy (0.9) and/or modern (1.0) virtio interfaces are exposed.
 */
static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};
2040 | ||
2041 | static void virtio_pci_base_class_init(ObjectClass *klass, void *data) | |
2042 | { | |
2043 | const VirtioPCIDeviceTypeInfo *t = data; | |
2044 | if (t->class_init) { | |
2045 | t->class_init(klass, NULL); | |
2046 | } | |
2047 | } | |
2048 | ||
2049 | static void virtio_pci_generic_class_init(ObjectClass *klass, void *data) | |
2050 | { | |
2051 | DeviceClass *dc = DEVICE_CLASS(klass); | |
2052 | ||
4f67d30b | 2053 | device_class_set_props(dc, virtio_pci_generic_properties); |
a4ee4c8b EH |
2054 | } |
2055 | ||
a4ee4c8b EH |
2056 | static void virtio_pci_transitional_instance_init(Object *obj) |
2057 | { | |
2058 | VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); | |
2059 | ||
2060 | proxy->disable_legacy = ON_OFF_AUTO_OFF; | |
2061 | proxy->disable_modern = false; | |
2062 | } | |
2063 | ||
2064 | static void virtio_pci_non_transitional_instance_init(Object *obj) | |
2065 | { | |
2066 | VirtIOPCIProxy *proxy = VIRTIO_PCI(obj); | |
2067 | ||
2068 | proxy->disable_legacy = ON_OFF_AUTO_ON; | |
2069 | proxy->disable_modern = false; | |
2070 | } | |
2071 | ||
/*
 * Register the QOM types for one virtio-pci device family.
 *
 * Depending on which names are set in @t this registers:
 *  - an abstract base type (t->base_name, or a synthesized
 *    "<generic_name>-base-type" when no base name was given),
 *  - a generic type (t->generic_name) carrying the user-visible
 *    disable-legacy/disable-modern properties,
 *  - optional non-transitional (modern-only) and transitional types.
 */
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        /* ...and let the generic type run the device-specific class_init. */
        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    /* type_register() copied the name, so the synthesized one can go. */
    g_free(base_name);
}
2150 | ||
1436f32a SH |
2151 | unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues) |
2152 | { | |
2153 | /* | |
2154 | * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted | |
2155 | * virtqueue buffers can handle their completion. When a different vCPU | |
2156 | * handles completion it may need to IPI the vCPU that submitted the | |
2157 | * request and this adds overhead. | |
2158 | * | |
2159 | * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in | |
2160 | * guests with very many vCPUs and a device that is only used by a few | |
2161 | * vCPUs. Unfortunately optimizing that case requires manual pinning inside | |
2162 | * the guest, so those users might as well manually set the number of | |
2163 | * queues. There is no upper limit that can be applied automatically and | |
2164 | * doing so arbitrarily would result in a sudden performance drop once the | |
2165 | * threshold number of vCPUs is exceeded. | |
2166 | */ | |
2167 | unsigned num_queues = current_machine->smp.cpus; | |
2168 | ||
2169 | /* | |
2170 | * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the | |
2171 | * config change interrupt and the fixed virtqueues must be taken into | |
2172 | * account too. | |
2173 | */ | |
2174 | num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues); | |
2175 | ||
2176 | /* | |
2177 | * There is a limit to how many virtqueues a device can have. | |
2178 | */ | |
2179 | return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues); | |
2180 | } | |
2181 | ||
0a2acf5e FK |
2182 | /* virtio-pci-bus */ |
2183 | ||
ac7af112 AF |
2184 | static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, |
2185 | VirtIOPCIProxy *dev) | |
0a2acf5e FK |
2186 | { |
2187 | DeviceState *qdev = DEVICE(dev); | |
f4dd69aa FK |
2188 | char virtio_bus_name[] = "virtio-bus"; |
2189 | ||
fb17dfe0 | 2190 | qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, |
f4dd69aa | 2191 | virtio_bus_name); |
0a2acf5e FK |
2192 | } |
2193 | ||
/* Wire the VirtioBusClass callbacks to the virtio-pci implementations. */
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    /* A virtio-pci proxy carries exactly one virtio device. */
    bus_class->max_dev = 1;
    /* Interrupt delivery */
    k->notify = virtio_pci_notify;
    /* Migration state */
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    /* Guest/host notifier plumbing */
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    /* Plug/unplug lifecycle */
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    /* Misc queries */
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}
2221 | ||
/* The bus type each virtio-pci proxy instantiates for its device. */
static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};
2229 | ||
/* Register only the abstract base types here; concrete device types are
 * registered by their own modules via virtio_pci_types_register(). */
static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)
271458d7 | 2238 |