]>
Commit | Line | Data |
---|---|---|
53c25cea PB |
1 | /* |
2 | * Virtio PCI Bindings | |
3 | * | |
4 | * Copyright IBM, Corp. 2007 | |
5 | * Copyright (c) 2009 CodeSourcery | |
6 | * | |
7 | * Authors: | |
8 | * Anthony Liguori <aliguori@us.ibm.com> | |
9 | * Paul Brook <paul@codesourcery.com> | |
10 | * | |
11 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
12 | * the COPYING file in the top-level directory. | |
13 | * | |
6b620ca3 PB |
14 | * Contributions after 2012-01-13 are licensed under the terms of the |
15 | * GNU GPL, version 2 or (at your option) any later version. | |
53c25cea PB |
16 | */ |
17 | ||
9b8bfe21 | 18 | #include "qemu/osdep.h" |
53c25cea | 19 | |
062c08d1 | 20 | #include "exec/memop.h" |
cbbe4f50 | 21 | #include "standard-headers/linux/virtio_pci.h" |
1436f32a | 22 | #include "hw/boards.h" |
0d09e41a | 23 | #include "hw/virtio/virtio.h" |
ca77ee28 | 24 | #include "migration/qemu-file-types.h" |
83c9f4ca | 25 | #include "hw/pci/pci.h" |
b0e5196a | 26 | #include "hw/pci/pci_bus.h" |
a27bd6c7 | 27 | #include "hw/qdev-properties.h" |
da34e65c | 28 | #include "qapi/error.h" |
1de7afc9 | 29 | #include "qemu/error-report.h" |
a8218588 | 30 | #include "qemu/log.h" |
0b8fa32f | 31 | #include "qemu/module.h" |
83c9f4ca PB |
32 | #include "hw/pci/msi.h" |
33 | #include "hw/pci/msix.h" | |
34 | #include "hw/loader.h" | |
9c17d615 | 35 | #include "sysemu/kvm.h" |
47b43a1f | 36 | #include "virtio-pci.h" |
1de7afc9 | 37 | #include "qemu/range.h" |
0d09e41a | 38 | #include "hw/virtio/virtio-bus.h" |
24a6e7f4 | 39 | #include "qapi/visitor.h" |
3909c079 | 40 | #include "sysemu/replay.h" |
53c25cea | 41 | |
cbbe4f50 | 42 | #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) |
aba800a3 | 43 | |
c17bef33 MT |
44 | #undef VIRTIO_PCI_CONFIG |
45 | ||
aba800a3 MT |
46 | /* The remaining space is defined by each driver as the per-driver |
47 | * configuration space */ | |
cbbe4f50 | 48 | #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) |
53c25cea | 49 | |
ac7af112 AF |
50 | static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, |
51 | VirtIOPCIProxy *dev); | |
75fd6f13 | 52 | static void virtio_pci_reset(DeviceState *qdev); |
d51fcfac | 53 | |
53c25cea | 54 | /* virtio device */ |
d2a0ccc6 MT |
55 | /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */ |
56 | static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d) | |
57 | { | |
58 | return container_of(d, VirtIOPCIProxy, pci_dev.qdev); | |
59 | } | |
53c25cea | 60 | |
d2a0ccc6 MT |
61 | /* DeviceState to VirtIOPCIProxy. Note: used on datapath, |
62 | * be careful and test performance if you change this. | |
63 | */ | |
64 | static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d) | |
53c25cea | 65 | { |
d2a0ccc6 MT |
66 | return container_of(d, VirtIOPCIProxy, pci_dev.qdev); |
67 | } | |
68 | ||
69 | static void virtio_pci_notify(DeviceState *d, uint16_t vector) | |
70 | { | |
71 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); | |
a3fc66d9 | 72 | |
aba800a3 MT |
73 | if (msix_enabled(&proxy->pci_dev)) |
74 | msix_notify(&proxy->pci_dev, vector); | |
a3fc66d9 PB |
75 | else { |
76 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
d73415a3 | 77 | pci_set_irq(&proxy->pci_dev, qatomic_read(&vdev->isr) & 1); |
a3fc66d9 | 78 | } |
53c25cea PB |
79 | } |
80 | ||
d2a0ccc6 | 81 | static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) |
ff24bd58 | 82 | { |
d2a0ccc6 | 83 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
84 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
85 | ||
ff24bd58 MT |
86 | pci_device_save(&proxy->pci_dev, f); |
87 | msix_save(&proxy->pci_dev, f); | |
88 | if (msix_present(&proxy->pci_dev)) | |
a3fc66d9 | 89 | qemu_put_be16(f, vdev->config_vector); |
ff24bd58 MT |
90 | } |
91 | ||
b81b948e DDAG |
92 | static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { |
93 | .name = "virtio_pci/modern_queue_state", | |
94 | .version_id = 1, | |
95 | .minimum_version_id = 1, | |
96 | .fields = (VMStateField[]) { | |
97 | VMSTATE_UINT16(num, VirtIOPCIQueue), | |
98 | VMSTATE_UNUSED(1), /* enabled was stored as be16 */ | |
99 | VMSTATE_BOOL(enabled, VirtIOPCIQueue), | |
100 | VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2), | |
101 | VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2), | |
102 | VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2), | |
103 | VMSTATE_END_OF_LIST() | |
a6df8adf | 104 | } |
a6df8adf JW |
105 | }; |
106 | ||
107 | static bool virtio_pci_modern_state_needed(void *opaque) | |
108 | { | |
109 | VirtIOPCIProxy *proxy = opaque; | |
110 | ||
9a4c0e22 | 111 | return virtio_pci_modern(proxy); |
a6df8adf JW |
112 | } |
113 | ||
b81b948e | 114 | static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { |
a6df8adf JW |
115 | .name = "virtio_pci/modern_state", |
116 | .version_id = 1, | |
117 | .minimum_version_id = 1, | |
118 | .needed = &virtio_pci_modern_state_needed, | |
119 | .fields = (VMStateField[]) { | |
b81b948e DDAG |
120 | VMSTATE_UINT32(dfselect, VirtIOPCIProxy), |
121 | VMSTATE_UINT32(gfselect, VirtIOPCIProxy), | |
122 | VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), | |
123 | VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0, | |
124 | vmstate_virtio_pci_modern_queue_state, | |
125 | VirtIOPCIQueue), | |
a6df8adf JW |
126 | VMSTATE_END_OF_LIST() |
127 | } | |
128 | }; | |
129 | ||
130 | static const VMStateDescription vmstate_virtio_pci = { | |
131 | .name = "virtio_pci", | |
132 | .version_id = 1, | |
133 | .minimum_version_id = 1, | |
a6df8adf JW |
134 | .fields = (VMStateField[]) { |
135 | VMSTATE_END_OF_LIST() | |
136 | }, | |
137 | .subsections = (const VMStateDescription*[]) { | |
b81b948e | 138 | &vmstate_virtio_pci_modern_state_sub, |
a6df8adf JW |
139 | NULL |
140 | } | |
141 | }; | |
142 | ||
b81b948e DDAG |
143 | static bool virtio_pci_has_extra_state(DeviceState *d) |
144 | { | |
145 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
146 | ||
147 | return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; | |
148 | } | |
149 | ||
a6df8adf JW |
150 | static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) |
151 | { | |
152 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
153 | ||
154 | vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); | |
155 | } | |
156 | ||
157 | static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) | |
158 | { | |
159 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
160 | ||
161 | return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); | |
162 | } | |
163 | ||
d2a0ccc6 | 164 | static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) |
ff24bd58 | 165 | { |
d2a0ccc6 | 166 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
167 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
168 | ||
ff24bd58 | 169 | if (msix_present(&proxy->pci_dev)) |
a3fc66d9 | 170 | qemu_put_be16(f, virtio_queue_vector(vdev, n)); |
ff24bd58 MT |
171 | } |
172 | ||
d2a0ccc6 | 173 | static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) |
ff24bd58 | 174 | { |
d2a0ccc6 | 175 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
176 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
177 | ||
ff24bd58 MT |
178 | int ret; |
179 | ret = pci_device_load(&proxy->pci_dev, f); | |
e6da7680 | 180 | if (ret) { |
ff24bd58 | 181 | return ret; |
e6da7680 | 182 | } |
3cac001e | 183 | msix_unuse_all_vectors(&proxy->pci_dev); |
ff24bd58 | 184 | msix_load(&proxy->pci_dev, f); |
e6da7680 | 185 | if (msix_present(&proxy->pci_dev)) { |
a3fc66d9 | 186 | qemu_get_be16s(f, &vdev->config_vector); |
e6da7680 | 187 | } else { |
a3fc66d9 | 188 | vdev->config_vector = VIRTIO_NO_VECTOR; |
e6da7680 | 189 | } |
a3fc66d9 PB |
190 | if (vdev->config_vector != VIRTIO_NO_VECTOR) { |
191 | return msix_vector_use(&proxy->pci_dev, vdev->config_vector); | |
e6da7680 | 192 | } |
ff24bd58 MT |
193 | return 0; |
194 | } | |
195 | ||
d2a0ccc6 | 196 | static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) |
ff24bd58 | 197 | { |
d2a0ccc6 | 198 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
199 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
200 | ||
ff24bd58 | 201 | uint16_t vector; |
e6da7680 MT |
202 | if (msix_present(&proxy->pci_dev)) { |
203 | qemu_get_be16s(f, &vector); | |
204 | } else { | |
205 | vector = VIRTIO_NO_VECTOR; | |
206 | } | |
a3fc66d9 | 207 | virtio_queue_set_vector(vdev, n, vector); |
e6da7680 MT |
208 | if (vector != VIRTIO_NO_VECTOR) { |
209 | return msix_vector_use(&proxy->pci_dev, vector); | |
210 | } | |
a6df8adf | 211 | |
ff24bd58 MT |
212 | return 0; |
213 | } | |
214 | ||
8e93cef1 | 215 | static bool virtio_pci_ioeventfd_enabled(DeviceState *d) |
9f06e71a CH |
216 | { |
217 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
218 | ||
8e93cef1 | 219 | return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0; |
9f06e71a CH |
220 | } |
221 | ||
975acc0a JW |
222 | #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 |
223 | ||
d9997d89 MA |
224 | static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy) |
225 | { | |
226 | return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ? | |
227 | QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4; | |
228 | } | |
229 | ||
9f06e71a CH |
230 | static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, |
231 | int n, bool assign) | |
25db9ebe | 232 | { |
9f06e71a | 233 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
234 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
235 | VirtQueue *vq = virtio_get_queue(vdev, n); | |
9a4c0e22 MA |
236 | bool legacy = virtio_pci_legacy(proxy); |
237 | bool modern = virtio_pci_modern(proxy); | |
bc85ccfd | 238 | bool fast_mmio = kvm_ioeventfd_any_length_enabled(); |
9824d2a3 | 239 | bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; |
588255ad | 240 | MemoryRegion *modern_mr = &proxy->notify.mr; |
9824d2a3 | 241 | MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; |
975acc0a | 242 | MemoryRegion *legacy_mr = &proxy->bar; |
d9997d89 | 243 | hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) * |
975acc0a JW |
244 | virtio_get_queue_index(vq); |
245 | hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; | |
da146d0a | 246 | |
25db9ebe | 247 | if (assign) { |
975acc0a | 248 | if (modern) { |
bc85ccfd JW |
249 | if (fast_mmio) { |
250 | memory_region_add_eventfd(modern_mr, modern_addr, 0, | |
251 | false, n, notifier); | |
252 | } else { | |
253 | memory_region_add_eventfd(modern_mr, modern_addr, 2, | |
254 | false, n, notifier); | |
255 | } | |
9824d2a3 JW |
256 | if (modern_pio) { |
257 | memory_region_add_eventfd(modern_notify_mr, 0, 2, | |
258 | true, n, notifier); | |
259 | } | |
975acc0a JW |
260 | } |
261 | if (legacy) { | |
262 | memory_region_add_eventfd(legacy_mr, legacy_addr, 2, | |
263 | true, n, notifier); | |
264 | } | |
25db9ebe | 265 | } else { |
975acc0a | 266 | if (modern) { |
bc85ccfd JW |
267 | if (fast_mmio) { |
268 | memory_region_del_eventfd(modern_mr, modern_addr, 0, | |
269 | false, n, notifier); | |
270 | } else { | |
271 | memory_region_del_eventfd(modern_mr, modern_addr, 2, | |
272 | false, n, notifier); | |
273 | } | |
9824d2a3 JW |
274 | if (modern_pio) { |
275 | memory_region_del_eventfd(modern_notify_mr, 0, 2, | |
276 | true, n, notifier); | |
277 | } | |
975acc0a JW |
278 | } |
279 | if (legacy) { | |
280 | memory_region_del_eventfd(legacy_mr, legacy_addr, 2, | |
281 | true, n, notifier); | |
282 | } | |
25db9ebe | 283 | } |
9f06e71a | 284 | return 0; |
25db9ebe SH |
285 | } |
286 | ||
b36e3914 | 287 | static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) |
25db9ebe | 288 | { |
9f06e71a | 289 | virtio_bus_start_ioeventfd(&proxy->bus); |
25db9ebe SH |
290 | } |
291 | ||
b36e3914 | 292 | static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) |
25db9ebe | 293 | { |
9f06e71a | 294 | virtio_bus_stop_ioeventfd(&proxy->bus); |
25db9ebe SH |
295 | } |
296 | ||
53c25cea PB |
297 | static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) |
298 | { | |
299 | VirtIOPCIProxy *proxy = opaque; | |
a3fc66d9 | 300 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
a8170e5e | 301 | hwaddr pa; |
53c25cea | 302 | |
53c25cea PB |
303 | switch (addr) { |
304 | case VIRTIO_PCI_GUEST_FEATURES: | |
181103cd FK |
305 | /* Guest does not negotiate properly? We have to assume nothing. */ |
306 | if (val & (1 << VIRTIO_F_BAD_FEATURE)) { | |
307 | val = virtio_bus_get_vdev_bad_features(&proxy->bus); | |
308 | } | |
ad0c9332 | 309 | virtio_set_features(vdev, val); |
53c25cea PB |
310 | break; |
311 | case VIRTIO_PCI_QUEUE_PFN: | |
a8170e5e | 312 | pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT; |
1b8e9b27 | 313 | if (pa == 0) { |
75fd6f13 | 314 | virtio_pci_reset(DEVICE(proxy)); |
1b8e9b27 | 315 | } |
7055e687 MT |
316 | else |
317 | virtio_queue_set_addr(vdev, vdev->queue_sel, pa); | |
53c25cea PB |
318 | break; |
319 | case VIRTIO_PCI_QUEUE_SEL: | |
87b3bd1c | 320 | if (val < VIRTIO_QUEUE_MAX) |
53c25cea PB |
321 | vdev->queue_sel = val; |
322 | break; | |
323 | case VIRTIO_PCI_QUEUE_NOTIFY: | |
87b3bd1c | 324 | if (val < VIRTIO_QUEUE_MAX) { |
7157e2e2 SH |
325 | virtio_queue_notify(vdev, val); |
326 | } | |
53c25cea PB |
327 | break; |
328 | case VIRTIO_PCI_STATUS: | |
25db9ebe SH |
329 | if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { |
330 | virtio_pci_stop_ioeventfd(proxy); | |
331 | } | |
332 | ||
3e607cb5 | 333 | virtio_set_status(vdev, val & 0xFF); |
25db9ebe SH |
334 | |
335 | if (val & VIRTIO_CONFIG_S_DRIVER_OK) { | |
336 | virtio_pci_start_ioeventfd(proxy); | |
337 | } | |
338 | ||
1b8e9b27 | 339 | if (vdev->status == 0) { |
75fd6f13 | 340 | virtio_pci_reset(DEVICE(proxy)); |
1b8e9b27 | 341 | } |
c81131db | 342 | |
e43c0b2e MT |
343 | /* Linux before 2.6.34 drives the device without enabling |
344 | the PCI device bus master bit. Enable it automatically | |
345 | for the guest. This is a PCI spec violation but so is | |
346 | initiating DMA with bus master bit clear. */ | |
347 | if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) { | |
348 | pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, | |
349 | proxy->pci_dev.config[PCI_COMMAND] | | |
350 | PCI_COMMAND_MASTER, 1); | |
351 | } | |
53c25cea | 352 | break; |
aba800a3 MT |
353 | case VIRTIO_MSI_CONFIG_VECTOR: |
354 | msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); | |
355 | /* Make it possible for guest to discover an error took place. */ | |
356 | if (msix_vector_use(&proxy->pci_dev, val) < 0) | |
357 | val = VIRTIO_NO_VECTOR; | |
358 | vdev->config_vector = val; | |
359 | break; | |
360 | case VIRTIO_MSI_QUEUE_VECTOR: | |
361 | msix_vector_unuse(&proxy->pci_dev, | |
362 | virtio_queue_vector(vdev, vdev->queue_sel)); | |
363 | /* Make it possible for guest to discover an error took place. */ | |
364 | if (msix_vector_use(&proxy->pci_dev, val) < 0) | |
365 | val = VIRTIO_NO_VECTOR; | |
366 | virtio_queue_set_vector(vdev, vdev->queue_sel, val); | |
367 | break; | |
368 | default: | |
a8218588 PMD |
369 | qemu_log_mask(LOG_GUEST_ERROR, |
370 | "%s: unexpected address 0x%x value 0x%x\n", | |
371 | __func__, addr, val); | |
aba800a3 | 372 | break; |
53c25cea PB |
373 | } |
374 | } | |
375 | ||
aba800a3 | 376 | static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) |
53c25cea | 377 | { |
a3fc66d9 | 378 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
53c25cea PB |
379 | uint32_t ret = 0xFFFFFFFF; |
380 | ||
53c25cea PB |
381 | switch (addr) { |
382 | case VIRTIO_PCI_HOST_FEATURES: | |
6b8f1020 | 383 | ret = vdev->host_features; |
53c25cea PB |
384 | break; |
385 | case VIRTIO_PCI_GUEST_FEATURES: | |
704a76fc | 386 | ret = vdev->guest_features; |
53c25cea PB |
387 | break; |
388 | case VIRTIO_PCI_QUEUE_PFN: | |
389 | ret = virtio_queue_get_addr(vdev, vdev->queue_sel) | |
390 | >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; | |
391 | break; | |
392 | case VIRTIO_PCI_QUEUE_NUM: | |
393 | ret = virtio_queue_get_num(vdev, vdev->queue_sel); | |
394 | break; | |
395 | case VIRTIO_PCI_QUEUE_SEL: | |
396 | ret = vdev->queue_sel; | |
397 | break; | |
398 | case VIRTIO_PCI_STATUS: | |
399 | ret = vdev->status; | |
400 | break; | |
401 | case VIRTIO_PCI_ISR: | |
402 | /* reading from the ISR also clears it. */ | |
d73415a3 | 403 | ret = qatomic_xchg(&vdev->isr, 0); |
9e64f8a3 | 404 | pci_irq_deassert(&proxy->pci_dev); |
53c25cea | 405 | break; |
aba800a3 MT |
406 | case VIRTIO_MSI_CONFIG_VECTOR: |
407 | ret = vdev->config_vector; | |
408 | break; | |
409 | case VIRTIO_MSI_QUEUE_VECTOR: | |
410 | ret = virtio_queue_vector(vdev, vdev->queue_sel); | |
411 | break; | |
53c25cea PB |
412 | default: |
413 | break; | |
414 | } | |
415 | ||
416 | return ret; | |
417 | } | |
418 | ||
df6db5b3 AG |
419 | static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, |
420 | unsigned size) | |
53c25cea PB |
421 | { |
422 | VirtIOPCIProxy *proxy = opaque; | |
a3fc66d9 | 423 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
cbbe4f50 | 424 | uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); |
df6db5b3 | 425 | uint64_t val = 0; |
bf697371 AM |
426 | |
427 | if (vdev == NULL) { | |
428 | return UINT64_MAX; | |
429 | } | |
430 | ||
aba800a3 | 431 | if (addr < config) { |
df6db5b3 | 432 | return virtio_ioport_read(proxy, addr); |
aba800a3 MT |
433 | } |
434 | addr -= config; | |
53c25cea | 435 | |
df6db5b3 AG |
436 | switch (size) { |
437 | case 1: | |
a3fc66d9 | 438 | val = virtio_config_readb(vdev, addr); |
df6db5b3 AG |
439 | break; |
440 | case 2: | |
a3fc66d9 | 441 | val = virtio_config_readw(vdev, addr); |
616a6552 | 442 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
443 | val = bswap16(val); |
444 | } | |
df6db5b3 AG |
445 | break; |
446 | case 4: | |
a3fc66d9 | 447 | val = virtio_config_readl(vdev, addr); |
616a6552 | 448 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
449 | val = bswap32(val); |
450 | } | |
df6db5b3 | 451 | break; |
82afa586 | 452 | } |
df6db5b3 | 453 | return val; |
53c25cea PB |
454 | } |
455 | ||
df6db5b3 AG |
456 | static void virtio_pci_config_write(void *opaque, hwaddr addr, |
457 | uint64_t val, unsigned size) | |
53c25cea PB |
458 | { |
459 | VirtIOPCIProxy *proxy = opaque; | |
cbbe4f50 | 460 | uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); |
a3fc66d9 | 461 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
bf697371 AM |
462 | |
463 | if (vdev == NULL) { | |
464 | return; | |
465 | } | |
466 | ||
aba800a3 MT |
467 | if (addr < config) { |
468 | virtio_ioport_write(proxy, addr, val); | |
469 | return; | |
470 | } | |
471 | addr -= config; | |
df6db5b3 AG |
472 | /* |
473 | * Virtio-PCI is odd. Ioports are LE but config space is target native | |
474 | * endian. | |
475 | */ | |
476 | switch (size) { | |
477 | case 1: | |
a3fc66d9 | 478 | virtio_config_writeb(vdev, addr, val); |
df6db5b3 AG |
479 | break; |
480 | case 2: | |
616a6552 | 481 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
482 | val = bswap16(val); |
483 | } | |
a3fc66d9 | 484 | virtio_config_writew(vdev, addr, val); |
df6db5b3 AG |
485 | break; |
486 | case 4: | |
616a6552 | 487 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
488 | val = bswap32(val); |
489 | } | |
a3fc66d9 | 490 | virtio_config_writel(vdev, addr, val); |
df6db5b3 | 491 | break; |
82afa586 | 492 | } |
53c25cea PB |
493 | } |
494 | ||
da146d0a | 495 | static const MemoryRegionOps virtio_pci_config_ops = { |
df6db5b3 AG |
496 | .read = virtio_pci_config_read, |
497 | .write = virtio_pci_config_write, | |
498 | .impl = { | |
499 | .min_access_size = 1, | |
500 | .max_access_size = 4, | |
501 | }, | |
8e4a424b | 502 | .endianness = DEVICE_LITTLE_ENDIAN, |
da146d0a | 503 | }; |
aba800a3 | 504 | |
a93c8d82 AK |
505 | static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy, |
506 | hwaddr *off, int len) | |
507 | { | |
508 | int i; | |
509 | VirtIOPCIRegion *reg; | |
510 | ||
511 | for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) { | |
512 | reg = &proxy->regs[i]; | |
513 | if (*off >= reg->offset && | |
514 | *off + len <= reg->offset + reg->size) { | |
515 | *off -= reg->offset; | |
516 | return ®->mr; | |
517 | } | |
518 | } | |
519 | ||
520 | return NULL; | |
521 | } | |
522 | ||
1e40356c MT |
523 | /* Below are generic functions to do memcpy from/to an address space, |
524 | * without byteswaps, with input validation. | |
525 | * | |
526 | * As regular address_space_* APIs all do some kind of byteswap at least for | |
527 | * some host/target combinations, we are forced to explicitly convert to a | |
528 | * known-endianness integer value. | |
529 | * It doesn't really matter which endian format to go through, so the code | |
530 | * below selects the endian that causes the least amount of work on the given | |
531 | * host. | |
532 | * | |
533 | * Note: host pointer must be aligned. | |
534 | */ | |
535 | static | |
a93c8d82 | 536 | void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, |
1e40356c MT |
537 | const uint8_t *buf, int len) |
538 | { | |
a93c8d82 AK |
539 | uint64_t val; |
540 | MemoryRegion *mr; | |
1e40356c MT |
541 | |
542 | /* address_space_* APIs assume an aligned address. | |
543 | * As address is under guest control, handle illegal values. | |
544 | */ | |
545 | addr &= ~(len - 1); | |
546 | ||
a93c8d82 AK |
547 | mr = virtio_address_space_lookup(proxy, &addr, len); |
548 | if (!mr) { | |
549 | return; | |
550 | } | |
551 | ||
1e40356c MT |
552 | /* Make sure caller aligned buf properly */ |
553 | assert(!(((uintptr_t)buf) & (len - 1))); | |
554 | ||
555 | switch (len) { | |
556 | case 1: | |
557 | val = pci_get_byte(buf); | |
1e40356c MT |
558 | break; |
559 | case 2: | |
9bf825bf | 560 | val = pci_get_word(buf); |
1e40356c MT |
561 | break; |
562 | case 4: | |
9bf825bf | 563 | val = pci_get_long(buf); |
1e40356c MT |
564 | break; |
565 | default: | |
566 | /* As length is under guest control, handle illegal values. */ | |
a93c8d82 | 567 | return; |
1e40356c | 568 | } |
d5d680ca | 569 | memory_region_dispatch_write(mr, addr, val, size_memop(len) | MO_LE, |
062c08d1 | 570 | MEMTXATTRS_UNSPECIFIED); |
1e40356c MT |
571 | } |
572 | ||
573 | static void | |
a93c8d82 AK |
574 | virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, |
575 | uint8_t *buf, int len) | |
1e40356c | 576 | { |
a93c8d82 AK |
577 | uint64_t val; |
578 | MemoryRegion *mr; | |
1e40356c MT |
579 | |
580 | /* address_space_* APIs assume an aligned address. | |
581 | * As address is under guest control, handle illegal values. | |
582 | */ | |
583 | addr &= ~(len - 1); | |
584 | ||
a93c8d82 AK |
585 | mr = virtio_address_space_lookup(proxy, &addr, len); |
586 | if (!mr) { | |
587 | return; | |
588 | } | |
589 | ||
1e40356c MT |
590 | /* Make sure caller aligned buf properly */ |
591 | assert(!(((uintptr_t)buf) & (len - 1))); | |
592 | ||
d5d680ca | 593 | memory_region_dispatch_read(mr, addr, &val, size_memop(len) | MO_LE, |
062c08d1 | 594 | MEMTXATTRS_UNSPECIFIED); |
1e40356c MT |
595 | switch (len) { |
596 | case 1: | |
1e40356c MT |
597 | pci_set_byte(buf, val); |
598 | break; | |
599 | case 2: | |
9bf825bf | 600 | pci_set_word(buf, val); |
1e40356c MT |
601 | break; |
602 | case 4: | |
9bf825bf | 603 | pci_set_long(buf, val); |
1e40356c MT |
604 | break; |
605 | default: | |
606 | /* As length is under guest control, handle illegal values. */ | |
607 | break; | |
608 | } | |
609 | } | |
610 | ||
aba800a3 MT |
611 | static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, |
612 | uint32_t val, int len) | |
613 | { | |
3f262b26 | 614 | VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); |
a3fc66d9 | 615 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
ada434cd | 616 | struct virtio_pci_cfg_cap *cfg; |
ed757e14 | 617 | |
1129714f MT |
618 | pci_default_write_config(pci_dev, address, val, len); |
619 | ||
eb1556c4 JS |
620 | if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) { |
621 | pcie_cap_flr_write_config(pci_dev, address, val, len); | |
622 | } | |
623 | ||
9d7bd082 MR |
624 | if (range_covers_byte(address, len, PCI_COMMAND)) { |
625 | if (!(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { | |
626 | virtio_set_disabled(vdev, true); | |
627 | virtio_pci_stop_ioeventfd(proxy); | |
628 | virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); | |
629 | } else { | |
630 | virtio_set_disabled(vdev, false); | |
631 | } | |
ed757e14 | 632 | } |
ada434cd MT |
633 | |
634 | if (proxy->config_cap && | |
635 | ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, | |
636 | pci_cfg_data), | |
637 | sizeof cfg->pci_cfg_data)) { | |
638 | uint32_t off; | |
639 | uint32_t len; | |
640 | ||
641 | cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); | |
642 | off = le32_to_cpu(cfg->cap.offset); | |
643 | len = le32_to_cpu(cfg->cap.length); | |
644 | ||
2a639123 MT |
645 | if (len == 1 || len == 2 || len == 4) { |
646 | assert(len <= sizeof cfg->pci_cfg_data); | |
a93c8d82 | 647 | virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len); |
ada434cd MT |
648 | } |
649 | } | |
650 | } | |
651 | ||
652 | static uint32_t virtio_read_config(PCIDevice *pci_dev, | |
653 | uint32_t address, int len) | |
654 | { | |
3f262b26 | 655 | VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); |
ada434cd MT |
656 | struct virtio_pci_cfg_cap *cfg; |
657 | ||
658 | if (proxy->config_cap && | |
659 | ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, | |
660 | pci_cfg_data), | |
661 | sizeof cfg->pci_cfg_data)) { | |
662 | uint32_t off; | |
663 | uint32_t len; | |
664 | ||
665 | cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); | |
666 | off = le32_to_cpu(cfg->cap.offset); | |
667 | len = le32_to_cpu(cfg->cap.length); | |
668 | ||
2a639123 MT |
669 | if (len == 1 || len == 2 || len == 4) { |
670 | assert(len <= sizeof cfg->pci_cfg_data); | |
a93c8d82 | 671 | virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len); |
ada434cd MT |
672 | } |
673 | } | |
674 | ||
675 | return pci_default_read_config(pci_dev, address, len); | |
53c25cea PB |
676 | } |
677 | ||
7d37d351 | 678 | static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, |
38ce4051 | 679 | unsigned int queue_no, |
d1f6af6a | 680 | unsigned int vector) |
7d37d351 | 681 | { |
7d37d351 | 682 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; |
15b2bd18 | 683 | int ret; |
7d37d351 JK |
684 | |
685 | if (irqfd->users == 0) { | |
def4c557 LM |
686 | KVMRouteChange c = kvm_irqchip_begin_route_changes(kvm_state); |
687 | ret = kvm_irqchip_add_msi_route(&c, vector, &proxy->pci_dev); | |
7d37d351 JK |
688 | if (ret < 0) { |
689 | return ret; | |
690 | } | |
def4c557 | 691 | kvm_irqchip_commit_route_changes(&c); |
7d37d351 JK |
692 | irqfd->virq = ret; |
693 | } | |
694 | irqfd->users++; | |
7d37d351 JK |
695 | return 0; |
696 | } | |
697 | ||
698 | static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, | |
7d37d351 | 699 | unsigned int vector) |
774345f9 MT |
700 | { |
701 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; | |
702 | if (--irqfd->users == 0) { | |
703 | kvm_irqchip_release_virq(kvm_state, irqfd->virq); | |
704 | } | |
705 | } | |
706 | ||
f1d0f15a | 707 | static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, |
a20fa00c | 708 | unsigned int queue_no, |
f1d0f15a MT |
709 | unsigned int vector) |
710 | { | |
711 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; | |
a20fa00c MT |
712 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
713 | VirtQueue *vq = virtio_get_queue(vdev, queue_no); | |
714 | EventNotifier *n = virtio_queue_get_guest_notifier(vq); | |
9be38598 | 715 | return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); |
f1d0f15a MT |
716 | } |
717 | ||
718 | static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, | |
a20fa00c | 719 | unsigned int queue_no, |
f1d0f15a | 720 | unsigned int vector) |
7d37d351 | 721 | { |
a20fa00c MT |
722 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
723 | VirtQueue *vq = virtio_get_queue(vdev, queue_no); | |
724 | EventNotifier *n = virtio_queue_get_guest_notifier(vq); | |
7d37d351 | 725 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; |
15b2bd18 | 726 | int ret; |
7d37d351 | 727 | |
1c9b71a7 | 728 | ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); |
7d37d351 | 729 | assert(ret == 0); |
f1d0f15a | 730 | } |
7d37d351 | 731 | |
38ce4051 | 732 | static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) |
774345f9 MT |
733 | { |
734 | PCIDevice *dev = &proxy->pci_dev; | |
a3fc66d9 | 735 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
181103cd | 736 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
38ce4051 MT |
737 | unsigned int vector; |
738 | int ret, queue_no; | |
a20fa00c | 739 | |
38ce4051 MT |
740 | for (queue_no = 0; queue_no < nvqs; queue_no++) { |
741 | if (!virtio_queue_get_num(vdev, queue_no)) { | |
742 | break; | |
743 | } | |
a20fa00c | 744 | vector = virtio_queue_vector(vdev, queue_no); |
38ce4051 MT |
745 | if (vector >= msix_nr_vectors_allocated(dev)) { |
746 | continue; | |
747 | } | |
748 | ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); | |
774345f9 MT |
749 | if (ret < 0) { |
750 | goto undo; | |
7d37d351 | 751 | } |
38ce4051 MT |
752 | /* If guest supports masking, set up irqfd now. |
753 | * Otherwise, delay until unmasked in the frontend. | |
754 | */ | |
755 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { | |
a20fa00c | 756 | ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); |
38ce4051 MT |
757 | if (ret < 0) { |
758 | kvm_virtio_pci_vq_vector_release(proxy, vector); | |
759 | goto undo; | |
760 | } | |
761 | } | |
7d37d351 | 762 | } |
316011b8 | 763 | return 0; |
316011b8 | 764 | |
38ce4051 MT |
765 | undo: |
766 | while (--queue_no >= 0) { | |
767 | vector = virtio_queue_vector(vdev, queue_no); | |
768 | if (vector >= msix_nr_vectors_allocated(dev)) { | |
769 | continue; | |
774345f9 | 770 | } |
38ce4051 | 771 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
a20fa00c | 772 | kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); |
f1d0f15a | 773 | } |
38ce4051 | 774 | kvm_virtio_pci_vq_vector_release(proxy, vector); |
774345f9 MT |
775 | } |
776 | return ret; | |
7d37d351 JK |
777 | } |
778 | ||
38ce4051 | 779 | static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) |
774345f9 | 780 | { |
38ce4051 | 781 | PCIDevice *dev = &proxy->pci_dev; |
a3fc66d9 | 782 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
774345f9 | 783 | unsigned int vector; |
316011b8 | 784 | int queue_no; |
38ce4051 | 785 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
a20fa00c | 786 | |
774345f9 MT |
787 | for (queue_no = 0; queue_no < nvqs; queue_no++) { |
788 | if (!virtio_queue_get_num(vdev, queue_no)) { | |
789 | break; | |
790 | } | |
a20fa00c | 791 | vector = virtio_queue_vector(vdev, queue_no); |
38ce4051 MT |
792 | if (vector >= msix_nr_vectors_allocated(dev)) { |
793 | continue; | |
794 | } | |
795 | /* If guest supports masking, clean up irqfd now. | |
796 | * Otherwise, it was cleaned when masked in the frontend. | |
797 | */ | |
798 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { | |
a20fa00c | 799 | kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); |
38ce4051 MT |
800 | } |
801 | kvm_virtio_pci_vq_vector_release(proxy, vector); | |
774345f9 MT |
802 | } |
803 | } | |
804 | ||
/*
 * Unmask MSI-X vector @vector for queue @queue_no, updating the KVM MSI
 * route to the (possibly changed) message @msg.
 * Returns 0 on success or a negative errno.
 */
static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
                                       unsigned int queue_no,
                                       unsigned int vector,
                                       MSIMessage msg)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, queue_no);
    EventNotifier *n = virtio_queue_get_guest_notifier(vq);
    VirtIOIRQFD *irqfd;
    int ret = 0;

    if (proxy->vector_irqfd) {
        irqfd = &proxy->vector_irqfd[vector];
        /* Only touch the KVM routing table if the message changed. */
        if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
            ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg,
                                               &proxy->pci_dev);
            if (ret < 0) {
                return ret;
            }
            kvm_irqchip_commit_routes(kvm_state);
        }
    }

    /* If guest supports masking, irqfd is already setup, unmask it.
     * Otherwise, set it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, false);
        /* Test after unmasking to avoid losing events. */
        if (k->guest_notifier_pending &&
            k->guest_notifier_pending(vdev, queue_no)) {
            event_notifier_set(n);
        }
    } else {
        ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
    }
    return ret;
}
844 | ||
/*
 * Mask MSI-X vector @vector for queue @queue_no.
 */
static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
                                      unsigned int queue_no,
                                      unsigned int vector)
{
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);

    /* If guest supports masking, keep irqfd but mask it.
     * Otherwise, clean it up now.
     */
    if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) {
        k->guest_notifier_mask(vdev, queue_no, true);
    } else {
        kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
    }
}
861 | ||
a38b2c49 MT |
/*
 * MSI-X "unmask" notifier: unmask every notifier-backed queue routed
 * through @vector.  On failure, re-mask exactly the queues that were
 * unmasked so far and return the error.
 */
static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
                                    MSIMessage msg)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtQueue *vq = virtio_vector_first_queue(vdev, vector);
    int ret, index, unmasked = 0;

    while (vq) {
        index = virtio_get_queue_index(vq);
        if (!virtio_queue_get_num(vdev, index)) {
            break;
        }
        if (index < proxy->nvqs_with_notifiers) {
            ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg);
            if (ret < 0) {
                goto undo;
            }
            ++unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }

    return 0;

undo:
    /* Walk the same chain again, masking until the unmasked count drops. */
    vq = virtio_vector_first_queue(vdev, vector);
    while (vq && unmasked >= 0) {
        index = virtio_get_queue_index(vq);
        if (index < proxy->nvqs_with_notifiers) {
            virtio_pci_vq_vector_mask(proxy, index, vector);
            --unmasked;
        }
        vq = virtio_vector_next_queue(vq);
    }
    return ret;
}
899 | ||
a38b2c49 | 900 | static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) |
7d37d351 JK |
901 | { |
902 | VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); | |
a3fc66d9 | 903 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
851c2a75 JW |
904 | VirtQueue *vq = virtio_vector_first_queue(vdev, vector); |
905 | int index; | |
7d37d351 | 906 | |
851c2a75 JW |
907 | while (vq) { |
908 | index = virtio_get_queue_index(vq); | |
909 | if (!virtio_queue_get_num(vdev, index)) { | |
7d37d351 JK |
910 | break; |
911 | } | |
6652d081 | 912 | if (index < proxy->nvqs_with_notifiers) { |
a20fa00c | 913 | virtio_pci_vq_vector_mask(proxy, index, vector); |
6652d081 | 914 | } |
851c2a75 | 915 | vq = virtio_vector_next_queue(vq); |
7d37d351 JK |
916 | } |
917 | } | |
918 | ||
a38b2c49 MT |
/*
 * MSI-X "poll" notifier: for each masked vector in the range
 * [@vector_start, @vector_end), latch the pending bit if its queue has
 * an outstanding guest notification.
 */
static void virtio_pci_vector_poll(PCIDevice *dev,
                                   unsigned int vector_start,
                                   unsigned int vector_end)
{
    VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int queue_no;
    unsigned int vector;
    EventNotifier *notifier;
    VirtQueue *vq;

    for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
        if (!virtio_queue_get_num(vdev, queue_no)) {
            break;
        }
        vector = virtio_queue_vector(vdev, queue_no);
        if (vector < vector_start || vector >= vector_end ||
            !msix_is_masked(dev, vector)) {
            continue;
        }
        vq = virtio_get_queue(vdev, queue_no);
        notifier = virtio_queue_get_guest_notifier(vq);
        /* Prefer the device's own pending callback when provided;
         * otherwise consult (and clear) the guest notifier eventfd.
         */
        if (k->guest_notifier_pending) {
            if (k->guest_notifier_pending(vdev, queue_no)) {
                msix_set_pending(dev, vector);
            }
        } else if (event_notifier_test_and_clear(notifier)) {
            msix_set_pending(dev, vector);
        }
    }
}
951 | ||
/*
 * (De)initialize the guest notifier eventfd for queue @n and attach or
 * detach its fd handler.  @with_irqfd indicates the interrupt is injected
 * by KVM directly, so no userspace handler is needed.
 * Returns 0 on success or a negative errno from event_notifier_init().
 */
static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
                                         bool with_irqfd)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);
    VirtQueue *vq = virtio_get_queue(vdev, n);
    EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);

    if (assign) {
        int r = event_notifier_init(notifier, 0);
        if (r < 0) {
            return r;
        }
        virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
    } else {
        virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
        event_notifier_cleanup(notifier);
    }

    /* Without MSI-X the mask/unmask vector notifiers never fire, so
     * apply the mask state change directly here.
     */
    if (!msix_enabled(&proxy->pci_dev) &&
        vdev->use_guest_notifier_mask &&
        vdc->guest_notifier_mask) {
        vdc->guest_notifier_mask(vdev, n, !assign);
    }

    return 0;
}
980 | ||
d2a0ccc6 | 981 | static bool virtio_pci_query_guest_notifiers(DeviceState *d) |
5430a28f | 982 | { |
d2a0ccc6 | 983 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
5430a28f MT |
984 | return msix_enabled(&proxy->pci_dev); |
985 | } | |
986 | ||
/*
 * Assign or deassign guest notifiers for the first @nvqs queues, and
 * (when MSI-X plus KVM irqfd are available) wire up the MSI-X vector
 * notifiers and irqfds.  On assignment failure, everything set up so
 * far is rolled back.  Returns 0 or a negative errno.
 */
static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev);
    int r, n;
    bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
        kvm_msi_via_irqfd_enabled();

    nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX);

    /* When deassigning, pass a consistent nvqs value
     * to avoid leaking notifiers.
     */
    assert(assign || nvqs == proxy->nvqs_with_notifiers);

    proxy->nvqs_with_notifiers = nvqs;

    /* Must unset vector notifier while guest notifier is still assigned */
    if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) {
        msix_unset_vector_notifiers(&proxy->pci_dev);
        if (proxy->vector_irqfd) {
            kvm_virtio_pci_vector_release(proxy, nvqs);
            g_free(proxy->vector_irqfd);
            proxy->vector_irqfd = NULL;
        }
    }

    for (n = 0; n < nvqs; n++) {
        if (!virtio_queue_get_num(vdev, n)) {
            break;
        }

        r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd);
        if (r < 0) {
            goto assign_error;
        }
    }

    /* Must set vector notifier after guest notifier has been assigned */
    if ((with_irqfd || k->guest_notifier_mask) && assign) {
        if (with_irqfd) {
            /* One irqfd slot per allocated MSI-X vector. */
            proxy->vector_irqfd =
                g_malloc0(sizeof(*proxy->vector_irqfd) *
                          msix_nr_vectors_allocated(&proxy->pci_dev));
            r = kvm_virtio_pci_vector_use(proxy, nvqs);
            if (r < 0) {
                goto assign_error;
            }
        }
        r = msix_set_vector_notifiers(&proxy->pci_dev,
                                      virtio_pci_vector_unmask,
                                      virtio_pci_vector_mask,
                                      virtio_pci_vector_poll);
        if (r < 0) {
            goto notifiers_error;
        }
    }

    return 0;

notifiers_error:
    if (with_irqfd) {
        assert(assign);
        kvm_virtio_pci_vector_release(proxy, nvqs);
    }

assign_error:
    /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
    assert(assign);
    while (--n >= 0) {
        virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
    }
    return r;
}
1062 | ||
6f80e617 TB |
1063 | static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n, |
1064 | MemoryRegion *mr, bool assign) | |
1065 | { | |
1066 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
1067 | int offset; | |
1068 | ||
1069 | if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) || | |
1070 | virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) { | |
1071 | return -1; | |
1072 | } | |
1073 | ||
1074 | if (assign) { | |
1075 | offset = virtio_pci_queue_mem_mult(proxy) * n; | |
1076 | memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1); | |
1077 | } else { | |
1078 | memory_region_del_subregion(&proxy->notify.mr, mr); | |
1079 | } | |
1080 | ||
1081 | return 0; | |
1082 | } | |
1083 | ||
/*
 * VM run-state hook: start ioeventfd when the machine starts running and
 * stop it when it halts.  Also compensates for old QEMU versions that
 * migrated without enabling PCI bus mastering on status writes.
 */
static void virtio_pci_vmstate_change(DeviceState *d, bool running)
{
    VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (running) {
        /* Old QEMU versions did not set bus master enable on status write.
         * Detect DRIVER set and enable it.
         */
        if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) &&
            (vdev->status & VIRTIO_CONFIG_S_DRIVER) &&
            !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
            pci_default_write_config(&proxy->pci_dev, PCI_COMMAND,
                                     proxy->pci_dev.config[PCI_COMMAND] |
                                     PCI_COMMAND_MASTER, 1);
        }
        virtio_pci_start_ioeventfd(proxy);
    } else {
        virtio_pci_stop_ioeventfd(proxy);
    }
}
1105 | ||
085bccb7 FK |
1106 | /* |
1107 | * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. | |
1108 | */ | |
1109 | ||
e0d686bf JW |
1110 | static int virtio_pci_query_nvectors(DeviceState *d) |
1111 | { | |
1112 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1113 | ||
1114 | return proxy->nvectors; | |
1115 | } | |
1116 | ||
8607f5c3 JW |
1117 | static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) |
1118 | { | |
1119 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1120 | PCIDevice *dev = &proxy->pci_dev; | |
1121 | ||
f0edf239 | 1122 | return pci_get_address_space(dev); |
8607f5c3 JW |
1123 | } |
1124 | ||
3d1e5d86 JW |
1125 | static bool virtio_pci_iommu_enabled(DeviceState *d) |
1126 | { | |
1127 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1128 | PCIDevice *dev = &proxy->pci_dev; | |
1129 | AddressSpace *dma_as = pci_device_iommu_address_space(dev); | |
1130 | ||
1131 | if (dma_as == &address_space_memory) { | |
1132 | return false; | |
1133 | } | |
1134 | ||
1135 | return true; | |
1136 | } | |
1137 | ||
f19bcdfe JW |
1138 | static bool virtio_pci_queue_enabled(DeviceState *d, int n) |
1139 | { | |
1140 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1141 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1142 | ||
1143 | if (virtio_vdev_has_feature(vdev, VIRTIO_F_VERSION_1)) { | |
a48aaf88 | 1144 | return proxy->vqs[n].enabled; |
f19bcdfe JW |
1145 | } |
1146 | ||
0c9753eb | 1147 | return virtio_queue_enabled_legacy(vdev, n); |
f19bcdfe JW |
1148 | } |
1149 | ||
/*
 * Append a vendor-specific PCI capability describing one virtio modern
 * region.  Returns the config-space offset of the new capability.
 */
static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy,
                                  struct virtio_pci_cap *cap)
{
    PCIDevice *dev = &proxy->pci_dev;
    int offset;

    offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0,
                                cap->cap_len, &error_abort);

    assert(cap->cap_len >= sizeof *cap);
    /* Copy the capability body after the generic (id, next) header that
     * pci_add_capability() already wrote.
     */
    memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len,
           cap->cap_len - PCI_CAP_FLAGS);

    return offset;
}
1165 | ||
dfb8e184 MT |
/*
 * MMIO read handler for the virtio modern "common config" structure
 * (struct virtio_pci_common_cfg).  @addr is the offset within the region.
 */
static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint32_t val = 0;
    int i;

    /* No backend plugged: reads return all-ones. */
    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        val = proxy->dfselect;
        break;
    case VIRTIO_PCI_COMMON_DF:
        /* Device features, 32 bits at a time; legacy-only bits hidden. */
        if (proxy->dfselect <= 1) {
            VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev);

            val = (vdev->host_features & ~vdc->legacy_features) >>
                (32 * proxy->dfselect);
        }
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        val = proxy->gfselect;
        break;
    case VIRTIO_PCI_COMMON_GF:
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            val = proxy->guest_features[proxy->gfselect];
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        val = vdev->config_vector;
        break;
    case VIRTIO_PCI_COMMON_NUMQ:
        /* Highest populated queue index, plus one. */
        for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) {
            if (virtio_queue_get_num(vdev, i)) {
                val = i + 1;
            }
        }
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        val = vdev->status;
        break;
    case VIRTIO_PCI_COMMON_CFGGENERATION:
        val = vdev->generation;
        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        val = virtio_queue_get_num(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        val = virtio_queue_vector(vdev, vdev->queue_sel);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        val = proxy->vqs[vdev->queue_sel].enabled;
        break;
    case VIRTIO_PCI_COMMON_Q_NOFF:
        /* Simply map queues in order */
        val = vdev->queue_sel;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        val = proxy->vqs[vdev->queue_sel].desc[0];
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        val = proxy->vqs[vdev->queue_sel].desc[1];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        val = proxy->vqs[vdev->queue_sel].avail[0];
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        val = proxy->vqs[vdev->queue_sel].avail[1];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        val = proxy->vqs[vdev->queue_sel].used[0];
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        val = proxy->vqs[vdev->queue_sel].used[1];
        break;
    default:
        val = 0;
    }

    return val;
}
1254 | ||
/*
 * MMIO write handler for the virtio modern "common config" structure.
 * @addr is the offset within the region; unknown offsets are ignored.
 */
static void virtio_pci_common_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (addr) {
    case VIRTIO_PCI_COMMON_DFSELECT:
        proxy->dfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GFSELECT:
        proxy->gfselect = val;
        break;
    case VIRTIO_PCI_COMMON_GF:
        /* Latch the selected 32-bit word, then push the combined 64-bit
         * feature set to the device.
         */
        if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) {
            proxy->guest_features[proxy->gfselect] = val;
            virtio_set_features(vdev,
                                (((uint64_t)proxy->guest_features[1]) << 32) |
                                proxy->guest_features[0]);
        }
        break;
    case VIRTIO_PCI_COMMON_MSIX:
        msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        vdev->config_vector = val;
        break;
    case VIRTIO_PCI_COMMON_STATUS:
        if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
            virtio_pci_stop_ioeventfd(proxy);
        }

        virtio_set_status(vdev, val & 0xFF);

        if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
            virtio_pci_start_ioeventfd(proxy);
        }

        /* Writing status 0 is a driver-initiated device reset. */
        if (vdev->status == 0) {
            virtio_pci_reset(DEVICE(proxy));
        }

        break;
    case VIRTIO_PCI_COMMON_Q_SELECT:
        if (val < VIRTIO_QUEUE_MAX) {
            vdev->queue_sel = val;
        }
        break;
    case VIRTIO_PCI_COMMON_Q_SIZE:
        proxy->vqs[vdev->queue_sel].num = val;
        virtio_queue_set_num(vdev, vdev->queue_sel,
                             proxy->vqs[vdev->queue_sel].num);
        break;
    case VIRTIO_PCI_COMMON_Q_MSIX:
        msix_vector_unuse(&proxy->pci_dev,
                          virtio_queue_vector(vdev, vdev->queue_sel));
        /* Make it possible for guest to discover an error took place. */
        if (msix_vector_use(&proxy->pci_dev, val) < 0) {
            val = VIRTIO_NO_VECTOR;
        }
        virtio_queue_set_vector(vdev, vdev->queue_sel, val);
        break;
    case VIRTIO_PCI_COMMON_Q_ENABLE:
        /* Only 1 is a legal write; commit the latched ring addresses. */
        if (val == 1) {
            virtio_queue_set_num(vdev, vdev->queue_sel,
                                 proxy->vqs[vdev->queue_sel].num);
            virtio_queue_set_rings(vdev, vdev->queue_sel,
                       ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].desc[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].avail[0],
                       ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 |
                       proxy->vqs[vdev->queue_sel].used[0]);
            proxy->vqs[vdev->queue_sel].enabled = 1;
        } else {
            virtio_error(vdev, "wrong value for queue_enable %"PRIx64, val);
        }
        break;
    case VIRTIO_PCI_COMMON_Q_DESCLO:
        proxy->vqs[vdev->queue_sel].desc[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_DESCHI:
        proxy->vqs[vdev->queue_sel].desc[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILLO:
        proxy->vqs[vdev->queue_sel].avail[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_AVAILHI:
        proxy->vqs[vdev->queue_sel].avail[1] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDLO:
        proxy->vqs[vdev->queue_sel].used[0] = val;
        break;
    case VIRTIO_PCI_COMMON_Q_USEDHI:
        proxy->vqs[vdev->queue_sel].used[1] = val;
        break;
    default:
        break;
    }
}
1361 | ||
1362 | ||
1363 | static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr, | |
1364 | unsigned size) | |
1365 | { | |
df07a8f8 AM |
1366 | VirtIOPCIProxy *proxy = opaque; |
1367 | if (virtio_bus_get_device(&proxy->bus) == NULL) { | |
1368 | return UINT64_MAX; | |
1369 | } | |
1370 | ||
dfb8e184 MT |
1371 | return 0; |
1372 | } | |
1373 | ||
1374 | static void virtio_pci_notify_write(void *opaque, hwaddr addr, | |
1375 | uint64_t val, unsigned size) | |
1376 | { | |
ccec7e96 AM |
1377 | VirtIOPCIProxy *proxy = opaque; |
1378 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1379 | ||
d9997d89 | 1380 | unsigned queue = addr / virtio_pci_queue_mem_mult(proxy); |
dfb8e184 | 1381 | |
ccec7e96 | 1382 | if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { |
dfb8e184 MT |
1383 | virtio_queue_notify(vdev, queue); |
1384 | } | |
1385 | } | |
1386 | ||
9824d2a3 JW |
1387 | static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr, |
1388 | uint64_t val, unsigned size) | |
1389 | { | |
ccec7e96 AM |
1390 | VirtIOPCIProxy *proxy = opaque; |
1391 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1392 | ||
9824d2a3 JW |
1393 | unsigned queue = val; |
1394 | ||
ccec7e96 | 1395 | if (vdev != NULL && queue < VIRTIO_QUEUE_MAX) { |
9824d2a3 JW |
1396 | virtio_queue_notify(vdev, queue); |
1397 | } | |
1398 | } | |
1399 | ||
dfb8e184 MT |
/*
 * ISR register read: read-to-clear semantics.  Atomically fetches and
 * zeroes the ISR, then deasserts the INTx line.
 */
static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr,
                                    unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    val = qatomic_xchg(&vdev->isr, 0);
    pci_irq_deassert(&proxy->pci_dev);
    return val;
}
1415 | ||
/* The ISR register is read-only; writes are silently ignored. */
static void virtio_pci_isr_write(void *opaque, hwaddr addr,
                                 uint64_t val, unsigned size)
{
}
1420 | ||
/*
 * Read from the device-specific config region; forwarded to the vdev's
 * modern config accessors at the given width.
 */
static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr,
                                       unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);
    uint64_t val;

    if (vdev == NULL) {
        return UINT64_MAX;
    }

    switch (size) {
    case 1:
        val = virtio_config_modern_readb(vdev, addr);
        break;
    case 2:
        val = virtio_config_modern_readw(vdev, addr);
        break;
    case 4:
        val = virtio_config_modern_readl(vdev, addr);
        break;
    default:
        val = 0;
        break;
    }
    return val;
}
1448 | ||
/*
 * Write to the device-specific config region; forwarded to the vdev's
 * modern config accessors at the given width.  Unsupported widths are
 * ignored.
 */
static void virtio_pci_device_write(void *opaque, hwaddr addr,
                                    uint64_t val, unsigned size)
{
    VirtIOPCIProxy *proxy = opaque;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (vdev == NULL) {
        return;
    }

    switch (size) {
    case 1:
        virtio_config_modern_writeb(vdev, addr, val);
        break;
    case 2:
        virtio_config_modern_writew(vdev, addr, val);
        break;
    case 4:
        virtio_config_modern_writel(vdev, addr, val);
        break;
    }
}
1471 | ||
b74259e3 AB |
1472 | static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy, |
1473 | const char *vdev_name) | |
1141ce21 GH |
1474 | { |
1475 | static const MemoryRegionOps common_ops = { | |
1476 | .read = virtio_pci_common_read, | |
1477 | .write = virtio_pci_common_write, | |
1478 | .impl = { | |
1479 | .min_access_size = 1, | |
1480 | .max_access_size = 4, | |
1481 | }, | |
1482 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1483 | }; | |
1484 | static const MemoryRegionOps isr_ops = { | |
1485 | .read = virtio_pci_isr_read, | |
1486 | .write = virtio_pci_isr_write, | |
1487 | .impl = { | |
1488 | .min_access_size = 1, | |
1489 | .max_access_size = 4, | |
1490 | }, | |
1491 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1492 | }; | |
1493 | static const MemoryRegionOps device_ops = { | |
1494 | .read = virtio_pci_device_read, | |
1495 | .write = virtio_pci_device_write, | |
1496 | .impl = { | |
1497 | .min_access_size = 1, | |
1498 | .max_access_size = 4, | |
1499 | }, | |
1500 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1501 | }; | |
1502 | static const MemoryRegionOps notify_ops = { | |
1503 | .read = virtio_pci_notify_read, | |
1504 | .write = virtio_pci_notify_write, | |
1505 | .impl = { | |
1506 | .min_access_size = 1, | |
1507 | .max_access_size = 4, | |
1508 | }, | |
1509 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1510 | }; | |
9824d2a3 JW |
1511 | static const MemoryRegionOps notify_pio_ops = { |
1512 | .read = virtio_pci_notify_read, | |
1513 | .write = virtio_pci_notify_write_pio, | |
1514 | .impl = { | |
1515 | .min_access_size = 1, | |
1516 | .max_access_size = 4, | |
1517 | }, | |
1518 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1519 | }; | |
b74259e3 | 1520 | g_autoptr(GString) name = g_string_new(NULL); |
9824d2a3 | 1521 | |
b74259e3 | 1522 | g_string_printf(name, "virtio-pci-common-%s", vdev_name); |
1141ce21 GH |
1523 | memory_region_init_io(&proxy->common.mr, OBJECT(proxy), |
1524 | &common_ops, | |
1525 | proxy, | |
b74259e3 | 1526 | name->str, |
b6ce27a5 | 1527 | proxy->common.size); |
a3cc2e81 | 1528 | |
b74259e3 | 1529 | g_string_printf(name, "virtio-pci-isr-%s", vdev_name); |
1141ce21 GH |
1530 | memory_region_init_io(&proxy->isr.mr, OBJECT(proxy), |
1531 | &isr_ops, | |
1532 | proxy, | |
b74259e3 | 1533 | name->str, |
b6ce27a5 | 1534 | proxy->isr.size); |
a3cc2e81 | 1535 | |
b74259e3 | 1536 | g_string_printf(name, "virtio-pci-device-%s", vdev_name); |
1141ce21 GH |
1537 | memory_region_init_io(&proxy->device.mr, OBJECT(proxy), |
1538 | &device_ops, | |
ccec7e96 | 1539 | proxy, |
b74259e3 | 1540 | name->str, |
b6ce27a5 | 1541 | proxy->device.size); |
a3cc2e81 | 1542 | |
b74259e3 | 1543 | g_string_printf(name, "virtio-pci-notify-%s", vdev_name); |
1141ce21 GH |
1544 | memory_region_init_io(&proxy->notify.mr, OBJECT(proxy), |
1545 | ¬ify_ops, | |
ccec7e96 | 1546 | proxy, |
b74259e3 | 1547 | name->str, |
b6ce27a5 | 1548 | proxy->notify.size); |
9824d2a3 | 1549 | |
b74259e3 | 1550 | g_string_printf(name, "virtio-pci-notify-pio-%s", vdev_name); |
9824d2a3 JW |
1551 | memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy), |
1552 | ¬ify_pio_ops, | |
ccec7e96 | 1553 | proxy, |
b74259e3 | 1554 | name->str, |
e3aab6c7 | 1555 | proxy->notify_pio.size); |
a3cc2e81 GH |
1556 | } |
1557 | ||
/*
 * Map a modern region into container @mr at its fixed offset and
 * advertise it through a vendor capability pointing at BAR @bar.
 */
static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy,
                                         VirtIOPCIRegion *region,
                                         struct virtio_pci_cap *cap,
                                         MemoryRegion *mr,
                                         uint8_t bar)
{
    memory_region_add_subregion(mr, region->offset, &region->mr);

    cap->cfg_type = region->type;
    cap->bar = bar;
    cap->offset = cpu_to_le32(region->offset);
    cap->length = cpu_to_le32(region->size);
    virtio_pci_add_mem_cap(proxy, cap);

}
1573 | ||
/* Map a modern region into the memory BAR. */
static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy,
                                             VirtIOPCIRegion *region,
                                             struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->modern_bar, proxy->modern_mem_bar_idx);
}
dfb8e184 | 1581 | |
9824d2a3 JW |
/* Map a modern region into the I/O (PIO) BAR. */
static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy,
                                            VirtIOPCIRegion *region,
                                            struct virtio_pci_cap *cap)
{
    virtio_pci_modern_region_map(proxy, region, cap,
                                 &proxy->io_bar, proxy->modern_io_bar_idx);
}
1589 | ||
1590 | static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy, | |
1591 | VirtIOPCIRegion *region) | |
27462695 MT |
1592 | { |
1593 | memory_region_del_subregion(&proxy->modern_bar, | |
1594 | ®ion->mr); | |
1595 | } | |
1596 | ||
9824d2a3 JW |
1597 | static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy, |
1598 | VirtIOPCIRegion *region) | |
1599 | { | |
1600 | memory_region_del_subregion(&proxy->io_bar, | |
1601 | ®ion->mr); | |
1602 | } | |
1603 | ||
d1b4259f MC |
/*
 * Called by virtio-bus before the backend device is plugged: advertise
 * transport-level host features (VERSION_1 only in modern mode).
 */
static void virtio_pci_pre_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    if (virtio_pci_modern(proxy)) {
        virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1);
    }

    virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE);
}
1615 | ||
/*
 * This is called by virtio-bus just after the device is plugged.
 *
 * Finalizes the PCI presentation of the freshly plugged VirtIODevice:
 * validates modern/legacy constraints, programs the PCI IDs, maps the
 * modern capability regions and BARs, initializes MSI-X, installs the
 * config-space accessors, and registers the legacy I/O BAR if enabled.
 */
static void virtio_pci_device_plugged(DeviceState *d, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
    VirtioBusState *bus = &proxy->bus;
    bool legacy = virtio_pci_legacy(proxy);
    bool modern;
    bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY;
    uint8_t *config;
    uint32_t size;
    VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus);

    /*
     * Virtio capabilities present without
     * VIRTIO_F_VERSION_1 confuses guests
     */
    if (!proxy->ignore_backend_features &&
            !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) {
        virtio_pci_disable_modern(proxy);

        if (!legacy) {
            error_setg(errp, "Device doesn't support modern mode, and legacy"
                             " mode is disabled");
            error_append_hint(errp, "Set disable-legacy to off\n");

            return;
        }
    }

    /* Re-evaluate: virtio_pci_disable_modern() above may have changed it. */
    modern = virtio_pci_modern(proxy);

    config = proxy->pci_dev.config;
    if (proxy->class_code) {
        pci_config_set_class(config, proxy->class_code);
    }

    if (legacy) {
        if (!virtio_legacy_allowed(vdev)) {
            /*
             * To avoid migration issues, we allow legacy mode when legacy
             * check is disabled in the old machine types (< 5.1).
             */
            if (virtio_legacy_check_disabled(vdev)) {
                warn_report("device is modern-only, but for backward "
                            "compatibility legacy is allowed");
            } else {
                error_setg(errp,
                           "device is modern-only, use disable-legacy=on");
                return;
            }
        }
        if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) {
            error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by"
                       " neither legacy nor transitional device");
            return ;
        }
        /*
         * Legacy and transitional devices use specific subsystem IDs.
         * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID)
         * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default.
         */
        pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
    } else {
        /* pure virtio-1.0: device ID is 0x1040 + the virtio device id */
        pci_set_word(config + PCI_VENDOR_ID,
                     PCI_VENDOR_ID_REDHAT_QUMRANET);
        pci_set_word(config + PCI_DEVICE_ID,
                     0x1040 + virtio_bus_get_vdev_id(bus));
        pci_config_set_revision(config, 1);
    }
    config[PCI_INTERRUPT_PIN] = 1;


    if (modern) {
        /*
         * Capability templates; BAR number, offset and length fields are
         * filled in by the region-map helpers below.
         */
        struct virtio_pci_cap cap = {
            .cap_len = sizeof cap,
        };
        struct virtio_pci_notify_cap notify = {
            .cap.cap_len = sizeof notify,
            .notify_off_multiplier =
                cpu_to_le32(virtio_pci_queue_mem_mult(proxy)),
        };
        struct virtio_pci_cfg_cap cfg = {
            .cap.cap_len = sizeof cfg,
            .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG,
        };
        struct virtio_pci_notify_cap notify_pio = {
            /* sizeof notify == sizeof notify_pio: same struct type */
            .cap.cap_len = sizeof notify,
            /* multiplier 0: every queue notifies at the same PIO offset */
            .notify_off_multiplier = cpu_to_le32(0x0),
        };

        struct virtio_pci_cfg_cap *cfg_mask;

        virtio_pci_modern_regions_init(proxy, vdev->name);

        virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap);
        virtio_pci_modern_mem_region_map(proxy, &proxy->notify, &notify.cap);

        if (modern_pio) {
            memory_region_init(&proxy->io_bar, OBJECT(proxy),
                               "virtio-pci-io", 0x4);

            pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx,
                             PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar);

            virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio,
                                            &notify_pio.cap);
        }

        pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_MEMORY |
                         PCI_BASE_ADDRESS_MEM_PREFETCH |
                         PCI_BASE_ADDRESS_MEM_TYPE_64,
                         &proxy->modern_bar);

        /*
         * Add the PCI-cfg access capability and make its window fields
         * (bar/offset/length/data) guest-writable via the write mask.
         */
        proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap);
        cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap);
        pci_set_byte(&cfg_mask->cap.bar, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0);
        pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0);
        pci_set_long(cfg_mask->pci_cfg_data, ~0x0);
    }

    if (proxy->nvectors) {
        int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors,
                                          proxy->msix_bar_idx, NULL);
        if (err) {
            /* Notice when a system that supports MSIx can't initialize it */
            if (err != -ENOTSUP) {
                warn_report("unable to init msix vectors to %" PRIu32,
                            proxy->nvectors);
            }
            /* Fall back to operating without MSI-X instead of failing. */
            proxy->nvectors = 0;
        }
    }

    proxy->pci_dev.config_write = virtio_write_config;
    proxy->pci_dev.config_read = virtio_read_config;

    if (legacy) {
        /*
         * Legacy I/O BAR covers the transport header plus the device's
         * config space; PCI BAR sizes must be powers of 2, so round up.
         */
        size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
            + virtio_bus_get_vdev_config_len(bus);
        size = pow2ceil(size);

        memory_region_init_io(&proxy->bar, OBJECT(proxy),
                              &virtio_pci_config_ops,
                              proxy, "virtio-pci", size);

        pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx,
                         PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar);
    }
}
1770 | ||
06a13073 PB |
1771 | static void virtio_pci_device_unplugged(DeviceState *d) |
1772 | { | |
06a13073 | 1773 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); |
9a4c0e22 | 1774 | bool modern = virtio_pci_modern(proxy); |
9824d2a3 | 1775 | bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; |
06a13073 PB |
1776 | |
1777 | virtio_pci_stop_ioeventfd(proxy); | |
27462695 MT |
1778 | |
1779 | if (modern) { | |
9824d2a3 JW |
1780 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); |
1781 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); | |
1782 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); | |
1783 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); | |
1784 | if (modern_pio) { | |
1785 | virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); | |
1786 | } | |
27462695 | 1787 | } |
06a13073 PB |
1788 | } |
1789 | ||
/*
 * PCIDeviceClass realize for the abstract virtio PCI proxy: chooses the
 * default BAR layout and modern capability region offsets, resolves the
 * modern/legacy mode defaults, sets up PCIe capabilities when plugged
 * into an express port, then creates the proxy's virtio bus and invokes
 * the subclass realize hook.
 */
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    /* Express parent bus that is not the root bus => PCIe port device. */
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /* fd-based ioevents can't be synchronized in record/replay */
    if (replay_mode != REPLAY_MODE_NONE) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     * region 0 -- virtio legacy io bar
     * region 1 -- msi-x bar
     * region 2 -- virtio modern io bar (off by default)
     * region 4+5 -- virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx = 0;
    proxy->msix_bar_idx = 1;
    proxy->modern_io_bar_idx = 2;
    proxy->modern_mem_bar_idx = 4;

    /* Modern capability regions sit back-to-back inside the modern BAR. */
    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    /* disable-legacy=auto resolves to on for PCIe ports, off otherwise. */
    if (proxy->disable_legacy == ON_OFF_AUTO_AUTO) {
        proxy->disable_legacy = pcie_port ? ON_OFF_AUTO_ON : ON_OFF_AUTO_OFF;
    }

    if (!virtio_pci_modern(proxy) && !virtio_pci_legacy(proxy)) {
        error_setg(errp, "device cannot work as neither modern nor legacy mode"
                   " is enabled");
        error_append_hint(errp, "Set either disable-modern or disable-legacy"
                          " to off\n");
        return;
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;
        /*
         * Extended capabilities are appended starting at the end of the
         * standard config space; the offset advances as each one is added.
         */
        uint16_t last_pcie_cap_offset = PCI_CONFIG_SPACE_SIZE;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_AER) {
            pcie_aer_init(pci_dev, PCI_ERR_VER, last_pcie_cap_offset,
                          PCI_ERR_SIZEOF, NULL);
            last_pcie_cap_offset += PCI_ERR_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register: let the guest write
             * the power state field. */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, last_pcie_cap_offset,
                          proxy->flags & VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED);
            last_pcie_cap_offset += PCI_EXT_CAP_ATS_SIZEOF;
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_FLR) {
            /* Set Function Level Reset capability bit */
            pcie_cap_flr_init(pci_dev);
        }
    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
1924 | ||
1925 | static void virtio_pci_exit(PCIDevice *pci_dev) | |
1926 | { | |
fdfa3b1d AM |
1927 | VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); |
1928 | bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) && | |
1929 | !pci_bus_is_root(pci_get_bus(pci_dev)); | |
1930 | ||
8b81bb3b | 1931 | msix_uninit_exclusive_bar(pci_dev); |
fdfa3b1d AM |
1932 | if (proxy->flags & VIRTIO_PCI_FLAG_AER && pcie_port && |
1933 | pci_is_express(pci_dev)) { | |
1934 | pcie_aer_exit(pci_dev); | |
1935 | } | |
085bccb7 FK |
1936 | } |
1937 | ||
/*
 * DeviceClass reset: stops ioeventfd, resets the virtio bus and device,
 * releases all MSI-X vectors, clears per-queue proxy state, and resets
 * the PCIe error/link/power-management registers for express devices.
 */
static void virtio_pci_reset(DeviceState *qdev)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
    VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
    PCIDevice *dev = PCI_DEVICE(qdev);
    int i;

    virtio_pci_stop_ioeventfd(proxy);
    virtio_bus_reset(bus);
    msix_unuse_all_vectors(&proxy->pci_dev);

    /* Forget guest-programmed addresses/sizes for every possible queue. */
    for (i = 0; i < VIRTIO_QUEUE_MAX; i++) {
        proxy->vqs[i].enabled = 0;
        proxy->vqs[i].num = 0;
        proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0;
        proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0;
        proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0;
    }

    if (pci_is_express(dev)) {
        pcie_cap_deverr_reset(dev);
        pcie_cap_lnkctl_reset(dev);

        /* Clear the PM control/status word.
         * NOTE(review): assumes exp.pm_cap was set during realize whenever
         * pci_is_express() holds — confirm; otherwise this writes at the
         * start of config space. */
        pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0);
    }
}
1964 | ||
/*
 * qdev properties shared by every virtio PCI proxy.  Most are single-bit
 * flags stored in VirtIOPCIProxy::flags; names prefixed with "x-" are
 * internal/compat knobs rather than stable user interfaces.
 */
static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    DEFINE_PROP_BIT("x-ats-page-aligned", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_PAGE_ALIGNED_BIT, true),
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_BIT("x-pcie-flr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_FLR_BIT, true),
    DEFINE_PROP_BIT("aer", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_AER_BIT, false),
    DEFINE_PROP_END_OF_LIST(),
};
1994 | ||
0560b0e9 SL |
1995 | static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) |
1996 | { | |
1997 | VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev); | |
1998 | VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); | |
1999 | PCIDevice *pci_dev = &proxy->pci_dev; | |
2000 | ||
2001 | if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && | |
9a4c0e22 | 2002 | virtio_pci_modern(proxy)) { |
0560b0e9 SL |
2003 | pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; |
2004 | } | |
2005 | ||
2006 | vpciklass->parent_dc_realize(qdev, errp); | |
2007 | } | |
2008 | ||
085bccb7 FK |
/*
 * Class init for the abstract TYPE_VIRTIO_PCI base: installs the PCI
 * realize/exit hooks, the default Red Hat/Qumranet PCI identity, the
 * reset handler, and chains DeviceClass realize through
 * virtio_pci_dc_realize so PCIe capability can be decided first.
 */
static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    device_class_set_props(dc, virtio_pci_properties);
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}
2025 | ||
/* Abstract QOM base type shared by all virtio PCI proxy devices. */
static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};
2034 | ||
a4ee4c8b EH |
/* Mode-selection properties exposed only on the generic (user-visible)
 * virtio PCI types; see virtio_pci_generic_class_init(). */
static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};
2041 | ||
/* Class init trampoline: forwards to the device family's class_init
 * callback, which virtio_pci_types_register() stashed in class_data. */
static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}
2049 | ||
/* Attach the disable-legacy/disable-modern properties to a generic type. */
static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    device_class_set_props(dc, virtio_pci_generic_properties);
}
2056 | ||
a4ee4c8b EH |
/* Transitional variant: force both legacy and modern interfaces on. */
static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}
2064 | ||
/* Non-transitional variant: modern-only, legacy interface forced off. */
static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
2072 | ||
/*
 * Register the QOM types for a virtio PCI device family described by @t.
 *
 * Always registers an abstract base type — either t->base_name, or a
 * synthesized "<generic_name>-base-type" when no base name was given —
 * and then, as requested by @t, a generic type, a non-transitional type
 * (PCIe + Conventional PCI) and a transitional type (Conventional only).
 */
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    char *base_name = NULL;
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .abstract      = true,
        .interfaces    = t->interfaces,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        /* use intermediate %s-base-type to add generic device props */
        base_name = g_strdup_printf("%s-base-type", t->generic_name);
        base_type_info.name = base_name;
        base_type_info.class_init = virtio_pci_generic_class_init;

        generic_type_info.parent = base_name;
        generic_type_info.class_init = virtio_pci_base_class_init;
        generic_type_info.class_data = (void *)t;

        /* With no explicit base name, variants cannot be requested. */
        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    } else {
        base_type_info.class_init = virtio_pci_base_class_init;
        base_type_info.class_data = (void *)t;
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
    /* type_register() copies names, so the synthesized one can go now. */
    g_free(base_name);
}
2151 | ||
1436f32a SH |
2152 | unsigned virtio_pci_optimal_num_queues(unsigned fixed_queues) |
2153 | { | |
2154 | /* | |
2155 | * 1:1 vq to vCPU mapping is ideal because the same vCPU that submitted | |
2156 | * virtqueue buffers can handle their completion. When a different vCPU | |
2157 | * handles completion it may need to IPI the vCPU that submitted the | |
2158 | * request and this adds overhead. | |
2159 | * | |
2160 | * Virtqueues consume guest RAM and MSI-X vectors. This is wasteful in | |
2161 | * guests with very many vCPUs and a device that is only used by a few | |
2162 | * vCPUs. Unfortunately optimizing that case requires manual pinning inside | |
2163 | * the guest, so those users might as well manually set the number of | |
2164 | * queues. There is no upper limit that can be applied automatically and | |
2165 | * doing so arbitrarily would result in a sudden performance drop once the | |
2166 | * threshold number of vCPUs is exceeded. | |
2167 | */ | |
2168 | unsigned num_queues = current_machine->smp.cpus; | |
2169 | ||
2170 | /* | |
2171 | * The maximum number of MSI-X vectors is PCI_MSIX_FLAGS_QSIZE + 1, but the | |
2172 | * config change interrupt and the fixed virtqueues must be taken into | |
2173 | * account too. | |
2174 | */ | |
2175 | num_queues = MIN(num_queues, PCI_MSIX_FLAGS_QSIZE - fixed_queues); | |
2176 | ||
2177 | /* | |
2178 | * There is a limit to how many virtqueues a device can have. | |
2179 | */ | |
2180 | return MIN(num_queues, VIRTIO_QUEUE_MAX - fixed_queues); | |
2181 | } | |
2182 | ||
0a2acf5e FK |
2183 | /* virtio-pci-bus */ |
2184 | ||
ac7af112 AF |
/* Initialize the proxy-embedded virtio bus, parented to the proxy device. */
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_init(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev, virtio_bus_name);
}
2193 | ||
/*
 * Wire every VirtioBusClass callback to the corresponding virtio-pci
 * transport implementation.  max_dev = 1: one virtio device per proxy.
 */
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
    k->iommu_enabled = virtio_pci_iommu_enabled;
    k->queue_enabled = virtio_pci_queue_enabled;
}
2221 | ||
/* QOM type for the single-device virtio bus embedded in each proxy. */
static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_size    = sizeof(VirtioPCIBusClass),
    .class_init    = virtio_pci_bus_class_init,
};
2229 | ||
/* Register only the abstract base types here; concrete device families
 * register themselves through virtio_pci_types_register(). */
static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)
271458d7 | 2238 |