]>
Commit | Line | Data |
---|---|---|
53c25cea PB |
1 | /* |
2 | * Virtio PCI Bindings | |
3 | * | |
4 | * Copyright IBM, Corp. 2007 | |
5 | * Copyright (c) 2009 CodeSourcery | |
6 | * | |
7 | * Authors: | |
8 | * Anthony Liguori <aliguori@us.ibm.com> | |
9 | * Paul Brook <paul@codesourcery.com> | |
10 | * | |
11 | * This work is licensed under the terms of the GNU GPL, version 2. See | |
12 | * the COPYING file in the top-level directory. | |
13 | * | |
6b620ca3 PB |
14 | * Contributions after 2012-01-13 are licensed under the terms of the |
15 | * GNU GPL, version 2 or (at your option) any later version. | |
53c25cea PB |
16 | */ |
17 | ||
9b8bfe21 | 18 | #include "qemu/osdep.h" |
53c25cea | 19 | |
cbbe4f50 | 20 | #include "standard-headers/linux/virtio_pci.h" |
0d09e41a | 21 | #include "hw/virtio/virtio.h" |
83c9f4ca | 22 | #include "hw/pci/pci.h" |
da34e65c | 23 | #include "qapi/error.h" |
1de7afc9 | 24 | #include "qemu/error-report.h" |
83c9f4ca PB |
25 | #include "hw/pci/msi.h" |
26 | #include "hw/pci/msix.h" | |
27 | #include "hw/loader.h" | |
9c17d615 | 28 | #include "sysemu/kvm.h" |
47b43a1f | 29 | #include "virtio-pci.h" |
1de7afc9 | 30 | #include "qemu/range.h" |
0d09e41a | 31 | #include "hw/virtio/virtio-bus.h" |
24a6e7f4 | 32 | #include "qapi/visitor.h" |
53c25cea | 33 | |
cbbe4f50 | 34 | #define VIRTIO_PCI_REGION_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_present(dev)) |
aba800a3 | 35 | |
c17bef33 MT |
36 | #undef VIRTIO_PCI_CONFIG |
37 | ||
aba800a3 MT |
38 | /* The remaining space is defined by each driver as the per-driver |
39 | * configuration space */ | |
cbbe4f50 | 40 | #define VIRTIO_PCI_CONFIG_SIZE(dev) VIRTIO_PCI_CONFIG_OFF(msix_enabled(dev)) |
53c25cea | 41 | |
ac7af112 AF |
42 | static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size, |
43 | VirtIOPCIProxy *dev); | |
75fd6f13 | 44 | static void virtio_pci_reset(DeviceState *qdev); |
d51fcfac | 45 | |
53c25cea | 46 | /* virtio device */ |
d2a0ccc6 MT |
47 | /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */ |
48 | static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d) | |
49 | { | |
50 | return container_of(d, VirtIOPCIProxy, pci_dev.qdev); | |
51 | } | |
53c25cea | 52 | |
d2a0ccc6 MT |
53 | /* DeviceState to VirtIOPCIProxy. Note: used on datapath, |
54 | * be careful and test performance if you change this. | |
55 | */ | |
56 | static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d) | |
53c25cea | 57 | { |
d2a0ccc6 MT |
58 | return container_of(d, VirtIOPCIProxy, pci_dev.qdev); |
59 | } | |
60 | ||
61 | static void virtio_pci_notify(DeviceState *d, uint16_t vector) | |
62 | { | |
63 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d); | |
a3fc66d9 | 64 | |
aba800a3 MT |
65 | if (msix_enabled(&proxy->pci_dev)) |
66 | msix_notify(&proxy->pci_dev, vector); | |
a3fc66d9 PB |
67 | else { |
68 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
0687c37c | 69 | pci_set_irq(&proxy->pci_dev, atomic_read(&vdev->isr) & 1); |
a3fc66d9 | 70 | } |
53c25cea PB |
71 | } |
72 | ||
d2a0ccc6 | 73 | static void virtio_pci_save_config(DeviceState *d, QEMUFile *f) |
ff24bd58 | 74 | { |
d2a0ccc6 | 75 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
76 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
77 | ||
ff24bd58 MT |
78 | pci_device_save(&proxy->pci_dev, f); |
79 | msix_save(&proxy->pci_dev, f); | |
80 | if (msix_present(&proxy->pci_dev)) | |
a3fc66d9 | 81 | qemu_put_be16(f, vdev->config_vector); |
ff24bd58 MT |
82 | } |
83 | ||
b81b948e DDAG |
84 | static const VMStateDescription vmstate_virtio_pci_modern_queue_state = { |
85 | .name = "virtio_pci/modern_queue_state", | |
86 | .version_id = 1, | |
87 | .minimum_version_id = 1, | |
88 | .fields = (VMStateField[]) { | |
89 | VMSTATE_UINT16(num, VirtIOPCIQueue), | |
90 | VMSTATE_UNUSED(1), /* enabled was stored as be16 */ | |
91 | VMSTATE_BOOL(enabled, VirtIOPCIQueue), | |
92 | VMSTATE_UINT32_ARRAY(desc, VirtIOPCIQueue, 2), | |
93 | VMSTATE_UINT32_ARRAY(avail, VirtIOPCIQueue, 2), | |
94 | VMSTATE_UINT32_ARRAY(used, VirtIOPCIQueue, 2), | |
95 | VMSTATE_END_OF_LIST() | |
a6df8adf | 96 | } |
a6df8adf JW |
97 | }; |
98 | ||
99 | static bool virtio_pci_modern_state_needed(void *opaque) | |
100 | { | |
101 | VirtIOPCIProxy *proxy = opaque; | |
102 | ||
9a4c0e22 | 103 | return virtio_pci_modern(proxy); |
a6df8adf JW |
104 | } |
105 | ||
b81b948e | 106 | static const VMStateDescription vmstate_virtio_pci_modern_state_sub = { |
a6df8adf JW |
107 | .name = "virtio_pci/modern_state", |
108 | .version_id = 1, | |
109 | .minimum_version_id = 1, | |
110 | .needed = &virtio_pci_modern_state_needed, | |
111 | .fields = (VMStateField[]) { | |
b81b948e DDAG |
112 | VMSTATE_UINT32(dfselect, VirtIOPCIProxy), |
113 | VMSTATE_UINT32(gfselect, VirtIOPCIProxy), | |
114 | VMSTATE_UINT32_ARRAY(guest_features, VirtIOPCIProxy, 2), | |
115 | VMSTATE_STRUCT_ARRAY(vqs, VirtIOPCIProxy, VIRTIO_QUEUE_MAX, 0, | |
116 | vmstate_virtio_pci_modern_queue_state, | |
117 | VirtIOPCIQueue), | |
a6df8adf JW |
118 | VMSTATE_END_OF_LIST() |
119 | } | |
120 | }; | |
121 | ||
122 | static const VMStateDescription vmstate_virtio_pci = { | |
123 | .name = "virtio_pci", | |
124 | .version_id = 1, | |
125 | .minimum_version_id = 1, | |
126 | .minimum_version_id_old = 1, | |
127 | .fields = (VMStateField[]) { | |
128 | VMSTATE_END_OF_LIST() | |
129 | }, | |
130 | .subsections = (const VMStateDescription*[]) { | |
b81b948e | 131 | &vmstate_virtio_pci_modern_state_sub, |
a6df8adf JW |
132 | NULL |
133 | } | |
134 | }; | |
135 | ||
b81b948e DDAG |
136 | static bool virtio_pci_has_extra_state(DeviceState *d) |
137 | { | |
138 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
139 | ||
140 | return proxy->flags & VIRTIO_PCI_FLAG_MIGRATE_EXTRA; | |
141 | } | |
142 | ||
a6df8adf JW |
143 | static void virtio_pci_save_extra_state(DeviceState *d, QEMUFile *f) |
144 | { | |
145 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
146 | ||
147 | vmstate_save_state(f, &vmstate_virtio_pci, proxy, NULL); | |
148 | } | |
149 | ||
150 | static int virtio_pci_load_extra_state(DeviceState *d, QEMUFile *f) | |
151 | { | |
152 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
153 | ||
154 | return vmstate_load_state(f, &vmstate_virtio_pci, proxy, 1); | |
155 | } | |
156 | ||
d2a0ccc6 | 157 | static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f) |
ff24bd58 | 158 | { |
d2a0ccc6 | 159 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
160 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
161 | ||
ff24bd58 | 162 | if (msix_present(&proxy->pci_dev)) |
a3fc66d9 | 163 | qemu_put_be16(f, virtio_queue_vector(vdev, n)); |
ff24bd58 MT |
164 | } |
165 | ||
d2a0ccc6 | 166 | static int virtio_pci_load_config(DeviceState *d, QEMUFile *f) |
ff24bd58 | 167 | { |
d2a0ccc6 | 168 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
169 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
170 | ||
ff24bd58 MT |
171 | int ret; |
172 | ret = pci_device_load(&proxy->pci_dev, f); | |
e6da7680 | 173 | if (ret) { |
ff24bd58 | 174 | return ret; |
e6da7680 | 175 | } |
3cac001e | 176 | msix_unuse_all_vectors(&proxy->pci_dev); |
ff24bd58 | 177 | msix_load(&proxy->pci_dev, f); |
e6da7680 | 178 | if (msix_present(&proxy->pci_dev)) { |
a3fc66d9 | 179 | qemu_get_be16s(f, &vdev->config_vector); |
e6da7680 | 180 | } else { |
a3fc66d9 | 181 | vdev->config_vector = VIRTIO_NO_VECTOR; |
e6da7680 | 182 | } |
a3fc66d9 PB |
183 | if (vdev->config_vector != VIRTIO_NO_VECTOR) { |
184 | return msix_vector_use(&proxy->pci_dev, vdev->config_vector); | |
e6da7680 | 185 | } |
ff24bd58 MT |
186 | return 0; |
187 | } | |
188 | ||
d2a0ccc6 | 189 | static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f) |
ff24bd58 | 190 | { |
d2a0ccc6 | 191 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
192 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
193 | ||
ff24bd58 | 194 | uint16_t vector; |
e6da7680 MT |
195 | if (msix_present(&proxy->pci_dev)) { |
196 | qemu_get_be16s(f, &vector); | |
197 | } else { | |
198 | vector = VIRTIO_NO_VECTOR; | |
199 | } | |
a3fc66d9 | 200 | virtio_queue_set_vector(vdev, n, vector); |
e6da7680 MT |
201 | if (vector != VIRTIO_NO_VECTOR) { |
202 | return msix_vector_use(&proxy->pci_dev, vector); | |
203 | } | |
a6df8adf | 204 | |
ff24bd58 MT |
205 | return 0; |
206 | } | |
207 | ||
8e93cef1 | 208 | static bool virtio_pci_ioeventfd_enabled(DeviceState *d) |
9f06e71a CH |
209 | { |
210 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
211 | ||
8e93cef1 | 212 | return (proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) != 0; |
9f06e71a CH |
213 | } |
214 | ||
975acc0a JW |
215 | #define QEMU_VIRTIO_PCI_QUEUE_MEM_MULT 0x1000 |
216 | ||
d9997d89 MA |
217 | static inline int virtio_pci_queue_mem_mult(struct VirtIOPCIProxy *proxy) |
218 | { | |
219 | return (proxy->flags & VIRTIO_PCI_FLAG_PAGE_PER_VQ) ? | |
220 | QEMU_VIRTIO_PCI_QUEUE_MEM_MULT : 4; | |
221 | } | |
222 | ||
9f06e71a CH |
223 | static int virtio_pci_ioeventfd_assign(DeviceState *d, EventNotifier *notifier, |
224 | int n, bool assign) | |
25db9ebe | 225 | { |
9f06e71a | 226 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
227 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
228 | VirtQueue *vq = virtio_get_queue(vdev, n); | |
9a4c0e22 MA |
229 | bool legacy = virtio_pci_legacy(proxy); |
230 | bool modern = virtio_pci_modern(proxy); | |
bc85ccfd | 231 | bool fast_mmio = kvm_ioeventfd_any_length_enabled(); |
9824d2a3 | 232 | bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; |
588255ad | 233 | MemoryRegion *modern_mr = &proxy->notify.mr; |
9824d2a3 | 234 | MemoryRegion *modern_notify_mr = &proxy->notify_pio.mr; |
975acc0a | 235 | MemoryRegion *legacy_mr = &proxy->bar; |
d9997d89 | 236 | hwaddr modern_addr = virtio_pci_queue_mem_mult(proxy) * |
975acc0a JW |
237 | virtio_get_queue_index(vq); |
238 | hwaddr legacy_addr = VIRTIO_PCI_QUEUE_NOTIFY; | |
da146d0a | 239 | |
25db9ebe | 240 | if (assign) { |
975acc0a | 241 | if (modern) { |
bc85ccfd JW |
242 | if (fast_mmio) { |
243 | memory_region_add_eventfd(modern_mr, modern_addr, 0, | |
244 | false, n, notifier); | |
245 | } else { | |
246 | memory_region_add_eventfd(modern_mr, modern_addr, 2, | |
247 | false, n, notifier); | |
248 | } | |
9824d2a3 JW |
249 | if (modern_pio) { |
250 | memory_region_add_eventfd(modern_notify_mr, 0, 2, | |
251 | true, n, notifier); | |
252 | } | |
975acc0a JW |
253 | } |
254 | if (legacy) { | |
255 | memory_region_add_eventfd(legacy_mr, legacy_addr, 2, | |
256 | true, n, notifier); | |
257 | } | |
25db9ebe | 258 | } else { |
975acc0a | 259 | if (modern) { |
bc85ccfd JW |
260 | if (fast_mmio) { |
261 | memory_region_del_eventfd(modern_mr, modern_addr, 0, | |
262 | false, n, notifier); | |
263 | } else { | |
264 | memory_region_del_eventfd(modern_mr, modern_addr, 2, | |
265 | false, n, notifier); | |
266 | } | |
9824d2a3 JW |
267 | if (modern_pio) { |
268 | memory_region_del_eventfd(modern_notify_mr, 0, 2, | |
269 | true, n, notifier); | |
270 | } | |
975acc0a JW |
271 | } |
272 | if (legacy) { | |
273 | memory_region_del_eventfd(legacy_mr, legacy_addr, 2, | |
274 | true, n, notifier); | |
275 | } | |
25db9ebe | 276 | } |
9f06e71a | 277 | return 0; |
25db9ebe SH |
278 | } |
279 | ||
b36e3914 | 280 | static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy) |
25db9ebe | 281 | { |
9f06e71a | 282 | virtio_bus_start_ioeventfd(&proxy->bus); |
25db9ebe SH |
283 | } |
284 | ||
b36e3914 | 285 | static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy) |
25db9ebe | 286 | { |
9f06e71a | 287 | virtio_bus_stop_ioeventfd(&proxy->bus); |
25db9ebe SH |
288 | } |
289 | ||
53c25cea PB |
290 | static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val) |
291 | { | |
292 | VirtIOPCIProxy *proxy = opaque; | |
a3fc66d9 | 293 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
a8170e5e | 294 | hwaddr pa; |
53c25cea | 295 | |
53c25cea PB |
296 | switch (addr) { |
297 | case VIRTIO_PCI_GUEST_FEATURES: | |
181103cd FK |
298 | /* Guest does not negotiate properly? We have to assume nothing. */ |
299 | if (val & (1 << VIRTIO_F_BAD_FEATURE)) { | |
300 | val = virtio_bus_get_vdev_bad_features(&proxy->bus); | |
301 | } | |
ad0c9332 | 302 | virtio_set_features(vdev, val); |
53c25cea PB |
303 | break; |
304 | case VIRTIO_PCI_QUEUE_PFN: | |
a8170e5e | 305 | pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT; |
1b8e9b27 | 306 | if (pa == 0) { |
75fd6f13 | 307 | virtio_pci_reset(DEVICE(proxy)); |
1b8e9b27 | 308 | } |
7055e687 MT |
309 | else |
310 | virtio_queue_set_addr(vdev, vdev->queue_sel, pa); | |
53c25cea PB |
311 | break; |
312 | case VIRTIO_PCI_QUEUE_SEL: | |
87b3bd1c | 313 | if (val < VIRTIO_QUEUE_MAX) |
53c25cea PB |
314 | vdev->queue_sel = val; |
315 | break; | |
316 | case VIRTIO_PCI_QUEUE_NOTIFY: | |
87b3bd1c | 317 | if (val < VIRTIO_QUEUE_MAX) { |
7157e2e2 SH |
318 | virtio_queue_notify(vdev, val); |
319 | } | |
53c25cea PB |
320 | break; |
321 | case VIRTIO_PCI_STATUS: | |
25db9ebe SH |
322 | if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { |
323 | virtio_pci_stop_ioeventfd(proxy); | |
324 | } | |
325 | ||
3e607cb5 | 326 | virtio_set_status(vdev, val & 0xFF); |
25db9ebe SH |
327 | |
328 | if (val & VIRTIO_CONFIG_S_DRIVER_OK) { | |
329 | virtio_pci_start_ioeventfd(proxy); | |
330 | } | |
331 | ||
1b8e9b27 | 332 | if (vdev->status == 0) { |
75fd6f13 | 333 | virtio_pci_reset(DEVICE(proxy)); |
1b8e9b27 | 334 | } |
c81131db | 335 | |
e43c0b2e MT |
336 | /* Linux before 2.6.34 drives the device without enabling |
337 | the PCI device bus master bit. Enable it automatically | |
338 | for the guest. This is a PCI spec violation but so is | |
339 | initiating DMA with bus master bit clear. */ | |
340 | if (val == (VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER)) { | |
341 | pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, | |
342 | proxy->pci_dev.config[PCI_COMMAND] | | |
343 | PCI_COMMAND_MASTER, 1); | |
344 | } | |
53c25cea | 345 | break; |
aba800a3 MT |
346 | case VIRTIO_MSI_CONFIG_VECTOR: |
347 | msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); | |
348 | /* Make it possible for guest to discover an error took place. */ | |
349 | if (msix_vector_use(&proxy->pci_dev, val) < 0) | |
350 | val = VIRTIO_NO_VECTOR; | |
351 | vdev->config_vector = val; | |
352 | break; | |
353 | case VIRTIO_MSI_QUEUE_VECTOR: | |
354 | msix_vector_unuse(&proxy->pci_dev, | |
355 | virtio_queue_vector(vdev, vdev->queue_sel)); | |
356 | /* Make it possible for guest to discover an error took place. */ | |
357 | if (msix_vector_use(&proxy->pci_dev, val) < 0) | |
358 | val = VIRTIO_NO_VECTOR; | |
359 | virtio_queue_set_vector(vdev, vdev->queue_sel, val); | |
360 | break; | |
361 | default: | |
4e02d460 SH |
362 | error_report("%s: unexpected address 0x%x value 0x%x", |
363 | __func__, addr, val); | |
aba800a3 | 364 | break; |
53c25cea PB |
365 | } |
366 | } | |
367 | ||
aba800a3 | 368 | static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr) |
53c25cea | 369 | { |
a3fc66d9 | 370 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
53c25cea PB |
371 | uint32_t ret = 0xFFFFFFFF; |
372 | ||
53c25cea PB |
373 | switch (addr) { |
374 | case VIRTIO_PCI_HOST_FEATURES: | |
6b8f1020 | 375 | ret = vdev->host_features; |
53c25cea PB |
376 | break; |
377 | case VIRTIO_PCI_GUEST_FEATURES: | |
704a76fc | 378 | ret = vdev->guest_features; |
53c25cea PB |
379 | break; |
380 | case VIRTIO_PCI_QUEUE_PFN: | |
381 | ret = virtio_queue_get_addr(vdev, vdev->queue_sel) | |
382 | >> VIRTIO_PCI_QUEUE_ADDR_SHIFT; | |
383 | break; | |
384 | case VIRTIO_PCI_QUEUE_NUM: | |
385 | ret = virtio_queue_get_num(vdev, vdev->queue_sel); | |
386 | break; | |
387 | case VIRTIO_PCI_QUEUE_SEL: | |
388 | ret = vdev->queue_sel; | |
389 | break; | |
390 | case VIRTIO_PCI_STATUS: | |
391 | ret = vdev->status; | |
392 | break; | |
393 | case VIRTIO_PCI_ISR: | |
394 | /* reading from the ISR also clears it. */ | |
0687c37c | 395 | ret = atomic_xchg(&vdev->isr, 0); |
9e64f8a3 | 396 | pci_irq_deassert(&proxy->pci_dev); |
53c25cea | 397 | break; |
aba800a3 MT |
398 | case VIRTIO_MSI_CONFIG_VECTOR: |
399 | ret = vdev->config_vector; | |
400 | break; | |
401 | case VIRTIO_MSI_QUEUE_VECTOR: | |
402 | ret = virtio_queue_vector(vdev, vdev->queue_sel); | |
403 | break; | |
53c25cea PB |
404 | default: |
405 | break; | |
406 | } | |
407 | ||
408 | return ret; | |
409 | } | |
410 | ||
df6db5b3 AG |
411 | static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr, |
412 | unsigned size) | |
53c25cea PB |
413 | { |
414 | VirtIOPCIProxy *proxy = opaque; | |
a3fc66d9 | 415 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
cbbe4f50 | 416 | uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); |
df6db5b3 | 417 | uint64_t val = 0; |
aba800a3 | 418 | if (addr < config) { |
df6db5b3 | 419 | return virtio_ioport_read(proxy, addr); |
aba800a3 MT |
420 | } |
421 | addr -= config; | |
53c25cea | 422 | |
df6db5b3 AG |
423 | switch (size) { |
424 | case 1: | |
a3fc66d9 | 425 | val = virtio_config_readb(vdev, addr); |
df6db5b3 AG |
426 | break; |
427 | case 2: | |
a3fc66d9 | 428 | val = virtio_config_readw(vdev, addr); |
616a6552 | 429 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
430 | val = bswap16(val); |
431 | } | |
df6db5b3 AG |
432 | break; |
433 | case 4: | |
a3fc66d9 | 434 | val = virtio_config_readl(vdev, addr); |
616a6552 | 435 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
436 | val = bswap32(val); |
437 | } | |
df6db5b3 | 438 | break; |
82afa586 | 439 | } |
df6db5b3 | 440 | return val; |
53c25cea PB |
441 | } |
442 | ||
df6db5b3 AG |
443 | static void virtio_pci_config_write(void *opaque, hwaddr addr, |
444 | uint64_t val, unsigned size) | |
53c25cea PB |
445 | { |
446 | VirtIOPCIProxy *proxy = opaque; | |
cbbe4f50 | 447 | uint32_t config = VIRTIO_PCI_CONFIG_SIZE(&proxy->pci_dev); |
a3fc66d9 | 448 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
aba800a3 MT |
449 | if (addr < config) { |
450 | virtio_ioport_write(proxy, addr, val); | |
451 | return; | |
452 | } | |
453 | addr -= config; | |
df6db5b3 AG |
454 | /* |
455 | * Virtio-PCI is odd. Ioports are LE but config space is target native | |
456 | * endian. | |
457 | */ | |
458 | switch (size) { | |
459 | case 1: | |
a3fc66d9 | 460 | virtio_config_writeb(vdev, addr, val); |
df6db5b3 AG |
461 | break; |
462 | case 2: | |
616a6552 | 463 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
464 | val = bswap16(val); |
465 | } | |
a3fc66d9 | 466 | virtio_config_writew(vdev, addr, val); |
df6db5b3 AG |
467 | break; |
468 | case 4: | |
616a6552 | 469 | if (virtio_is_big_endian(vdev)) { |
8e4a424b BS |
470 | val = bswap32(val); |
471 | } | |
a3fc66d9 | 472 | virtio_config_writel(vdev, addr, val); |
df6db5b3 | 473 | break; |
82afa586 | 474 | } |
53c25cea PB |
475 | } |
476 | ||
da146d0a | 477 | static const MemoryRegionOps virtio_pci_config_ops = { |
df6db5b3 AG |
478 | .read = virtio_pci_config_read, |
479 | .write = virtio_pci_config_write, | |
480 | .impl = { | |
481 | .min_access_size = 1, | |
482 | .max_access_size = 4, | |
483 | }, | |
8e4a424b | 484 | .endianness = DEVICE_LITTLE_ENDIAN, |
da146d0a | 485 | }; |
aba800a3 | 486 | |
a93c8d82 AK |
487 | static MemoryRegion *virtio_address_space_lookup(VirtIOPCIProxy *proxy, |
488 | hwaddr *off, int len) | |
489 | { | |
490 | int i; | |
491 | VirtIOPCIRegion *reg; | |
492 | ||
493 | for (i = 0; i < ARRAY_SIZE(proxy->regs); ++i) { | |
494 | reg = &proxy->regs[i]; | |
495 | if (*off >= reg->offset && | |
496 | *off + len <= reg->offset + reg->size) { | |
497 | *off -= reg->offset; | |
498 | return ®->mr; | |
499 | } | |
500 | } | |
501 | ||
502 | return NULL; | |
503 | } | |
504 | ||
1e40356c MT |
505 | /* Below are generic functions to do memcpy from/to an address space, |
506 | * without byteswaps, with input validation. | |
507 | * | |
508 | * As regular address_space_* APIs all do some kind of byteswap at least for | |
509 | * some host/target combinations, we are forced to explicitly convert to a | |
510 | * known-endianness integer value. | |
511 | * It doesn't really matter which endian format to go through, so the code | |
512 | * below selects the endian that causes the least amount of work on the given | |
513 | * host. | |
514 | * | |
515 | * Note: host pointer must be aligned. | |
516 | */ | |
517 | static | |
a93c8d82 | 518 | void virtio_address_space_write(VirtIOPCIProxy *proxy, hwaddr addr, |
1e40356c MT |
519 | const uint8_t *buf, int len) |
520 | { | |
a93c8d82 AK |
521 | uint64_t val; |
522 | MemoryRegion *mr; | |
1e40356c MT |
523 | |
524 | /* address_space_* APIs assume an aligned address. | |
525 | * As address is under guest control, handle illegal values. | |
526 | */ | |
527 | addr &= ~(len - 1); | |
528 | ||
a93c8d82 AK |
529 | mr = virtio_address_space_lookup(proxy, &addr, len); |
530 | if (!mr) { | |
531 | return; | |
532 | } | |
533 | ||
1e40356c MT |
534 | /* Make sure caller aligned buf properly */ |
535 | assert(!(((uintptr_t)buf) & (len - 1))); | |
536 | ||
537 | switch (len) { | |
538 | case 1: | |
539 | val = pci_get_byte(buf); | |
1e40356c MT |
540 | break; |
541 | case 2: | |
a93c8d82 | 542 | val = cpu_to_le16(pci_get_word(buf)); |
1e40356c MT |
543 | break; |
544 | case 4: | |
a93c8d82 | 545 | val = cpu_to_le32(pci_get_long(buf)); |
1e40356c MT |
546 | break; |
547 | default: | |
548 | /* As length is under guest control, handle illegal values. */ | |
a93c8d82 | 549 | return; |
1e40356c | 550 | } |
a93c8d82 | 551 | memory_region_dispatch_write(mr, addr, val, len, MEMTXATTRS_UNSPECIFIED); |
1e40356c MT |
552 | } |
553 | ||
554 | static void | |
a93c8d82 AK |
555 | virtio_address_space_read(VirtIOPCIProxy *proxy, hwaddr addr, |
556 | uint8_t *buf, int len) | |
1e40356c | 557 | { |
a93c8d82 AK |
558 | uint64_t val; |
559 | MemoryRegion *mr; | |
1e40356c MT |
560 | |
561 | /* address_space_* APIs assume an aligned address. | |
562 | * As address is under guest control, handle illegal values. | |
563 | */ | |
564 | addr &= ~(len - 1); | |
565 | ||
a93c8d82 AK |
566 | mr = virtio_address_space_lookup(proxy, &addr, len); |
567 | if (!mr) { | |
568 | return; | |
569 | } | |
570 | ||
1e40356c MT |
571 | /* Make sure caller aligned buf properly */ |
572 | assert(!(((uintptr_t)buf) & (len - 1))); | |
573 | ||
a93c8d82 | 574 | memory_region_dispatch_read(mr, addr, &val, len, MEMTXATTRS_UNSPECIFIED); |
1e40356c MT |
575 | switch (len) { |
576 | case 1: | |
1e40356c MT |
577 | pci_set_byte(buf, val); |
578 | break; | |
579 | case 2: | |
a93c8d82 | 580 | pci_set_word(buf, le16_to_cpu(val)); |
1e40356c MT |
581 | break; |
582 | case 4: | |
a93c8d82 | 583 | pci_set_long(buf, le32_to_cpu(val)); |
1e40356c MT |
584 | break; |
585 | default: | |
586 | /* As length is under guest control, handle illegal values. */ | |
587 | break; | |
588 | } | |
589 | } | |
590 | ||
aba800a3 MT |
591 | static void virtio_write_config(PCIDevice *pci_dev, uint32_t address, |
592 | uint32_t val, int len) | |
593 | { | |
3f262b26 | 594 | VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); |
a3fc66d9 | 595 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
ada434cd | 596 | struct virtio_pci_cfg_cap *cfg; |
ed757e14 | 597 | |
1129714f MT |
598 | pci_default_write_config(pci_dev, address, val, len); |
599 | ||
600 | if (range_covers_byte(address, len, PCI_COMMAND) && | |
68a27b20 | 601 | !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { |
1129714f | 602 | virtio_pci_stop_ioeventfd(proxy); |
45363e46 | 603 | virtio_set_status(vdev, vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK); |
ed757e14 | 604 | } |
ada434cd MT |
605 | |
606 | if (proxy->config_cap && | |
607 | ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, | |
608 | pci_cfg_data), | |
609 | sizeof cfg->pci_cfg_data)) { | |
610 | uint32_t off; | |
611 | uint32_t len; | |
612 | ||
613 | cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); | |
614 | off = le32_to_cpu(cfg->cap.offset); | |
615 | len = le32_to_cpu(cfg->cap.length); | |
616 | ||
2a639123 MT |
617 | if (len == 1 || len == 2 || len == 4) { |
618 | assert(len <= sizeof cfg->pci_cfg_data); | |
a93c8d82 | 619 | virtio_address_space_write(proxy, off, cfg->pci_cfg_data, len); |
ada434cd MT |
620 | } |
621 | } | |
622 | } | |
623 | ||
624 | static uint32_t virtio_read_config(PCIDevice *pci_dev, | |
625 | uint32_t address, int len) | |
626 | { | |
3f262b26 | 627 | VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev); |
ada434cd MT |
628 | struct virtio_pci_cfg_cap *cfg; |
629 | ||
630 | if (proxy->config_cap && | |
631 | ranges_overlap(address, len, proxy->config_cap + offsetof(struct virtio_pci_cfg_cap, | |
632 | pci_cfg_data), | |
633 | sizeof cfg->pci_cfg_data)) { | |
634 | uint32_t off; | |
635 | uint32_t len; | |
636 | ||
637 | cfg = (void *)(proxy->pci_dev.config + proxy->config_cap); | |
638 | off = le32_to_cpu(cfg->cap.offset); | |
639 | len = le32_to_cpu(cfg->cap.length); | |
640 | ||
2a639123 MT |
641 | if (len == 1 || len == 2 || len == 4) { |
642 | assert(len <= sizeof cfg->pci_cfg_data); | |
a93c8d82 | 643 | virtio_address_space_read(proxy, off, cfg->pci_cfg_data, len); |
ada434cd MT |
644 | } |
645 | } | |
646 | ||
647 | return pci_default_read_config(pci_dev, address, len); | |
53c25cea PB |
648 | } |
649 | ||
7d37d351 JK |
650 | static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy, |
651 | unsigned int queue_no, | |
d1f6af6a | 652 | unsigned int vector) |
7d37d351 | 653 | { |
7d37d351 | 654 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; |
15b2bd18 | 655 | int ret; |
7d37d351 JK |
656 | |
657 | if (irqfd->users == 0) { | |
d1f6af6a | 658 | ret = kvm_irqchip_add_msi_route(kvm_state, vector, &proxy->pci_dev); |
7d37d351 JK |
659 | if (ret < 0) { |
660 | return ret; | |
661 | } | |
662 | irqfd->virq = ret; | |
663 | } | |
664 | irqfd->users++; | |
7d37d351 JK |
665 | return 0; |
666 | } | |
667 | ||
668 | static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy, | |
7d37d351 | 669 | unsigned int vector) |
774345f9 MT |
670 | { |
671 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; | |
672 | if (--irqfd->users == 0) { | |
673 | kvm_irqchip_release_virq(kvm_state, irqfd->virq); | |
674 | } | |
675 | } | |
676 | ||
f1d0f15a MT |
677 | static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy, |
678 | unsigned int queue_no, | |
679 | unsigned int vector) | |
680 | { | |
681 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; | |
a3fc66d9 PB |
682 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
683 | VirtQueue *vq = virtio_get_queue(vdev, queue_no); | |
f1d0f15a | 684 | EventNotifier *n = virtio_queue_get_guest_notifier(vq); |
9be38598 | 685 | return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, irqfd->virq); |
f1d0f15a MT |
686 | } |
687 | ||
688 | static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy, | |
689 | unsigned int queue_no, | |
690 | unsigned int vector) | |
7d37d351 | 691 | { |
a3fc66d9 PB |
692 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
693 | VirtQueue *vq = virtio_get_queue(vdev, queue_no); | |
15b2bd18 | 694 | EventNotifier *n = virtio_queue_get_guest_notifier(vq); |
7d37d351 | 695 | VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector]; |
15b2bd18 | 696 | int ret; |
7d37d351 | 697 | |
1c9b71a7 | 698 | ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n, irqfd->virq); |
7d37d351 | 699 | assert(ret == 0); |
f1d0f15a | 700 | } |
7d37d351 | 701 | |
774345f9 MT |
702 | static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs) |
703 | { | |
704 | PCIDevice *dev = &proxy->pci_dev; | |
a3fc66d9 | 705 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
181103cd | 706 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
774345f9 MT |
707 | unsigned int vector; |
708 | int ret, queue_no; | |
774345f9 MT |
709 | |
710 | for (queue_no = 0; queue_no < nvqs; queue_no++) { | |
711 | if (!virtio_queue_get_num(vdev, queue_no)) { | |
712 | break; | |
713 | } | |
714 | vector = virtio_queue_vector(vdev, queue_no); | |
715 | if (vector >= msix_nr_vectors_allocated(dev)) { | |
716 | continue; | |
717 | } | |
d1f6af6a | 718 | ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector); |
774345f9 MT |
719 | if (ret < 0) { |
720 | goto undo; | |
7d37d351 | 721 | } |
f1d0f15a MT |
722 | /* If guest supports masking, set up irqfd now. |
723 | * Otherwise, delay until unmasked in the frontend. | |
724 | */ | |
5669655a | 725 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
f1d0f15a MT |
726 | ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); |
727 | if (ret < 0) { | |
728 | kvm_virtio_pci_vq_vector_release(proxy, vector); | |
729 | goto undo; | |
730 | } | |
731 | } | |
7d37d351 | 732 | } |
7d37d351 | 733 | return 0; |
774345f9 MT |
734 | |
735 | undo: | |
736 | while (--queue_no >= 0) { | |
737 | vector = virtio_queue_vector(vdev, queue_no); | |
738 | if (vector >= msix_nr_vectors_allocated(dev)) { | |
739 | continue; | |
740 | } | |
5669655a | 741 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
e387f99e | 742 | kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); |
f1d0f15a | 743 | } |
774345f9 MT |
744 | kvm_virtio_pci_vq_vector_release(proxy, vector); |
745 | } | |
746 | return ret; | |
7d37d351 JK |
747 | } |
748 | ||
774345f9 MT |
749 | static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs) |
750 | { | |
751 | PCIDevice *dev = &proxy->pci_dev; | |
a3fc66d9 | 752 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
774345f9 MT |
753 | unsigned int vector; |
754 | int queue_no; | |
181103cd | 755 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
774345f9 MT |
756 | |
757 | for (queue_no = 0; queue_no < nvqs; queue_no++) { | |
758 | if (!virtio_queue_get_num(vdev, queue_no)) { | |
759 | break; | |
760 | } | |
761 | vector = virtio_queue_vector(vdev, queue_no); | |
762 | if (vector >= msix_nr_vectors_allocated(dev)) { | |
763 | continue; | |
764 | } | |
f1d0f15a MT |
765 | /* If guest supports masking, clean up irqfd now. |
766 | * Otherwise, it was cleaned when masked in the frontend. | |
767 | */ | |
5669655a | 768 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
e387f99e | 769 | kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); |
f1d0f15a | 770 | } |
774345f9 MT |
771 | kvm_virtio_pci_vq_vector_release(proxy, vector); |
772 | } | |
773 | } | |
774 | ||
a38b2c49 MT |
775 | static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy, |
776 | unsigned int queue_no, | |
777 | unsigned int vector, | |
778 | MSIMessage msg) | |
774345f9 | 779 | { |
a3fc66d9 PB |
780 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
781 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); | |
782 | VirtQueue *vq = virtio_get_queue(vdev, queue_no); | |
774345f9 | 783 | EventNotifier *n = virtio_queue_get_guest_notifier(vq); |
a38b2c49 | 784 | VirtIOIRQFD *irqfd; |
53510bfc | 785 | int ret = 0; |
774345f9 | 786 | |
a38b2c49 MT |
787 | if (proxy->vector_irqfd) { |
788 | irqfd = &proxy->vector_irqfd[vector]; | |
789 | if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) { | |
dc9f06ca PF |
790 | ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg, |
791 | &proxy->pci_dev); | |
a38b2c49 MT |
792 | if (ret < 0) { |
793 | return ret; | |
794 | } | |
3f1fea0f | 795 | kvm_irqchip_commit_routes(kvm_state); |
774345f9 MT |
796 | } |
797 | } | |
798 | ||
f1d0f15a MT |
799 | /* If guest supports masking, irqfd is already setup, unmask it. |
800 | * Otherwise, set it up now. | |
801 | */ | |
5669655a | 802 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
a3fc66d9 | 803 | k->guest_notifier_mask(vdev, queue_no, false); |
f1d0f15a | 804 | /* Test after unmasking to avoid losing events. */ |
181103cd | 805 | if (k->guest_notifier_pending && |
a3fc66d9 | 806 | k->guest_notifier_pending(vdev, queue_no)) { |
f1d0f15a MT |
807 | event_notifier_set(n); |
808 | } | |
809 | } else { | |
810 | ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector); | |
7d37d351 | 811 | } |
774345f9 | 812 | return ret; |
7d37d351 JK |
813 | } |
814 | ||
a38b2c49 | 815 | static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy, |
7d37d351 JK |
816 | unsigned int queue_no, |
817 | unsigned int vector) | |
818 | { | |
a3fc66d9 PB |
819 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
820 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); | |
181103cd | 821 | |
f1d0f15a MT |
822 | /* If guest supports masking, keep irqfd but mask it. |
823 | * Otherwise, clean it up now. | |
824 | */ | |
5669655a | 825 | if (vdev->use_guest_notifier_mask && k->guest_notifier_mask) { |
a3fc66d9 | 826 | k->guest_notifier_mask(vdev, queue_no, true); |
f1d0f15a | 827 | } else { |
e387f99e | 828 | kvm_virtio_pci_irqfd_release(proxy, queue_no, vector); |
f1d0f15a | 829 | } |
7d37d351 JK |
830 | } |
831 | ||
a38b2c49 MT |
832 | static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector, |
833 | MSIMessage msg) | |
7d37d351 JK |
834 | { |
835 | VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); | |
a3fc66d9 | 836 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
851c2a75 JW |
837 | VirtQueue *vq = virtio_vector_first_queue(vdev, vector); |
838 | int ret, index, unmasked = 0; | |
7d37d351 | 839 | |
851c2a75 JW |
840 | while (vq) { |
841 | index = virtio_get_queue_index(vq); | |
842 | if (!virtio_queue_get_num(vdev, index)) { | |
7d37d351 JK |
843 | break; |
844 | } | |
6652d081 JW |
845 | if (index < proxy->nvqs_with_notifiers) { |
846 | ret = virtio_pci_vq_vector_unmask(proxy, index, vector, msg); | |
847 | if (ret < 0) { | |
848 | goto undo; | |
849 | } | |
850 | ++unmasked; | |
7d37d351 | 851 | } |
851c2a75 | 852 | vq = virtio_vector_next_queue(vq); |
7d37d351 | 853 | } |
851c2a75 | 854 | |
7d37d351 JK |
855 | return 0; |
856 | ||
857 | undo: | |
851c2a75 | 858 | vq = virtio_vector_first_queue(vdev, vector); |
6652d081 | 859 | while (vq && unmasked >= 0) { |
851c2a75 | 860 | index = virtio_get_queue_index(vq); |
6652d081 JW |
861 | if (index < proxy->nvqs_with_notifiers) { |
862 | virtio_pci_vq_vector_mask(proxy, index, vector); | |
863 | --unmasked; | |
864 | } | |
851c2a75 | 865 | vq = virtio_vector_next_queue(vq); |
7d37d351 JK |
866 | } |
867 | return ret; | |
868 | } | |
869 | ||
a38b2c49 | 870 | static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector) |
7d37d351 JK |
871 | { |
872 | VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); | |
a3fc66d9 | 873 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
851c2a75 JW |
874 | VirtQueue *vq = virtio_vector_first_queue(vdev, vector); |
875 | int index; | |
7d37d351 | 876 | |
851c2a75 JW |
877 | while (vq) { |
878 | index = virtio_get_queue_index(vq); | |
879 | if (!virtio_queue_get_num(vdev, index)) { | |
7d37d351 JK |
880 | break; |
881 | } | |
6652d081 JW |
882 | if (index < proxy->nvqs_with_notifiers) { |
883 | virtio_pci_vq_vector_mask(proxy, index, vector); | |
884 | } | |
851c2a75 | 885 | vq = virtio_vector_next_queue(vq); |
7d37d351 JK |
886 | } |
887 | } | |
888 | ||
a38b2c49 MT |
889 | static void virtio_pci_vector_poll(PCIDevice *dev, |
890 | unsigned int vector_start, | |
891 | unsigned int vector_end) | |
89d62be9 MT |
892 | { |
893 | VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev); | |
a3fc66d9 | 894 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
181103cd | 895 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
89d62be9 MT |
896 | int queue_no; |
897 | unsigned int vector; | |
898 | EventNotifier *notifier; | |
899 | VirtQueue *vq; | |
900 | ||
2d620f59 | 901 | for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) { |
89d62be9 MT |
902 | if (!virtio_queue_get_num(vdev, queue_no)) { |
903 | break; | |
904 | } | |
905 | vector = virtio_queue_vector(vdev, queue_no); | |
906 | if (vector < vector_start || vector >= vector_end || | |
907 | !msix_is_masked(dev, vector)) { | |
908 | continue; | |
909 | } | |
910 | vq = virtio_get_queue(vdev, queue_no); | |
911 | notifier = virtio_queue_get_guest_notifier(vq); | |
181103cd FK |
912 | if (k->guest_notifier_pending) { |
913 | if (k->guest_notifier_pending(vdev, queue_no)) { | |
f1d0f15a MT |
914 | msix_set_pending(dev, vector); |
915 | } | |
916 | } else if (event_notifier_test_and_clear(notifier)) { | |
89d62be9 MT |
917 | msix_set_pending(dev, vector); |
918 | } | |
919 | } | |
920 | } | |
921 | ||
922 | static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign, | |
923 | bool with_irqfd) | |
ade80dc8 | 924 | { |
d2a0ccc6 | 925 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 PB |
926 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
927 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); | |
928 | VirtQueue *vq = virtio_get_queue(vdev, n); | |
ade80dc8 MT |
929 | EventNotifier *notifier = virtio_queue_get_guest_notifier(vq); |
930 | ||
931 | if (assign) { | |
932 | int r = event_notifier_init(notifier, 0); | |
933 | if (r < 0) { | |
934 | return r; | |
935 | } | |
89d62be9 | 936 | virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd); |
ade80dc8 | 937 | } else { |
89d62be9 | 938 | virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd); |
ade80dc8 MT |
939 | event_notifier_cleanup(notifier); |
940 | } | |
941 | ||
5669655a VK |
942 | if (!msix_enabled(&proxy->pci_dev) && |
943 | vdev->use_guest_notifier_mask && | |
944 | vdc->guest_notifier_mask) { | |
a3fc66d9 | 945 | vdc->guest_notifier_mask(vdev, n, !assign); |
62c96360 MT |
946 | } |
947 | ||
ade80dc8 MT |
948 | return 0; |
949 | } | |
950 | ||
d2a0ccc6 | 951 | static bool virtio_pci_query_guest_notifiers(DeviceState *d) |
5430a28f | 952 | { |
d2a0ccc6 | 953 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
5430a28f MT |
954 | return msix_enabled(&proxy->pci_dev); |
955 | } | |
956 | ||
2d620f59 | 957 | static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign) |
54dd9321 | 958 | { |
d2a0ccc6 | 959 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 | 960 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
181103cd | 961 | VirtioDeviceClass *k = VIRTIO_DEVICE_GET_CLASS(vdev); |
54dd9321 | 962 | int r, n; |
89d62be9 MT |
963 | bool with_irqfd = msix_enabled(&proxy->pci_dev) && |
964 | kvm_msi_via_irqfd_enabled(); | |
54dd9321 | 965 | |
87b3bd1c | 966 | nvqs = MIN(nvqs, VIRTIO_QUEUE_MAX); |
2d620f59 MT |
967 | |
968 | /* When deassigning, pass a consistent nvqs value | |
969 | * to avoid leaking notifiers. | |
970 | */ | |
971 | assert(assign || nvqs == proxy->nvqs_with_notifiers); | |
972 | ||
973 | proxy->nvqs_with_notifiers = nvqs; | |
974 | ||
7d37d351 | 975 | /* Must unset vector notifier while guest notifier is still assigned */ |
181103cd | 976 | if ((proxy->vector_irqfd || k->guest_notifier_mask) && !assign) { |
7d37d351 | 977 | msix_unset_vector_notifiers(&proxy->pci_dev); |
a38b2c49 MT |
978 | if (proxy->vector_irqfd) { |
979 | kvm_virtio_pci_vector_release(proxy, nvqs); | |
980 | g_free(proxy->vector_irqfd); | |
981 | proxy->vector_irqfd = NULL; | |
982 | } | |
7d37d351 JK |
983 | } |
984 | ||
2d620f59 | 985 | for (n = 0; n < nvqs; n++) { |
54dd9321 MT |
986 | if (!virtio_queue_get_num(vdev, n)) { |
987 | break; | |
988 | } | |
989 | ||
23fe2b3f | 990 | r = virtio_pci_set_guest_notifier(d, n, assign, with_irqfd); |
54dd9321 MT |
991 | if (r < 0) { |
992 | goto assign_error; | |
993 | } | |
994 | } | |
995 | ||
7d37d351 | 996 | /* Must set vector notifier after guest notifier has been assigned */ |
181103cd | 997 | if ((with_irqfd || k->guest_notifier_mask) && assign) { |
a38b2c49 MT |
998 | if (with_irqfd) { |
999 | proxy->vector_irqfd = | |
1000 | g_malloc0(sizeof(*proxy->vector_irqfd) * | |
1001 | msix_nr_vectors_allocated(&proxy->pci_dev)); | |
1002 | r = kvm_virtio_pci_vector_use(proxy, nvqs); | |
1003 | if (r < 0) { | |
1004 | goto assign_error; | |
1005 | } | |
774345f9 | 1006 | } |
7d37d351 | 1007 | r = msix_set_vector_notifiers(&proxy->pci_dev, |
a38b2c49 MT |
1008 | virtio_pci_vector_unmask, |
1009 | virtio_pci_vector_mask, | |
1010 | virtio_pci_vector_poll); | |
7d37d351 | 1011 | if (r < 0) { |
774345f9 | 1012 | goto notifiers_error; |
7d37d351 JK |
1013 | } |
1014 | } | |
1015 | ||
54dd9321 MT |
1016 | return 0; |
1017 | ||
774345f9 | 1018 | notifiers_error: |
a38b2c49 MT |
1019 | if (with_irqfd) { |
1020 | assert(assign); | |
1021 | kvm_virtio_pci_vector_release(proxy, nvqs); | |
1022 | } | |
774345f9 | 1023 | |
54dd9321 MT |
1024 | assign_error: |
1025 | /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */ | |
7d37d351 | 1026 | assert(assign); |
54dd9321 | 1027 | while (--n >= 0) { |
89d62be9 | 1028 | virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd); |
54dd9321 MT |
1029 | } |
1030 | return r; | |
1031 | } | |
1032 | ||
6f80e617 TB |
1033 | static int virtio_pci_set_host_notifier_mr(DeviceState *d, int n, |
1034 | MemoryRegion *mr, bool assign) | |
1035 | { | |
1036 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); | |
1037 | int offset; | |
1038 | ||
1039 | if (n >= VIRTIO_QUEUE_MAX || !virtio_pci_modern(proxy) || | |
1040 | virtio_pci_queue_mem_mult(proxy) != memory_region_size(mr)) { | |
1041 | return -1; | |
1042 | } | |
1043 | ||
1044 | if (assign) { | |
1045 | offset = virtio_pci_queue_mem_mult(proxy) * n; | |
1046 | memory_region_add_subregion_overlap(&proxy->notify.mr, offset, mr, 1); | |
1047 | } else { | |
1048 | memory_region_del_subregion(&proxy->notify.mr, mr); | |
1049 | } | |
1050 | ||
1051 | return 0; | |
1052 | } | |
1053 | ||
d2a0ccc6 | 1054 | static void virtio_pci_vmstate_change(DeviceState *d, bool running) |
25db9ebe | 1055 | { |
d2a0ccc6 | 1056 | VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d); |
a3fc66d9 | 1057 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
25db9ebe SH |
1058 | |
1059 | if (running) { | |
68a27b20 MT |
1060 | /* Old QEMU versions did not set bus master enable on status write. |
1061 | * Detect DRIVER set and enable it. | |
1062 | */ | |
1063 | if ((proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION) && | |
1064 | (vdev->status & VIRTIO_CONFIG_S_DRIVER) && | |
45363e46 | 1065 | !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) { |
68a27b20 MT |
1066 | pci_default_write_config(&proxy->pci_dev, PCI_COMMAND, |
1067 | proxy->pci_dev.config[PCI_COMMAND] | | |
1068 | PCI_COMMAND_MASTER, 1); | |
89c473fd | 1069 | } |
25db9ebe | 1070 | virtio_pci_start_ioeventfd(proxy); |
ade80dc8 | 1071 | } else { |
25db9ebe | 1072 | virtio_pci_stop_ioeventfd(proxy); |
ade80dc8 | 1073 | } |
ade80dc8 MT |
1074 | } |
1075 | ||
085bccb7 FK |
1076 | /* |
1077 | * virtio-pci: This is the PCIDevice which has a virtio-pci-bus. | |
1078 | */ | |
1079 | ||
e0d686bf JW |
1080 | static int virtio_pci_query_nvectors(DeviceState *d) |
1081 | { | |
1082 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1083 | ||
1084 | return proxy->nvectors; | |
1085 | } | |
1086 | ||
8607f5c3 JW |
1087 | static AddressSpace *virtio_pci_get_dma_as(DeviceState *d) |
1088 | { | |
1089 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1090 | PCIDevice *dev = &proxy->pci_dev; | |
1091 | ||
f0edf239 | 1092 | return pci_get_address_space(dev); |
8607f5c3 JW |
1093 | } |
1094 | ||
ada434cd | 1095 | static int virtio_pci_add_mem_cap(VirtIOPCIProxy *proxy, |
dfb8e184 MT |
1096 | struct virtio_pci_cap *cap) |
1097 | { | |
1098 | PCIDevice *dev = &proxy->pci_dev; | |
1099 | int offset; | |
1100 | ||
9a7c2a59 MZ |
1101 | offset = pci_add_capability(dev, PCI_CAP_ID_VNDR, 0, |
1102 | cap->cap_len, &error_abort); | |
dfb8e184 MT |
1103 | |
1104 | assert(cap->cap_len >= sizeof *cap); | |
1105 | memcpy(dev->config + offset + PCI_CAP_FLAGS, &cap->cap_len, | |
1106 | cap->cap_len - PCI_CAP_FLAGS); | |
ada434cd MT |
1107 | |
1108 | return offset; | |
dfb8e184 MT |
1109 | } |
1110 | ||
dfb8e184 MT |
1111 | static uint64_t virtio_pci_common_read(void *opaque, hwaddr addr, |
1112 | unsigned size) | |
1113 | { | |
1114 | VirtIOPCIProxy *proxy = opaque; | |
1115 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1116 | uint32_t val = 0; | |
1117 | int i; | |
1118 | ||
1119 | switch (addr) { | |
1120 | case VIRTIO_PCI_COMMON_DFSELECT: | |
1121 | val = proxy->dfselect; | |
1122 | break; | |
1123 | case VIRTIO_PCI_COMMON_DF: | |
1124 | if (proxy->dfselect <= 1) { | |
9b706dbb MT |
1125 | VirtioDeviceClass *vdc = VIRTIO_DEVICE_GET_CLASS(vdev); |
1126 | ||
1127 | val = (vdev->host_features & ~vdc->legacy_features) >> | |
5f456073 | 1128 | (32 * proxy->dfselect); |
dfb8e184 MT |
1129 | } |
1130 | break; | |
1131 | case VIRTIO_PCI_COMMON_GFSELECT: | |
1132 | val = proxy->gfselect; | |
1133 | break; | |
1134 | case VIRTIO_PCI_COMMON_GF: | |
3750dabc | 1135 | if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { |
dfb8e184 MT |
1136 | val = proxy->guest_features[proxy->gfselect]; |
1137 | } | |
1138 | break; | |
1139 | case VIRTIO_PCI_COMMON_MSIX: | |
1140 | val = vdev->config_vector; | |
1141 | break; | |
1142 | case VIRTIO_PCI_COMMON_NUMQ: | |
1143 | for (i = 0; i < VIRTIO_QUEUE_MAX; ++i) { | |
1144 | if (virtio_queue_get_num(vdev, i)) { | |
1145 | val = i + 1; | |
1146 | } | |
1147 | } | |
1148 | break; | |
1149 | case VIRTIO_PCI_COMMON_STATUS: | |
1150 | val = vdev->status; | |
1151 | break; | |
1152 | case VIRTIO_PCI_COMMON_CFGGENERATION: | |
b8f05908 | 1153 | val = vdev->generation; |
dfb8e184 MT |
1154 | break; |
1155 | case VIRTIO_PCI_COMMON_Q_SELECT: | |
1156 | val = vdev->queue_sel; | |
1157 | break; | |
1158 | case VIRTIO_PCI_COMMON_Q_SIZE: | |
1159 | val = virtio_queue_get_num(vdev, vdev->queue_sel); | |
1160 | break; | |
1161 | case VIRTIO_PCI_COMMON_Q_MSIX: | |
1162 | val = virtio_queue_vector(vdev, vdev->queue_sel); | |
1163 | break; | |
1164 | case VIRTIO_PCI_COMMON_Q_ENABLE: | |
1165 | val = proxy->vqs[vdev->queue_sel].enabled; | |
1166 | break; | |
1167 | case VIRTIO_PCI_COMMON_Q_NOFF: | |
1168 | /* Simply map queues in order */ | |
1169 | val = vdev->queue_sel; | |
1170 | break; | |
1171 | case VIRTIO_PCI_COMMON_Q_DESCLO: | |
1172 | val = proxy->vqs[vdev->queue_sel].desc[0]; | |
1173 | break; | |
1174 | case VIRTIO_PCI_COMMON_Q_DESCHI: | |
1175 | val = proxy->vqs[vdev->queue_sel].desc[1]; | |
1176 | break; | |
1177 | case VIRTIO_PCI_COMMON_Q_AVAILLO: | |
1178 | val = proxy->vqs[vdev->queue_sel].avail[0]; | |
1179 | break; | |
1180 | case VIRTIO_PCI_COMMON_Q_AVAILHI: | |
1181 | val = proxy->vqs[vdev->queue_sel].avail[1]; | |
1182 | break; | |
1183 | case VIRTIO_PCI_COMMON_Q_USEDLO: | |
1184 | val = proxy->vqs[vdev->queue_sel].used[0]; | |
1185 | break; | |
1186 | case VIRTIO_PCI_COMMON_Q_USEDHI: | |
1187 | val = proxy->vqs[vdev->queue_sel].used[1]; | |
1188 | break; | |
1189 | default: | |
1190 | val = 0; | |
1191 | } | |
1192 | ||
1193 | return val; | |
1194 | } | |
1195 | ||
1196 | static void virtio_pci_common_write(void *opaque, hwaddr addr, | |
1197 | uint64_t val, unsigned size) | |
1198 | { | |
1199 | VirtIOPCIProxy *proxy = opaque; | |
1200 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1201 | ||
1202 | switch (addr) { | |
1203 | case VIRTIO_PCI_COMMON_DFSELECT: | |
1204 | proxy->dfselect = val; | |
1205 | break; | |
1206 | case VIRTIO_PCI_COMMON_GFSELECT: | |
1207 | proxy->gfselect = val; | |
1208 | break; | |
1209 | case VIRTIO_PCI_COMMON_GF: | |
3750dabc | 1210 | if (proxy->gfselect < ARRAY_SIZE(proxy->guest_features)) { |
dfb8e184 MT |
1211 | proxy->guest_features[proxy->gfselect] = val; |
1212 | virtio_set_features(vdev, | |
1213 | (((uint64_t)proxy->guest_features[1]) << 32) | | |
1214 | proxy->guest_features[0]); | |
1215 | } | |
1216 | break; | |
1217 | case VIRTIO_PCI_COMMON_MSIX: | |
1218 | msix_vector_unuse(&proxy->pci_dev, vdev->config_vector); | |
1219 | /* Make it possible for guest to discover an error took place. */ | |
1220 | if (msix_vector_use(&proxy->pci_dev, val) < 0) { | |
1221 | val = VIRTIO_NO_VECTOR; | |
1222 | } | |
1223 | vdev->config_vector = val; | |
1224 | break; | |
1225 | case VIRTIO_PCI_COMMON_STATUS: | |
1226 | if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) { | |
1227 | virtio_pci_stop_ioeventfd(proxy); | |
1228 | } | |
1229 | ||
1230 | virtio_set_status(vdev, val & 0xFF); | |
1231 | ||
1232 | if (val & VIRTIO_CONFIG_S_DRIVER_OK) { | |
1233 | virtio_pci_start_ioeventfd(proxy); | |
1234 | } | |
1235 | ||
1236 | if (vdev->status == 0) { | |
75fd6f13 | 1237 | virtio_pci_reset(DEVICE(proxy)); |
dfb8e184 MT |
1238 | } |
1239 | ||
1240 | break; | |
1241 | case VIRTIO_PCI_COMMON_Q_SELECT: | |
1242 | if (val < VIRTIO_QUEUE_MAX) { | |
1243 | vdev->queue_sel = val; | |
1244 | } | |
1245 | break; | |
1246 | case VIRTIO_PCI_COMMON_Q_SIZE: | |
1247 | proxy->vqs[vdev->queue_sel].num = val; | |
1248 | break; | |
1249 | case VIRTIO_PCI_COMMON_Q_MSIX: | |
1250 | msix_vector_unuse(&proxy->pci_dev, | |
1251 | virtio_queue_vector(vdev, vdev->queue_sel)); | |
1252 | /* Make it possible for guest to discover an error took place. */ | |
1253 | if (msix_vector_use(&proxy->pci_dev, val) < 0) { | |
1254 | val = VIRTIO_NO_VECTOR; | |
1255 | } | |
1256 | virtio_queue_set_vector(vdev, vdev->queue_sel, val); | |
1257 | break; | |
1258 | case VIRTIO_PCI_COMMON_Q_ENABLE: | |
dfb8e184 MT |
1259 | virtio_queue_set_num(vdev, vdev->queue_sel, |
1260 | proxy->vqs[vdev->queue_sel].num); | |
1261 | virtio_queue_set_rings(vdev, vdev->queue_sel, | |
1262 | ((uint64_t)proxy->vqs[vdev->queue_sel].desc[1]) << 32 | | |
1263 | proxy->vqs[vdev->queue_sel].desc[0], | |
1264 | ((uint64_t)proxy->vqs[vdev->queue_sel].avail[1]) << 32 | | |
1265 | proxy->vqs[vdev->queue_sel].avail[0], | |
1266 | ((uint64_t)proxy->vqs[vdev->queue_sel].used[1]) << 32 | | |
1267 | proxy->vqs[vdev->queue_sel].used[0]); | |
393f04d3 | 1268 | proxy->vqs[vdev->queue_sel].enabled = 1; |
dfb8e184 MT |
1269 | break; |
1270 | case VIRTIO_PCI_COMMON_Q_DESCLO: | |
1271 | proxy->vqs[vdev->queue_sel].desc[0] = val; | |
1272 | break; | |
1273 | case VIRTIO_PCI_COMMON_Q_DESCHI: | |
1274 | proxy->vqs[vdev->queue_sel].desc[1] = val; | |
1275 | break; | |
1276 | case VIRTIO_PCI_COMMON_Q_AVAILLO: | |
1277 | proxy->vqs[vdev->queue_sel].avail[0] = val; | |
1278 | break; | |
1279 | case VIRTIO_PCI_COMMON_Q_AVAILHI: | |
1280 | proxy->vqs[vdev->queue_sel].avail[1] = val; | |
1281 | break; | |
1282 | case VIRTIO_PCI_COMMON_Q_USEDLO: | |
1283 | proxy->vqs[vdev->queue_sel].used[0] = val; | |
1284 | break; | |
1285 | case VIRTIO_PCI_COMMON_Q_USEDHI: | |
1286 | proxy->vqs[vdev->queue_sel].used[1] = val; | |
1287 | break; | |
1288 | default: | |
1289 | break; | |
1290 | } | |
1291 | } | |
1292 | ||
1293 | ||
1294 | static uint64_t virtio_pci_notify_read(void *opaque, hwaddr addr, | |
1295 | unsigned size) | |
1296 | { | |
1297 | return 0; | |
1298 | } | |
1299 | ||
1300 | static void virtio_pci_notify_write(void *opaque, hwaddr addr, | |
1301 | uint64_t val, unsigned size) | |
1302 | { | |
1303 | VirtIODevice *vdev = opaque; | |
d9997d89 MA |
1304 | VirtIOPCIProxy *proxy = VIRTIO_PCI(DEVICE(vdev)->parent_bus->parent); |
1305 | unsigned queue = addr / virtio_pci_queue_mem_mult(proxy); | |
dfb8e184 MT |
1306 | |
1307 | if (queue < VIRTIO_QUEUE_MAX) { | |
1308 | virtio_queue_notify(vdev, queue); | |
1309 | } | |
1310 | } | |
1311 | ||
9824d2a3 JW |
1312 | static void virtio_pci_notify_write_pio(void *opaque, hwaddr addr, |
1313 | uint64_t val, unsigned size) | |
1314 | { | |
1315 | VirtIODevice *vdev = opaque; | |
1316 | unsigned queue = val; | |
1317 | ||
1318 | if (queue < VIRTIO_QUEUE_MAX) { | |
1319 | virtio_queue_notify(vdev, queue); | |
1320 | } | |
1321 | } | |
1322 | ||
dfb8e184 MT |
1323 | static uint64_t virtio_pci_isr_read(void *opaque, hwaddr addr, |
1324 | unsigned size) | |
1325 | { | |
1326 | VirtIOPCIProxy *proxy = opaque; | |
1327 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
0687c37c | 1328 | uint64_t val = atomic_xchg(&vdev->isr, 0); |
dfb8e184 MT |
1329 | pci_irq_deassert(&proxy->pci_dev); |
1330 | ||
1331 | return val; | |
1332 | } | |
1333 | ||
1334 | static void virtio_pci_isr_write(void *opaque, hwaddr addr, | |
1335 | uint64_t val, unsigned size) | |
1336 | { | |
1337 | } | |
1338 | ||
1339 | static uint64_t virtio_pci_device_read(void *opaque, hwaddr addr, | |
1340 | unsigned size) | |
1341 | { | |
1342 | VirtIODevice *vdev = opaque; | |
1343 | uint64_t val = 0; | |
1344 | ||
1345 | switch (size) { | |
1346 | case 1: | |
54c720d4 | 1347 | val = virtio_config_modern_readb(vdev, addr); |
dfb8e184 MT |
1348 | break; |
1349 | case 2: | |
54c720d4 | 1350 | val = virtio_config_modern_readw(vdev, addr); |
dfb8e184 MT |
1351 | break; |
1352 | case 4: | |
54c720d4 | 1353 | val = virtio_config_modern_readl(vdev, addr); |
dfb8e184 MT |
1354 | break; |
1355 | } | |
1356 | return val; | |
1357 | } | |
1358 | ||
1359 | static void virtio_pci_device_write(void *opaque, hwaddr addr, | |
1360 | uint64_t val, unsigned size) | |
1361 | { | |
1362 | VirtIODevice *vdev = opaque; | |
1363 | switch (size) { | |
1364 | case 1: | |
54c720d4 | 1365 | virtio_config_modern_writeb(vdev, addr, val); |
dfb8e184 MT |
1366 | break; |
1367 | case 2: | |
54c720d4 | 1368 | virtio_config_modern_writew(vdev, addr, val); |
dfb8e184 MT |
1369 | break; |
1370 | case 4: | |
54c720d4 | 1371 | virtio_config_modern_writel(vdev, addr, val); |
dfb8e184 MT |
1372 | break; |
1373 | } | |
1374 | } | |
1375 | ||
1141ce21 GH |
1376 | static void virtio_pci_modern_regions_init(VirtIOPCIProxy *proxy) |
1377 | { | |
1378 | static const MemoryRegionOps common_ops = { | |
1379 | .read = virtio_pci_common_read, | |
1380 | .write = virtio_pci_common_write, | |
1381 | .impl = { | |
1382 | .min_access_size = 1, | |
1383 | .max_access_size = 4, | |
1384 | }, | |
1385 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1386 | }; | |
1387 | static const MemoryRegionOps isr_ops = { | |
1388 | .read = virtio_pci_isr_read, | |
1389 | .write = virtio_pci_isr_write, | |
1390 | .impl = { | |
1391 | .min_access_size = 1, | |
1392 | .max_access_size = 4, | |
1393 | }, | |
1394 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1395 | }; | |
1396 | static const MemoryRegionOps device_ops = { | |
1397 | .read = virtio_pci_device_read, | |
1398 | .write = virtio_pci_device_write, | |
1399 | .impl = { | |
1400 | .min_access_size = 1, | |
1401 | .max_access_size = 4, | |
1402 | }, | |
1403 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1404 | }; | |
1405 | static const MemoryRegionOps notify_ops = { | |
1406 | .read = virtio_pci_notify_read, | |
1407 | .write = virtio_pci_notify_write, | |
1408 | .impl = { | |
1409 | .min_access_size = 1, | |
1410 | .max_access_size = 4, | |
1411 | }, | |
1412 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1413 | }; | |
9824d2a3 JW |
1414 | static const MemoryRegionOps notify_pio_ops = { |
1415 | .read = virtio_pci_notify_read, | |
1416 | .write = virtio_pci_notify_write_pio, | |
1417 | .impl = { | |
1418 | .min_access_size = 1, | |
1419 | .max_access_size = 4, | |
1420 | }, | |
1421 | .endianness = DEVICE_LITTLE_ENDIAN, | |
1422 | }; | |
1423 | ||
1141ce21 GH |
1424 | |
1425 | memory_region_init_io(&proxy->common.mr, OBJECT(proxy), | |
1426 | &common_ops, | |
1427 | proxy, | |
b6ce27a5 GH |
1428 | "virtio-pci-common", |
1429 | proxy->common.size); | |
a3cc2e81 | 1430 | |
1141ce21 GH |
1431 | memory_region_init_io(&proxy->isr.mr, OBJECT(proxy), |
1432 | &isr_ops, | |
1433 | proxy, | |
b6ce27a5 GH |
1434 | "virtio-pci-isr", |
1435 | proxy->isr.size); | |
a3cc2e81 | 1436 | |
1141ce21 GH |
1437 | memory_region_init_io(&proxy->device.mr, OBJECT(proxy), |
1438 | &device_ops, | |
1439 | virtio_bus_get_device(&proxy->bus), | |
b6ce27a5 GH |
1440 | "virtio-pci-device", |
1441 | proxy->device.size); | |
a3cc2e81 | 1442 | |
1141ce21 GH |
1443 | memory_region_init_io(&proxy->notify.mr, OBJECT(proxy), |
1444 | ¬ify_ops, | |
1445 | virtio_bus_get_device(&proxy->bus), | |
1446 | "virtio-pci-notify", | |
b6ce27a5 | 1447 | proxy->notify.size); |
9824d2a3 JW |
1448 | |
1449 | memory_region_init_io(&proxy->notify_pio.mr, OBJECT(proxy), | |
1450 | ¬ify_pio_ops, | |
1451 | virtio_bus_get_device(&proxy->bus), | |
1452 | "virtio-pci-notify-pio", | |
e3aab6c7 | 1453 | proxy->notify_pio.size); |
a3cc2e81 GH |
1454 | } |
1455 | ||
1456 | static void virtio_pci_modern_region_map(VirtIOPCIProxy *proxy, | |
54790d71 | 1457 | VirtIOPCIRegion *region, |
9824d2a3 JW |
1458 | struct virtio_pci_cap *cap, |
1459 | MemoryRegion *mr, | |
1460 | uint8_t bar) | |
a3cc2e81 | 1461 | { |
9824d2a3 | 1462 | memory_region_add_subregion(mr, region->offset, ®ion->mr); |
54790d71 | 1463 | |
fc004905 | 1464 | cap->cfg_type = region->type; |
9824d2a3 | 1465 | cap->bar = bar; |
54790d71 | 1466 | cap->offset = cpu_to_le32(region->offset); |
b6ce27a5 | 1467 | cap->length = cpu_to_le32(region->size); |
54790d71 | 1468 | virtio_pci_add_mem_cap(proxy, cap); |
9824d2a3 JW |
1469 | |
1470 | } | |
1471 | ||
1472 | static void virtio_pci_modern_mem_region_map(VirtIOPCIProxy *proxy, | |
1473 | VirtIOPCIRegion *region, | |
1474 | struct virtio_pci_cap *cap) | |
1475 | { | |
1476 | virtio_pci_modern_region_map(proxy, region, cap, | |
7a25126d | 1477 | &proxy->modern_bar, proxy->modern_mem_bar_idx); |
1141ce21 | 1478 | } |
dfb8e184 | 1479 | |
9824d2a3 JW |
1480 | static void virtio_pci_modern_io_region_map(VirtIOPCIProxy *proxy, |
1481 | VirtIOPCIRegion *region, | |
1482 | struct virtio_pci_cap *cap) | |
1483 | { | |
1484 | virtio_pci_modern_region_map(proxy, region, cap, | |
7a25126d | 1485 | &proxy->io_bar, proxy->modern_io_bar_idx); |
9824d2a3 JW |
1486 | } |
1487 | ||
1488 | static void virtio_pci_modern_mem_region_unmap(VirtIOPCIProxy *proxy, | |
1489 | VirtIOPCIRegion *region) | |
27462695 MT |
1490 | { |
1491 | memory_region_del_subregion(&proxy->modern_bar, | |
1492 | ®ion->mr); | |
1493 | } | |
1494 | ||
9824d2a3 JW |
1495 | static void virtio_pci_modern_io_region_unmap(VirtIOPCIProxy *proxy, |
1496 | VirtIOPCIRegion *region) | |
1497 | { | |
1498 | memory_region_del_subregion(&proxy->io_bar, | |
1499 | ®ion->mr); | |
1500 | } | |
1501 | ||
d1b4259f MC |
1502 | static void virtio_pci_pre_plugged(DeviceState *d, Error **errp) |
1503 | { | |
1504 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1505 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); | |
1506 | ||
1507 | if (virtio_pci_modern(proxy)) { | |
1508 | virtio_add_feature(&vdev->host_features, VIRTIO_F_VERSION_1); | |
1509 | } | |
1510 | ||
1511 | virtio_add_feature(&vdev->host_features, VIRTIO_F_BAD_FEATURE); | |
1512 | } | |
1513 | ||
085bccb7 | 1514 | /* This is called by virtio-bus just after the device is plugged. */ |
e8398045 | 1515 | static void virtio_pci_device_plugged(DeviceState *d, Error **errp) |
085bccb7 FK |
1516 | { |
1517 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); | |
1518 | VirtioBusState *bus = &proxy->bus; | |
9a4c0e22 | 1519 | bool legacy = virtio_pci_legacy(proxy); |
d1b4259f | 1520 | bool modern; |
9824d2a3 | 1521 | bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; |
085bccb7 FK |
1522 | uint8_t *config; |
1523 | uint32_t size; | |
6b8f1020 | 1524 | VirtIODevice *vdev = virtio_bus_get_device(&proxy->bus); |
085bccb7 | 1525 | |
d1b4259f MC |
1526 | /* |
1527 | * Virtio capabilities present without | |
1528 | * VIRTIO_F_VERSION_1 confuses guests | |
1529 | */ | |
66d1c4c1 MC |
1530 | if (!proxy->ignore_backend_features && |
1531 | !virtio_has_feature(vdev->host_features, VIRTIO_F_VERSION_1)) { | |
d1b4259f MC |
1532 | virtio_pci_disable_modern(proxy); |
1533 | ||
1534 | if (!legacy) { | |
1535 | error_setg(errp, "Device doesn't support modern mode, and legacy" | |
1536 | " mode is disabled"); | |
1537 | error_append_hint(errp, "Set disable-legacy to off\n"); | |
1538 | ||
1539 | return; | |
1540 | } | |
1541 | } | |
1542 | ||
1543 | modern = virtio_pci_modern(proxy); | |
1544 | ||
085bccb7 FK |
1545 | config = proxy->pci_dev.config; |
1546 | if (proxy->class_code) { | |
1547 | pci_config_set_class(config, proxy->class_code); | |
1548 | } | |
e266d421 GH |
1549 | |
1550 | if (legacy) { | |
8607f5c3 JW |
1551 | if (virtio_host_has_feature(vdev, VIRTIO_F_IOMMU_PLATFORM)) { |
1552 | error_setg(errp, "VIRTIO_F_IOMMU_PLATFORM was supported by" | |
2080a29f | 1553 | " neither legacy nor transitional device"); |
8607f5c3 JW |
1554 | return ; |
1555 | } | |
f2bc54de LP |
1556 | /* |
1557 | * Legacy and transitional devices use specific subsystem IDs. | |
1558 | * Note that the subsystem vendor ID (config + PCI_SUBSYSTEM_VENDOR_ID) | |
1559 | * is set to PCI_SUBVENDOR_ID_REDHAT_QUMRANET by default. | |
1560 | */ | |
e266d421 GH |
1561 | pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus)); |
1562 | } else { | |
1563 | /* pure virtio-1.0 */ | |
1564 | pci_set_word(config + PCI_VENDOR_ID, | |
1565 | PCI_VENDOR_ID_REDHAT_QUMRANET); | |
1566 | pci_set_word(config + PCI_DEVICE_ID, | |
1567 | 0x1040 + virtio_bus_get_vdev_id(bus)); | |
1568 | pci_config_set_revision(config, 1); | |
1569 | } | |
085bccb7 FK |
1570 | config[PCI_INTERRUPT_PIN] = 1; |
1571 | ||
dfb8e184 | 1572 | |
e266d421 | 1573 | if (modern) { |
cc52ea90 GH |
1574 | struct virtio_pci_cap cap = { |
1575 | .cap_len = sizeof cap, | |
dfb8e184 MT |
1576 | }; |
1577 | struct virtio_pci_notify_cap notify = { | |
dfb8e184 | 1578 | .cap.cap_len = sizeof notify, |
dfb8e184 | 1579 | .notify_off_multiplier = |
d9997d89 | 1580 | cpu_to_le32(virtio_pci_queue_mem_mult(proxy)), |
dfb8e184 | 1581 | }; |
ada434cd MT |
1582 | struct virtio_pci_cfg_cap cfg = { |
1583 | .cap.cap_len = sizeof cfg, | |
1584 | .cap.cfg_type = VIRTIO_PCI_CAP_PCI_CFG, | |
1585 | }; | |
9824d2a3 JW |
1586 | struct virtio_pci_notify_cap notify_pio = { |
1587 | .cap.cap_len = sizeof notify, | |
1588 | .notify_off_multiplier = cpu_to_le32(0x0), | |
1589 | }; | |
dfb8e184 | 1590 | |
9824d2a3 | 1591 | struct virtio_pci_cfg_cap *cfg_mask; |
dfb8e184 | 1592 | |
1141ce21 | 1593 | virtio_pci_modern_regions_init(proxy); |
9824d2a3 JW |
1594 | |
1595 | virtio_pci_modern_mem_region_map(proxy, &proxy->common, &cap); | |
1596 | virtio_pci_modern_mem_region_map(proxy, &proxy->isr, &cap); | |
1597 | virtio_pci_modern_mem_region_map(proxy, &proxy->device, &cap); | |
1598 | virtio_pci_modern_mem_region_map(proxy, &proxy->notify, ¬ify.cap); | |
1599 | ||
1600 | if (modern_pio) { | |
1601 | memory_region_init(&proxy->io_bar, OBJECT(proxy), | |
1602 | "virtio-pci-io", 0x4); | |
1603 | ||
7a25126d | 1604 | pci_register_bar(&proxy->pci_dev, proxy->modern_io_bar_idx, |
9824d2a3 JW |
1605 | PCI_BASE_ADDRESS_SPACE_IO, &proxy->io_bar); |
1606 | ||
1607 | virtio_pci_modern_io_region_map(proxy, &proxy->notify_pio, | |
1608 | ¬ify_pio.cap); | |
1609 | } | |
ada434cd | 1610 | |
7a25126d | 1611 | pci_register_bar(&proxy->pci_dev, proxy->modern_mem_bar_idx, |
4e93a68e GH |
1612 | PCI_BASE_ADDRESS_SPACE_MEMORY | |
1613 | PCI_BASE_ADDRESS_MEM_PREFETCH | | |
1614 | PCI_BASE_ADDRESS_MEM_TYPE_64, | |
dfb8e184 | 1615 | &proxy->modern_bar); |
ada434cd MT |
1616 | |
1617 | proxy->config_cap = virtio_pci_add_mem_cap(proxy, &cfg.cap); | |
1618 | cfg_mask = (void *)(proxy->pci_dev.wmask + proxy->config_cap); | |
1619 | pci_set_byte(&cfg_mask->cap.bar, ~0x0); | |
1620 | pci_set_long((uint8_t *)&cfg_mask->cap.offset, ~0x0); | |
1621 | pci_set_long((uint8_t *)&cfg_mask->cap.length, ~0x0); | |
1622 | pci_set_long(cfg_mask->pci_cfg_data, ~0x0); | |
dfb8e184 MT |
1623 | } |
1624 | ||
0d583647 RH |
1625 | if (proxy->nvectors) { |
1626 | int err = msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, | |
ee640c62 | 1627 | proxy->msix_bar_idx, NULL); |
0d583647 | 1628 | if (err) { |
ee640c62 | 1629 | /* Notice when a system that supports MSIx can't initialize it */ |
0d583647 | 1630 | if (err != -ENOTSUP) { |
0765691e MA |
1631 | warn_report("unable to init msix vectors to %" PRIu32, |
1632 | proxy->nvectors); | |
0d583647 RH |
1633 | } |
1634 | proxy->nvectors = 0; | |
1635 | } | |
085bccb7 FK |
1636 | } |
1637 | ||
1638 | proxy->pci_dev.config_write = virtio_write_config; | |
ada434cd | 1639 | proxy->pci_dev.config_read = virtio_read_config; |
085bccb7 | 1640 | |
e266d421 GH |
1641 | if (legacy) { |
1642 | size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) | |
1643 | + virtio_bus_get_vdev_config_len(bus); | |
1d0148fe | 1644 | size = pow2ceil(size); |
085bccb7 | 1645 | |
e266d421 GH |
1646 | memory_region_init_io(&proxy->bar, OBJECT(proxy), |
1647 | &virtio_pci_config_ops, | |
1648 | proxy, "virtio-pci", size); | |
dfb8e184 | 1649 | |
7a25126d | 1650 | pci_register_bar(&proxy->pci_dev, proxy->legacy_io_bar_idx, |
23c5e397 | 1651 | PCI_BASE_ADDRESS_SPACE_IO, &proxy->bar); |
e266d421 | 1652 | } |
085bccb7 FK |
1653 | } |
1654 | ||
06a13073 PB |
1655 | static void virtio_pci_device_unplugged(DeviceState *d) |
1656 | { | |
06a13073 | 1657 | VirtIOPCIProxy *proxy = VIRTIO_PCI(d); |
9a4c0e22 | 1658 | bool modern = virtio_pci_modern(proxy); |
9824d2a3 | 1659 | bool modern_pio = proxy->flags & VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY; |
06a13073 PB |
1660 | |
1661 | virtio_pci_stop_ioeventfd(proxy); | |
27462695 MT |
1662 | |
1663 | if (modern) { | |
9824d2a3 JW |
1664 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->common); |
1665 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->isr); | |
1666 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->device); | |
1667 | virtio_pci_modern_mem_region_unmap(proxy, &proxy->notify); | |
1668 | if (modern_pio) { | |
1669 | virtio_pci_modern_io_region_unmap(proxy, &proxy->notify_pio); | |
1670 | } | |
27462695 | 1671 | } |
06a13073 PB |
1672 | } |
1673 | ||
/*
 * Common PCI realize for all virtio PCI proxies.
 *
 * Lays out the default BAR assignment and the modern capability
 * regions, decides the device mode (legacy / modern / transitional)
 * from the disable-legacy / disable-modern properties and the bus
 * type, optionally adds PCIe + Power Management capabilities, creates
 * the internal virtio bus and finally chains to the subclass realize
 * hook (k->realize).  Errors are reported through @errp.
 */
static void virtio_pci_realize(PCIDevice *pci_dev, Error **errp)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
    VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
    /* An express, non-root bus means we sit below a PCIe port. */
    bool pcie_port = pci_bus_is_express(pci_get_bus(pci_dev)) &&
                     !pci_bus_is_root(pci_get_bus(pci_dev));

    /* Without plentiful ioeventfds in KVM, don't use them at all. */
    if (kvm_enabled() && !kvm_has_many_ioeventfds()) {
        proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
    }

    /*
     * virtio pci bar layout used by default.
     * subclasses can re-arrange things if needed.
     *
     * region 0 -- virtio legacy io bar
     * region 1 -- msi-x bar
     * region 4+5 -- virtio modern memory (64bit) bar
     *
     */
    proxy->legacy_io_bar_idx  = 0;
    proxy->msix_bar_idx       = 1;
    proxy->modern_io_bar_idx  = 2;
    proxy->modern_mem_bar_idx = 4;

    /* Modern capability regions, laid out back to back in 4K pages. */
    proxy->common.offset = 0x0;
    proxy->common.size = 0x1000;
    proxy->common.type = VIRTIO_PCI_CAP_COMMON_CFG;

    proxy->isr.offset = 0x1000;
    proxy->isr.size = 0x1000;
    proxy->isr.type = VIRTIO_PCI_CAP_ISR_CFG;

    proxy->device.offset = 0x2000;
    proxy->device.size = 0x1000;
    proxy->device.type = VIRTIO_PCI_CAP_DEVICE_CFG;

    /* One notify slot per possible queue. */
    proxy->notify.offset = 0x3000;
    proxy->notify.size = virtio_pci_queue_mem_mult(proxy) * VIRTIO_QUEUE_MAX;
    proxy->notify.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    proxy->notify_pio.offset = 0x0;
    proxy->notify_pio.size = 0x4;
    proxy->notify_pio.type = VIRTIO_PCI_CAP_NOTIFY_CFG;

    /* subclasses can enforce modern, so do this unconditionally */
    memory_region_init(&proxy->modern_bar, OBJECT(proxy), "virtio-pci",
                       /* PCI BAR regions must be powers of 2 */
                       pow2ceil(proxy->notify.offset + proxy->notify.size));

    /* Legacy is disabled explicitly, or by default when on a PCIe port. */
    if ((proxy->disable_legacy == ON_OFF_AUTO_ON) ||
        ((proxy->disable_legacy == ON_OFF_AUTO_AUTO) && pcie_port)) {
        if (proxy->disable_modern) {
            error_setg(errp, "device cannot work as neither modern nor "
                       "legacy mode is enabled");
            error_append_hint(errp, "Set either disable-modern or "
                              "disable-legacy to off\n");
            return;
        }
        proxy->mode = VIRTIO_PCI_MODE_MODERN;
    } else {
        if (proxy->disable_modern) {
            proxy->mode = VIRTIO_PCI_MODE_LEGACY;
        } else {
            proxy->mode = VIRTIO_PCI_MODE_TRANSITIONAL;
        }
    }

    if (pcie_port && pci_is_express(pci_dev)) {
        int pos;

        pos = pcie_endpoint_cap_init(pci_dev, 0);
        assert(pos > 0);

        pos = pci_add_capability(pci_dev, PCI_CAP_ID_PM, 0,
                                 PCI_PM_SIZEOF, errp);
        if (pos < 0) {
            return;
        }

        pci_dev->exp.pm_cap = pos;

        /*
         * Indicates that this function complies with revision 1.2 of the
         * PCI Power Management Interface Specification.
         */
        pci_set_word(pci_dev->config + pos + PCI_PM_PMC, 0x3);

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_DEVERR) {
            /* Init error enabling flags */
            pcie_cap_deverr_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_LNKCTL) {
            /* Init Link Control Register */
            pcie_cap_lnkctl_init(pci_dev);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_INIT_PM) {
            /* Init Power Management Control Register */
            pci_set_word(pci_dev->wmask + pos + PCI_PM_CTRL,
                         PCI_PM_CTRL_STATE_MASK);
        }

        if (proxy->flags & VIRTIO_PCI_FLAG_ATS) {
            pcie_ats_init(pci_dev, 256);
        }

    } else {
        /*
         * make future invocations of pci_is_express() return false
         * and pci_config_size() return PCI_CONFIG_SPACE_SIZE.
         */
        pci_dev->cap_present &= ~QEMU_PCI_CAP_EXPRESS;
    }

    virtio_pci_bus_new(&proxy->bus, sizeof(proxy->bus), proxy);
    if (k->realize) {
        k->realize(proxy, errp);
    }
}
1795 | ||
/*
 * PCI-level teardown: release the exclusive MSI-X BAR state created by
 * msix_init_exclusive_bar() when the device was plugged.
 */
static void virtio_pci_exit(PCIDevice *pci_dev)
{
    msix_uninit_exclusive_bar(pci_dev);
}
1800 | ||
59ccd20a | 1801 | static void virtio_pci_reset(DeviceState *qdev) |
085bccb7 FK |
1802 | { |
1803 | VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); | |
1804 | VirtioBusState *bus = VIRTIO_BUS(&proxy->bus); | |
c2cabb34 | 1805 | PCIDevice *dev = PCI_DEVICE(qdev); |
393f04d3 JW |
1806 | int i; |
1807 | ||
085bccb7 FK |
1808 | virtio_pci_stop_ioeventfd(proxy); |
1809 | virtio_bus_reset(bus); | |
1810 | msix_unuse_all_vectors(&proxy->pci_dev); | |
393f04d3 JW |
1811 | |
1812 | for (i = 0; i < VIRTIO_QUEUE_MAX; i++) { | |
1813 | proxy->vqs[i].enabled = 0; | |
60a8d802 JW |
1814 | proxy->vqs[i].num = 0; |
1815 | proxy->vqs[i].desc[0] = proxy->vqs[i].desc[1] = 0; | |
1816 | proxy->vqs[i].avail[0] = proxy->vqs[i].avail[1] = 0; | |
1817 | proxy->vqs[i].used[0] = proxy->vqs[i].used[1] = 0; | |
393f04d3 | 1818 | } |
c2cabb34 MA |
1819 | |
1820 | if (pci_is_express(dev)) { | |
1821 | pcie_cap_deverr_reset(dev); | |
d584f1b9 | 1822 | pcie_cap_lnkctl_reset(dev); |
27ce0f3a MA |
1823 | |
1824 | pci_set_word(dev->config + dev->exp.pm_cap + PCI_PM_CTRL, 0); | |
c2cabb34 | 1825 | } |
085bccb7 FK |
1826 | } |
1827 | ||
/* Properties common to every virtio PCI proxy (compat and tuning knobs). */
static Property virtio_pci_properties[] = {
    DEFINE_PROP_BIT("virtio-pci-bus-master-bug-migration", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_BUS_MASTER_BUG_MIGRATION_BIT, false),
    DEFINE_PROP_BIT("migrate-extra", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MIGRATE_EXTRA_BIT, true),
    /* Expose a PIO notify region in addition to the MMIO one. */
    DEFINE_PROP_BIT("modern-pio-notify", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_MODERN_PIO_NOTIFY_BIT, false),
    DEFINE_PROP_BIT("x-disable-pcie", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_DISABLE_PCIE_BIT, false),
    DEFINE_PROP_BIT("page-per-vq", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_PAGE_PER_VQ_BIT, false),
    DEFINE_PROP_BOOL("x-ignore-backend-features", VirtIOPCIProxy,
                     ignore_backend_features, false),
    DEFINE_PROP_BIT("ats", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_ATS_BIT, false),
    /* x-pcie-*-init flags gate the optional PCIe capability setup in
     * virtio_pci_realize (deverr / link control / power management). */
    DEFINE_PROP_BIT("x-pcie-deverr-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_DEVERR_BIT, true),
    DEFINE_PROP_BIT("x-pcie-lnkctl-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_LNKCTL_BIT, true),
    DEFINE_PROP_BIT("x-pcie-pm-init", VirtIOPCIProxy, flags,
                    VIRTIO_PCI_FLAG_INIT_PM_BIT, true),
    DEFINE_PROP_END_OF_LIST(),
};
1851 | ||
0560b0e9 SL |
1852 | static void virtio_pci_dc_realize(DeviceState *qdev, Error **errp) |
1853 | { | |
1854 | VirtioPCIClass *vpciklass = VIRTIO_PCI_GET_CLASS(qdev); | |
1855 | VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev); | |
1856 | PCIDevice *pci_dev = &proxy->pci_dev; | |
1857 | ||
1858 | if (!(proxy->flags & VIRTIO_PCI_FLAG_DISABLE_PCIE) && | |
9a4c0e22 | 1859 | virtio_pci_modern(proxy)) { |
0560b0e9 SL |
1860 | pci_dev->cap_present |= QEMU_PCI_CAP_EXPRESS; |
1861 | } | |
1862 | ||
1863 | vpciklass->parent_dc_realize(qdev, errp); | |
1864 | } | |
1865 | ||
085bccb7 FK |
/*
 * Class init for the abstract TYPE_VIRTIO_PCI base type: installs the
 * shared property list, the realize/exit/reset hooks and the common
 * PCI identity (Red Hat / Qumranet vendor ID, virtio ABI revision).
 */
static void virtio_pci_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);
    PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
    VirtioPCIClass *vpciklass = VIRTIO_PCI_CLASS(klass);

    dc->props = virtio_pci_properties;
    k->realize = virtio_pci_realize;
    k->exit = virtio_pci_exit;
    k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
    k->revision = VIRTIO_PCI_ABI_VERSION;
    k->class_id = PCI_CLASS_OTHERS;
    /* Interpose virtio_pci_dc_realize ahead of the parent realize so
     * the PCIe capability bit is decided before PCI realize runs. */
    device_class_set_parent_realize(dc, virtio_pci_dc_realize,
                                    &vpciklass->parent_dc_realize);
    dc->reset = virtio_pci_reset;
}
1882 | ||
/* Abstract base QOM type all virtio PCI proxy devices derive from. */
static const TypeInfo virtio_pci_info = {
    .name          = TYPE_VIRTIO_PCI,
    .parent        = TYPE_PCI_DEVICE,
    .instance_size = sizeof(VirtIOPCIProxy),
    .class_init    = virtio_pci_class_init,
    .class_size    = sizeof(VirtioPCIClass),
    .abstract      = true,
};
1891 | ||
a4ee4c8b EH |
/* Properties only offered on the user-creatable "generic" variants:
 * they let the user pick legacy/modern mode explicitly. */
static Property virtio_pci_generic_properties[] = {
    DEFINE_PROP_ON_OFF_AUTO("disable-legacy", VirtIOPCIProxy, disable_legacy,
                            ON_OFF_AUTO_AUTO),
    DEFINE_PROP_BOOL("disable-modern", VirtIOPCIProxy, disable_modern, false),
    DEFINE_PROP_END_OF_LIST(),
};
1898 | ||
/*
 * Class init for the base type registered by virtio_pci_types_register():
 * forwards to the device-specific class_init stored in class_data, if any.
 */
static void virtio_pci_base_class_init(ObjectClass *klass, void *data)
{
    const VirtioPCIDeviceTypeInfo *t = data;
    if (t->class_init) {
        t->class_init(klass, NULL);
    }
}
1906 | ||
/* Class init for the generic variant: adds the disable-legacy /
 * disable-modern user properties. */
static void virtio_pci_generic_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->props = virtio_pci_generic_properties;
}
1913 | ||
/* Used when the generic type and the base type is the same: run both
 * the base class_init (device-specific hook) and the generic one
 * (user-visible mode properties) on a single class. */
static void virtio_pci_generic_base_class_init(ObjectClass *klass, void *data)
{
    virtio_pci_base_class_init(klass, data);
    virtio_pci_generic_class_init(klass, NULL);
}
1920 | ||
/* Transitional variant: force both legacy and modern interfaces on. */
static void virtio_pci_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_OFF;
    proxy->disable_modern = false;
}
1928 | ||
/* Non-transitional variant: modern-only, legacy forced off. */
static void virtio_pci_non_transitional_instance_init(Object *obj)
{
    VirtIOPCIProxy *proxy = VIRTIO_PCI(obj);

    proxy->disable_legacy = ON_OFF_AUTO_ON;
    proxy->disable_modern = false;
}
1936 | ||
/*
 * Register the QOM type family for one virtio PCI device described by
 * @t: an (abstract) base type plus optional "generic", transitional
 * and non-transitional variants derived from it.  When no separate
 * base name is given, a single concrete generic type is registered
 * under the generic name instead.
 */
void virtio_pci_types_register(const VirtioPCIDeviceTypeInfo *t)
{
    TypeInfo base_type_info = {
        .name          = t->base_name,
        .parent        = t->parent ? t->parent : TYPE_VIRTIO_PCI,
        .instance_size = t->instance_size,
        .instance_init = t->instance_init,
        .class_size    = t->class_size,
        .class_init    = virtio_pci_base_class_init,
        .class_data    = (void *)t,
        .abstract      = true,
    };
    TypeInfo generic_type_info = {
        .name = t->generic_name,
        .parent = base_type_info.name,
        .class_init = virtio_pci_generic_class_init,
        .interfaces = (InterfaceInfo[]) {
            { INTERFACE_PCIE_DEVICE },
            { INTERFACE_CONVENTIONAL_PCI_DEVICE },
            { }
        },
    };

    if (!base_type_info.name) {
        /* No base type -> register a single generic device type */
        base_type_info.name = t->generic_name;
        base_type_info.class_init = virtio_pci_generic_base_class_init;
        base_type_info.interfaces = generic_type_info.interfaces;
        base_type_info.abstract = false;
        generic_type_info.name = NULL;
        /* Variant names require a distinct base type to derive from. */
        assert(!t->non_transitional_name);
        assert(!t->transitional_name);
    }

    type_register(&base_type_info);
    if (generic_type_info.name) {
        type_register(&generic_type_info);
    }

    if (t->non_transitional_name) {
        const TypeInfo non_transitional_type_info = {
            .name          = t->non_transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_non_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                { INTERFACE_PCIE_DEVICE },
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&non_transitional_type_info);
    }

    if (t->transitional_name) {
        const TypeInfo transitional_type_info = {
            .name          = t->transitional_name,
            .parent        = base_type_info.name,
            .instance_init = virtio_pci_transitional_instance_init,
            .interfaces = (InterfaceInfo[]) {
                /*
                 * Transitional virtio devices work only as Conventional PCI
                 * devices because they require PIO ports.
                 */
                { INTERFACE_CONVENTIONAL_PCI_DEVICE },
                { }
            },
        };
        type_register(&transitional_type_info);
    }
}
2007 | ||
0a2acf5e FK |
/* virtio-pci-bus */

/*
 * Create the proxy's internal virtio bus in place; the bus name is
 * fixed to "virtio-bus".
 */
static void virtio_pci_bus_new(VirtioBusState *bus, size_t bus_size,
                               VirtIOPCIProxy *dev)
{
    DeviceState *qdev = DEVICE(dev);
    char virtio_bus_name[] = "virtio-bus";

    qbus_create_inplace(bus, bus_size, TYPE_VIRTIO_PCI_BUS, qdev,
                        virtio_bus_name);
}
2019 | ||
/*
 * Wire the generic VirtioBusClass callbacks to their PCI transport
 * implementations.  Only one virtio device can sit on the bus
 * (max_dev = 1).
 */
static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
{
    BusClass *bus_class = BUS_CLASS(klass);
    VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
    bus_class->max_dev = 1;
    k->notify = virtio_pci_notify;
    k->save_config = virtio_pci_save_config;
    k->load_config = virtio_pci_load_config;
    k->save_queue = virtio_pci_save_queue;
    k->load_queue = virtio_pci_load_queue;
    k->save_extra_state = virtio_pci_save_extra_state;
    k->load_extra_state = virtio_pci_load_extra_state;
    k->has_extra_state = virtio_pci_has_extra_state;
    k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
    k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
    k->set_host_notifier_mr = virtio_pci_set_host_notifier_mr;
    k->vmstate_change = virtio_pci_vmstate_change;
    k->pre_plugged = virtio_pci_pre_plugged;
    k->device_plugged = virtio_pci_device_plugged;
    k->device_unplugged = virtio_pci_device_unplugged;
    k->query_nvectors = virtio_pci_query_nvectors;
    k->ioeventfd_enabled = virtio_pci_ioeventfd_enabled;
    k->ioeventfd_assign = virtio_pci_ioeventfd_assign;
    k->get_dma_as = virtio_pci_get_dma_as;
}
2045 | ||
/* QOM type for the internal virtio bus provided by the PCI proxy. */
static const TypeInfo virtio_pci_bus_info = {
    .name          = TYPE_VIRTIO_PCI_BUS,
    .parent        = TYPE_VIRTIO_BUS,
    .instance_size = sizeof(VirtioPCIBusState),
    .class_init    = virtio_pci_bus_class_init,
};
2052 | ||
/* Register the two base types; concrete device types are added by the
 * individual devices through virtio_pci_types_register(). */
static void virtio_pci_register_types(void)
{
    /* Base types: */
    type_register_static(&virtio_pci_bus_info);
    type_register_static(&virtio_pci_info);
}

type_init(virtio_pci_register_types)
271458d7 | 2061 |