qemu.git: hw/virtio/virtio-pci.c (blob at commit "virtio-serial-pci: switch to the new API.")
1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include <inttypes.h>
19
20 #include "hw/virtio/virtio.h"
21 #include "hw/virtio/virtio-blk.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "hw/virtio/virtio-serial.h"
24 #include "hw/virtio/virtio-scsi.h"
25 #include "hw/virtio/virtio-balloon.h"
26 #include "hw/pci/pci.h"
27 #include "qemu/error-report.h"
28 #include "hw/pci/msi.h"
29 #include "hw/pci/msix.h"
30 #include "hw/loader.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/blockdev.h"
33 #include "virtio-pci.h"
34 #include "qemu/range.h"
35 #include "hw/virtio/virtio-bus.h"
36
37 /* from Linux's linux/virtio_pci.h */
38
39 /* A 32-bit r/o bitmask of the features supported by the host */
40 #define VIRTIO_PCI_HOST_FEATURES 0
41
42 /* A 32-bit r/w bitmask of features activated by the guest */
43 #define VIRTIO_PCI_GUEST_FEATURES 4
44
45 /* A 32-bit r/w PFN for the currently selected queue */
46 #define VIRTIO_PCI_QUEUE_PFN 8
47
48 /* A 16-bit r/o queue size for the currently selected queue */
49 #define VIRTIO_PCI_QUEUE_NUM 12
50
51 /* A 16-bit r/w queue selector */
52 #define VIRTIO_PCI_QUEUE_SEL 14
53
54 /* A 16-bit r/w queue notifier */
55 #define VIRTIO_PCI_QUEUE_NOTIFY 16
56
57 /* An 8-bit device status register. */
58 #define VIRTIO_PCI_STATUS 18
59
60 /* An 8-bit r/o interrupt status register. Reading the value will return the
61 * current contents of the ISR and will also clear it. This is effectively
62 * a read-and-acknowledge. */
63 #define VIRTIO_PCI_ISR 19
64
65 /* MSI-X registers: only enabled if MSI-X is enabled. */
66 /* A 16-bit vector for configuration changes. */
67 #define VIRTIO_MSI_CONFIG_VECTOR 20
68 /* A 16-bit vector for selected queue notifications. */
69 #define VIRTIO_MSI_QUEUE_VECTOR 22
70
71 /* Config space size */
72 #define VIRTIO_PCI_CONFIG_NOMSI 20
73 #define VIRTIO_PCI_CONFIG_MSI 24
74 #define VIRTIO_PCI_REGION_SIZE(dev) (msix_present(dev) ? \
75 VIRTIO_PCI_CONFIG_MSI : \
76 VIRTIO_PCI_CONFIG_NOMSI)
77
78 /* The remaining space is defined by each driver as the per-driver
79 * configuration space */
80 #define VIRTIO_PCI_CONFIG(dev) (msix_enabled(dev) ? \
81 VIRTIO_PCI_CONFIG_MSI : \
82 VIRTIO_PCI_CONFIG_NOMSI)
83
84 /* How many bits to shift physical queue address written to QUEUE_PFN.
85 * 12 is historical, and due to x86 page size. */
86 #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
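/* Illustrative sketch, not part of this file: how a guest driver might use
 * the legacy layout above through BAR 0, assuming an x86 I/O port base at a
 * hypothetical IOBASE and the Linux <sys/io.h> port accessors (which need
 * ioperm() in a real user-space test). IOBASE, ring_pfn and wanted_features
 * are placeholders, not names taken from QEMU. */
#if 0
#include <stdint.h>
#include <sys/io.h>

#define IOBASE 0xc000                                  /* hypothetical BAR 0 I/O base */

static void legacy_probe_sketch(uint32_t ring_pfn, uint32_t wanted_features)
{
    outb(0, IOBASE + VIRTIO_PCI_STATUS);               /* reset the device */
    outb(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER,
         IOBASE + VIRTIO_PCI_STATUS);

    uint32_t host = inl(IOBASE + VIRTIO_PCI_HOST_FEATURES);
    outl(host & wanted_features, IOBASE + VIRTIO_PCI_GUEST_FEATURES);

    outw(0, IOBASE + VIRTIO_PCI_QUEUE_SEL);            /* select queue 0 */
    uint16_t num = inw(IOBASE + VIRTIO_PCI_QUEUE_NUM); /* ring size picked by host */
    (void)num;                                         /* allocate a ring of that size */
    outl(ring_pfn, IOBASE + VIRTIO_PCI_QUEUE_PFN);     /* ring phys addr >> 12 */

    outb(VIRTIO_CONFIG_S_ACKNOWLEDGE | VIRTIO_CONFIG_S_DRIVER |
         VIRTIO_CONFIG_S_DRIVER_OK, IOBASE + VIRTIO_PCI_STATUS);
    outw(0, IOBASE + VIRTIO_PCI_QUEUE_NOTIFY);         /* kick queue 0 */
}
#endif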
87
88 /* Flags track per-device state like workarounds for quirks in older guests. */
89 #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG (1 << 0)
90
91 /* QEMU doesn't strictly need write barriers since everything runs in
92 * lock-step. We'll leave the calls to wmb() in though to make it obvious for
93 * KVM or if kqemu gets SMP support.
94 */
95 #define wmb() do { } while (0)
96
97 /* HACK for virtio to determine if it's running a big endian guest */
98 bool virtio_is_big_endian(void);
99
100 /* virtio device */
101 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
102 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
103 {
104 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
105 }
106
107 /* DeviceState to VirtIOPCIProxy. Note: used on datapath,
108 * be careful and test performance if you change this.
109 */
110 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
111 {
112 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
113 }
114
115 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
116 {
117 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
118 if (msix_enabled(&proxy->pci_dev))
119 msix_notify(&proxy->pci_dev, vector);
120 else
121 qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
122 }
123
124 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
125 {
126 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
127 pci_device_save(&proxy->pci_dev, f);
128 msix_save(&proxy->pci_dev, f);
129 if (msix_present(&proxy->pci_dev))
130 qemu_put_be16(f, proxy->vdev->config_vector);
131 }
132
133 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
134 {
135 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
136 if (msix_present(&proxy->pci_dev))
137 qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
138 }
139
140 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
141 {
142 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
143 int ret;
144 ret = pci_device_load(&proxy->pci_dev, f);
145 if (ret) {
146 return ret;
147 }
148 msix_unuse_all_vectors(&proxy->pci_dev);
149 msix_load(&proxy->pci_dev, f);
150 if (msix_present(&proxy->pci_dev)) {
151 qemu_get_be16s(f, &proxy->vdev->config_vector);
152 } else {
153 proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
154 }
155 if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
156 return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
157 }
158 return 0;
159 }
160
161 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
162 {
163 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
164 uint16_t vector;
165 if (msix_present(&proxy->pci_dev)) {
166 qemu_get_be16s(f, &vector);
167 } else {
168 vector = VIRTIO_NO_VECTOR;
169 }
170 virtio_queue_set_vector(proxy->vdev, n, vector);
171 if (vector != VIRTIO_NO_VECTOR) {
172 return msix_vector_use(&proxy->pci_dev, vector);
173 }
174 return 0;
175 }
176
177 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
178 int n, bool assign, bool set_handler)
179 {
180 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
181 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
182 int r = 0;
183
184 if (assign) {
185 r = event_notifier_init(notifier, 1);
186 if (r < 0) {
187 error_report("%s: unable to init event notifier: %d",
188 __func__, r);
189 return r;
190 }
191 virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
192 memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
193 true, n, notifier);
194 } else {
195 memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
196 true, n, notifier);
197 virtio_queue_set_host_notifier_fd_handler(vq, false, false);
198 event_notifier_cleanup(notifier);
199 }
200 return r;
201 }
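/* Sketch of what the eventfd wired up above buys us (assumptions noted
 * inline): memory_region_add_eventfd() matches a 2-byte write of the value
 * n at VIRTIO_PCI_QUEUE_NOTIFY, i.e. exactly the legacy guest kick, so KVM
 * can signal the notifier without bouncing through the ioport emulation
 * path. A backend could then drain kicks along these lines: */
#if 0
static void drain_kick_sketch(VirtQueue *vq)
{
    EventNotifier *e = virtio_queue_get_host_notifier(vq);

    if (event_notifier_test_and_clear(e)) {
        /* at least one kick arrived since we last looked; process the ring */
    }
}
#endif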
202
203 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
204 {
205 int n, r;
206
207 if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
208 proxy->ioeventfd_disabled ||
209 proxy->ioeventfd_started) {
210 return;
211 }
212
213 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
214 if (!virtio_queue_get_num(proxy->vdev, n)) {
215 continue;
216 }
217
218 r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
219 if (r < 0) {
220 goto assign_error;
221 }
222 }
223 proxy->ioeventfd_started = true;
224 return;
225
226 assign_error:
227 while (--n >= 0) {
228 if (!virtio_queue_get_num(proxy->vdev, n)) {
229 continue;
230 }
231
232 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
233 assert(r >= 0);
234 }
235 proxy->ioeventfd_started = false;
236 error_report("%s: failed. Fallback to a userspace (slower).", __func__);
237 }
238
239 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
240 {
241 int r;
242 int n;
243
244 if (!proxy->ioeventfd_started) {
245 return;
246 }
247
248 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
249 if (!virtio_queue_get_num(proxy->vdev, n)) {
250 continue;
251 }
252
253 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
254 assert(r >= 0);
255 }
256 proxy->ioeventfd_started = false;
257 }
258
259 static void virtio_pci_reset(DeviceState *d)
260 {
261 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
262 virtio_pci_stop_ioeventfd(proxy);
263 virtio_reset(proxy->vdev);
264 msix_unuse_all_vectors(&proxy->pci_dev);
265 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
266 }
267
268 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
269 {
270 VirtIOPCIProxy *proxy = opaque;
271 VirtIODevice *vdev = proxy->vdev;
272 hwaddr pa;
273
274 switch (addr) {
275 case VIRTIO_PCI_GUEST_FEATURES:
276 /* Guest does not negotiate properly? We have to assume nothing. */
277 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
278 val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
279 }
280 virtio_set_features(vdev, val);
281 break;
282 case VIRTIO_PCI_QUEUE_PFN:
283 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
284 if (pa == 0) {
285 virtio_pci_stop_ioeventfd(proxy);
286 virtio_reset(proxy->vdev);
287 msix_unuse_all_vectors(&proxy->pci_dev);
288 }
289 else
290 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
291 break;
292 case VIRTIO_PCI_QUEUE_SEL:
293 if (val < VIRTIO_PCI_QUEUE_MAX)
294 vdev->queue_sel = val;
295 break;
296 case VIRTIO_PCI_QUEUE_NOTIFY:
297 if (val < VIRTIO_PCI_QUEUE_MAX) {
298 virtio_queue_notify(vdev, val);
299 }
300 break;
301 case VIRTIO_PCI_STATUS:
302 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
303 virtio_pci_stop_ioeventfd(proxy);
304 }
305
306 virtio_set_status(vdev, val & 0xFF);
307
308 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
309 virtio_pci_start_ioeventfd(proxy);
310 }
311
312 if (vdev->status == 0) {
313 virtio_reset(proxy->vdev);
314 msix_unuse_all_vectors(&proxy->pci_dev);
315 }
316
317 /* Linux before 2.6.34 sets the device as OK without enabling
318 the PCI device bus master bit. In this case we need to disable
319 some safety checks. */
320 if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
321 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
322 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
323 }
324 break;
325 case VIRTIO_MSI_CONFIG_VECTOR:
326 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
327 /* Make it possible for the guest to discover that an error took place. */
328 if (msix_vector_use(&proxy->pci_dev, val) < 0)
329 val = VIRTIO_NO_VECTOR;
330 vdev->config_vector = val;
331 break;
332 case VIRTIO_MSI_QUEUE_VECTOR:
333 msix_vector_unuse(&proxy->pci_dev,
334 virtio_queue_vector(vdev, vdev->queue_sel));
335 /* Make it possible for the guest to discover that an error took place. */
336 if (msix_vector_use(&proxy->pci_dev, val) < 0)
337 val = VIRTIO_NO_VECTOR;
338 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
339 break;
340 default:
341 error_report("%s: unexpected address 0x%x value 0x%x",
342 __func__, addr, val);
343 break;
344 }
345 }
346
347 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
348 {
349 VirtIODevice *vdev = proxy->vdev;
350 uint32_t ret = 0xFFFFFFFF;
351
352 switch (addr) {
353 case VIRTIO_PCI_HOST_FEATURES:
354 ret = proxy->host_features;
355 break;
356 case VIRTIO_PCI_GUEST_FEATURES:
357 ret = vdev->guest_features;
358 break;
359 case VIRTIO_PCI_QUEUE_PFN:
360 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
361 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
362 break;
363 case VIRTIO_PCI_QUEUE_NUM:
364 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
365 break;
366 case VIRTIO_PCI_QUEUE_SEL:
367 ret = vdev->queue_sel;
368 break;
369 case VIRTIO_PCI_STATUS:
370 ret = vdev->status;
371 break;
372 case VIRTIO_PCI_ISR:
373 /* reading from the ISR also clears it. */
374 ret = vdev->isr;
375 vdev->isr = 0;
376 qemu_set_irq(proxy->pci_dev.irq[0], 0);
377 break;
378 case VIRTIO_MSI_CONFIG_VECTOR:
379 ret = vdev->config_vector;
380 break;
381 case VIRTIO_MSI_QUEUE_VECTOR:
382 ret = virtio_queue_vector(vdev, vdev->queue_sel);
383 break;
384 default:
385 break;
386 }
387
388 return ret;
389 }
390
391 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
392 unsigned size)
393 {
394 VirtIOPCIProxy *proxy = opaque;
395 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
396 uint64_t val = 0;
397 if (addr < config) {
398 return virtio_ioport_read(proxy, addr);
399 }
400 addr -= config;
401
402 switch (size) {
403 case 1:
404 val = virtio_config_readb(proxy->vdev, addr);
405 break;
406 case 2:
407 val = virtio_config_readw(proxy->vdev, addr);
408 if (virtio_is_big_endian()) {
409 val = bswap16(val);
410 }
411 break;
412 case 4:
413 val = virtio_config_readl(proxy->vdev, addr);
414 if (virtio_is_big_endian()) {
415 val = bswap32(val);
416 }
417 break;
418 }
419 return val;
420 }
421
422 static void virtio_pci_config_write(void *opaque, hwaddr addr,
423 uint64_t val, unsigned size)
424 {
425 VirtIOPCIProxy *proxy = opaque;
426 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
427 if (addr < config) {
428 virtio_ioport_write(proxy, addr, val);
429 return;
430 }
431 addr -= config;
432 /*
433 * Virtio-PCI is odd. Ioports are LE but config space is target native
434 * endian.
435 */
436 switch (size) {
437 case 1:
438 virtio_config_writeb(proxy->vdev, addr, val);
439 break;
440 case 2:
441 if (virtio_is_big_endian()) {
442 val = bswap16(val);
443 }
444 virtio_config_writew(proxy->vdev, addr, val);
445 break;
446 case 4:
447 if (virtio_is_big_endian()) {
448 val = bswap32(val);
449 }
450 virtio_config_writel(proxy->vdev, addr, val);
451 break;
452 }
453 }
454
455 static const MemoryRegionOps virtio_pci_config_ops = {
456 .read = virtio_pci_config_read,
457 .write = virtio_pci_config_write,
458 .impl = {
459 .min_access_size = 1,
460 .max_access_size = 4,
461 },
462 .endianness = DEVICE_LITTLE_ENDIAN,
463 };
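/* Worked example for the swapping above (illustrative): the BAR is declared
 * DEVICE_LITTLE_ENDIAN, so for a big-endian guest the memory core byte-swaps
 * every 2/4-byte access. Device config, however, is defined to be guest-native
 * endian, so virtio_pci_config_read()/write() swap once more when
 * virtio_is_big_endian(): the two swaps cancel, and a 16-bit config field
 * holding 0x1234 is read back by the guest as 0x1234 rather than 0x3412. */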
464
465 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
466 uint32_t val, int len)
467 {
468 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
469
470 pci_default_write_config(pci_dev, address, val, len);
471
472 if (range_covers_byte(address, len, PCI_COMMAND) &&
473 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
474 !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
475 virtio_pci_stop_ioeventfd(proxy);
476 virtio_set_status(proxy->vdev,
477 proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
478 }
479 }
480
481 static unsigned virtio_pci_get_features(DeviceState *d)
482 {
483 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
484 return proxy->host_features;
485 }
486
487 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
488 unsigned int queue_no,
489 unsigned int vector,
490 MSIMessage msg)
491 {
492 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
493 int ret;
494
495 if (irqfd->users == 0) {
496 ret = kvm_irqchip_add_msi_route(kvm_state, msg);
497 if (ret < 0) {
498 return ret;
499 }
500 irqfd->virq = ret;
501 }
502 irqfd->users++;
503 return 0;
504 }
505
506 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
507 unsigned int vector)
508 {
509 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
510 if (--irqfd->users == 0) {
511 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
512 }
513 }
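/* Worked example (illustrative): if queues 0 and 1 both use MSI-X vector 1,
 * the first kvm_virtio_pci_vq_vector_use() call allocates one KVM MSI route
 * and sets users = 1, the second call only bumps users to 2; the route is
 * released again only once kvm_virtio_pci_vq_vector_release() has dropped
 * users back to 0 for that vector. */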
514
515 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
516 unsigned int queue_no,
517 unsigned int vector)
518 {
519 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
520 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
521 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
522 int ret;
523 ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
524 return ret;
525 }
526
527 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
528 unsigned int queue_no,
529 unsigned int vector)
530 {
531 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
532 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
533 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
534 int ret;
535
536 ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
537 assert(ret == 0);
538 }
539
540 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
541 {
542 PCIDevice *dev = &proxy->pci_dev;
543 VirtIODevice *vdev = proxy->vdev;
544 unsigned int vector;
545 int ret, queue_no;
546 MSIMessage msg;
547
548 for (queue_no = 0; queue_no < nvqs; queue_no++) {
549 if (!virtio_queue_get_num(vdev, queue_no)) {
550 break;
551 }
552 vector = virtio_queue_vector(vdev, queue_no);
553 if (vector >= msix_nr_vectors_allocated(dev)) {
554 continue;
555 }
556 msg = msix_get_message(dev, vector);
557 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
558 if (ret < 0) {
559 goto undo;
560 }
561 /* If guest supports masking, set up irqfd now.
562 * Otherwise, delay until unmasked in the frontend.
563 */
564 if (proxy->vdev->guest_notifier_mask) {
565 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
566 if (ret < 0) {
567 kvm_virtio_pci_vq_vector_release(proxy, vector);
568 goto undo;
569 }
570 }
571 }
572 return 0;
573
574 undo:
575 while (--queue_no >= 0) {
576 vector = virtio_queue_vector(vdev, queue_no);
577 if (vector >= msix_nr_vectors_allocated(dev)) {
578 continue;
579 }
580 if (proxy->vdev->guest_notifier_mask) {
581 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
582 }
583 kvm_virtio_pci_vq_vector_release(proxy, vector);
584 }
585 return ret;
586 }
587
588 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
589 {
590 PCIDevice *dev = &proxy->pci_dev;
591 VirtIODevice *vdev = proxy->vdev;
592 unsigned int vector;
593 int queue_no;
594
595 for (queue_no = 0; queue_no < nvqs; queue_no++) {
596 if (!virtio_queue_get_num(vdev, queue_no)) {
597 break;
598 }
599 vector = virtio_queue_vector(vdev, queue_no);
600 if (vector >= msix_nr_vectors_allocated(dev)) {
601 continue;
602 }
603 /* If guest supports masking, clean up irqfd now.
604 * Otherwise, it was cleaned when masked in the frontend.
605 */
606 if (proxy->vdev->guest_notifier_mask) {
607 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
608 }
609 kvm_virtio_pci_vq_vector_release(proxy, vector);
610 }
611 }
612
613 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
614 unsigned int queue_no,
615 unsigned int vector,
616 MSIMessage msg)
617 {
618 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
619 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
620 VirtIOIRQFD *irqfd;
621 int ret = 0;
622
623 if (proxy->vector_irqfd) {
624 irqfd = &proxy->vector_irqfd[vector];
625 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
626 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
627 if (ret < 0) {
628 return ret;
629 }
630 }
631 }
632
633 /* If guest supports masking, the irqfd is already set up; unmask it.
634 * Otherwise, set it up now.
635 */
636 if (proxy->vdev->guest_notifier_mask) {
637 proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false);
638 /* Test after unmasking to avoid losing events. */
639 if (proxy->vdev->guest_notifier_pending &&
640 proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) {
641 event_notifier_set(n);
642 }
643 } else {
644 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
645 }
646 return ret;
647 }
648
649 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
650 unsigned int queue_no,
651 unsigned int vector)
652 {
653 /* If guest supports masking, keep irqfd but mask it.
654 * Otherwise, clean it up now.
655 */
656 if (proxy->vdev->guest_notifier_mask) {
657 proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, true);
658 } else {
659 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
660 }
661 }
662
663 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
664 MSIMessage msg)
665 {
666 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
667 VirtIODevice *vdev = proxy->vdev;
668 int ret, queue_no;
669
670 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
671 if (!virtio_queue_get_num(vdev, queue_no)) {
672 break;
673 }
674 if (virtio_queue_vector(vdev, queue_no) != vector) {
675 continue;
676 }
677 ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
678 if (ret < 0) {
679 goto undo;
680 }
681 }
682 return 0;
683
684 undo:
685 while (--queue_no >= 0) {
686 if (virtio_queue_vector(vdev, queue_no) != vector) {
687 continue;
688 }
689 virtio_pci_vq_vector_mask(proxy, queue_no, vector);
690 }
691 return ret;
692 }
693
694 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
695 {
696 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
697 VirtIODevice *vdev = proxy->vdev;
698 int queue_no;
699
700 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
701 if (!virtio_queue_get_num(vdev, queue_no)) {
702 break;
703 }
704 if (virtio_queue_vector(vdev, queue_no) != vector) {
705 continue;
706 }
707 virtio_pci_vq_vector_mask(proxy, queue_no, vector);
708 }
709 }
710
711 static void virtio_pci_vector_poll(PCIDevice *dev,
712 unsigned int vector_start,
713 unsigned int vector_end)
714 {
715 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
716 VirtIODevice *vdev = proxy->vdev;
717 int queue_no;
718 unsigned int vector;
719 EventNotifier *notifier;
720 VirtQueue *vq;
721
722 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
723 if (!virtio_queue_get_num(vdev, queue_no)) {
724 break;
725 }
726 vector = virtio_queue_vector(vdev, queue_no);
727 if (vector < vector_start || vector >= vector_end ||
728 !msix_is_masked(dev, vector)) {
729 continue;
730 }
731 vq = virtio_get_queue(vdev, queue_no);
732 notifier = virtio_queue_get_guest_notifier(vq);
733 if (vdev->guest_notifier_pending) {
734 if (vdev->guest_notifier_pending(vdev, queue_no)) {
735 msix_set_pending(dev, vector);
736 }
737 } else if (event_notifier_test_and_clear(notifier)) {
738 msix_set_pending(dev, vector);
739 }
740 }
741 }
742
743 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
744 bool with_irqfd)
745 {
746 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
747 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
748 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
749
750 if (assign) {
751 int r = event_notifier_init(notifier, 0);
752 if (r < 0) {
753 return r;
754 }
755 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
756 } else {
757 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
758 event_notifier_cleanup(notifier);
759 }
760
761 return 0;
762 }
763
764 static bool virtio_pci_query_guest_notifiers(DeviceState *d)
765 {
766 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
767 return msix_enabled(&proxy->pci_dev);
768 }
769
770 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
771 {
772 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
773 VirtIODevice *vdev = proxy->vdev;
774 int r, n;
775 bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
776 kvm_msi_via_irqfd_enabled();
777
778 nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
779
780 /* When deassigning, pass a consistent nvqs value
781 * to avoid leaking notifiers.
782 */
783 assert(assign || nvqs == proxy->nvqs_with_notifiers);
784
785 proxy->nvqs_with_notifiers = nvqs;
786
787 /* Must unset vector notifier while guest notifier is still assigned */
788 if ((proxy->vector_irqfd || vdev->guest_notifier_mask) && !assign) {
789 msix_unset_vector_notifiers(&proxy->pci_dev);
790 if (proxy->vector_irqfd) {
791 kvm_virtio_pci_vector_release(proxy, nvqs);
792 g_free(proxy->vector_irqfd);
793 proxy->vector_irqfd = NULL;
794 }
795 }
796
797 for (n = 0; n < nvqs; n++) {
798 if (!virtio_queue_get_num(vdev, n)) {
799 break;
800 }
801
802 r = virtio_pci_set_guest_notifier(d, n, assign,
803 kvm_msi_via_irqfd_enabled());
804 if (r < 0) {
805 goto assign_error;
806 }
807 }
808
809 /* Must set vector notifier after guest notifier has been assigned */
810 if ((with_irqfd || vdev->guest_notifier_mask) && assign) {
811 if (with_irqfd) {
812 proxy->vector_irqfd =
813 g_malloc0(sizeof(*proxy->vector_irqfd) *
814 msix_nr_vectors_allocated(&proxy->pci_dev));
815 r = kvm_virtio_pci_vector_use(proxy, nvqs);
816 if (r < 0) {
817 goto assign_error;
818 }
819 }
820 r = msix_set_vector_notifiers(&proxy->pci_dev,
821 virtio_pci_vector_unmask,
822 virtio_pci_vector_mask,
823 virtio_pci_vector_poll);
824 if (r < 0) {
825 goto notifiers_error;
826 }
827 }
828
829 return 0;
830
831 notifiers_error:
832 if (with_irqfd) {
833 assert(assign);
834 kvm_virtio_pci_vector_release(proxy, nvqs);
835 }
836
837 assign_error:
838 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
839 assert(assign);
840 while (--n >= 0) {
841 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
842 }
843 return r;
844 }
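/* Sketch (not part of this file): a backend would typically bring guest
 * notifiers up for all of its queues before starting and tear them down with
 * the same nvqs afterwards, which is what the assert above relies on. The
 * function name below is a placeholder. */
#if 0
static int backend_start_stop_sketch(DeviceState *d, int nvqs)
{
    int r = virtio_pci_set_guest_notifiers(d, nvqs, true);
    if (r < 0) {
        return r;
    }
    /* ... run the backend ... */
    return virtio_pci_set_guest_notifiers(d, nvqs, false);
}
#endif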
845
846 static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
847 {
848 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
849
850 /* Stop using ioeventfd for virtqueue kick if the device starts using host
851 * notifiers. This makes it easy to avoid stepping on each other's toes.
852 */
853 proxy->ioeventfd_disabled = assign;
854 if (assign) {
855 virtio_pci_stop_ioeventfd(proxy);
856 }
857 /* We don't need to start the ioeventfd here: the backend currently
858 * only stops it on a status change away from OK, on reset, vmstop
859 * and such. If we ever add code to start it here, we will need to
860 * check vmstate, device state, etc. */
861 return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
862 }
863
864 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
865 {
866 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
867
868 if (running) {
869 /* If the guest has bus master disabled but the device is in the
870 ready state, we are dealing with a buggy guest OS. */
871 if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
872 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
873 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
874 }
875 virtio_pci_start_ioeventfd(proxy);
876 } else {
877 virtio_pci_stop_ioeventfd(proxy);
878 }
879 }
880
881 static const VirtIOBindings virtio_pci_bindings = {
882 .notify = virtio_pci_notify,
883 .save_config = virtio_pci_save_config,
884 .load_config = virtio_pci_load_config,
885 .save_queue = virtio_pci_save_queue,
886 .load_queue = virtio_pci_load_queue,
887 .get_features = virtio_pci_get_features,
888 .query_guest_notifiers = virtio_pci_query_guest_notifiers,
889 .set_host_notifier = virtio_pci_set_host_notifier,
890 .set_guest_notifiers = virtio_pci_set_guest_notifiers,
891 .vmstate_change = virtio_pci_vmstate_change,
892 };
893
894 void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
895 {
896 uint8_t *config;
897 uint32_t size;
898
899 proxy->vdev = vdev;
900
901 config = proxy->pci_dev.config;
902
903 if (proxy->class_code) {
904 pci_config_set_class(config, proxy->class_code);
905 }
906 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
907 pci_get_word(config + PCI_VENDOR_ID));
908 pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
909 config[PCI_INTERRUPT_PIN] = 1;
910
911 if (vdev->nvectors &&
912 msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
913 vdev->nvectors = 0;
914 }
915
916 proxy->pci_dev.config_write = virtio_write_config;
917
918 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
919 if (size & (size-1))
920 size = 1 << qemu_fls(size);
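/* Worked example (illustrative): with MSI-X present the common header is
 * VIRTIO_PCI_CONFIG_MSI = 24 bytes, so a device with a 20-byte config area
 * needs 44 bytes; 44 is not a power of two, and 1 << qemu_fls(44) rounds
 * the I/O BAR size up to 64. */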
921
922 memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
923 "virtio-pci", size);
924 pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
925 &proxy->bar);
926
927 if (!kvm_has_many_ioeventfds()) {
928 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
929 }
930
931 virtio_bind_device(vdev, &virtio_pci_bindings, DEVICE(proxy));
932 proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
933 proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
934 proxy->host_features = vdev->get_features(vdev, proxy->host_features);
935 }
936
937 static void virtio_exit_pci(PCIDevice *pci_dev)
938 {
939 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
940
941 memory_region_destroy(&proxy->bar);
942 msix_uninit_exclusive_bar(pci_dev);
943 }
944
945 static int virtio_net_init_pci(PCIDevice *pci_dev)
946 {
947 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
948 VirtIODevice *vdev;
949
950 vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net,
951 proxy->host_features);
952
953 vdev->nvectors = proxy->nvectors;
954 virtio_init_pci(proxy, vdev);
955
956 /* make the actual value visible */
957 proxy->nvectors = vdev->nvectors;
958 return 0;
959 }
960
961 static void virtio_net_exit_pci(PCIDevice *pci_dev)
962 {
963 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
964
965 virtio_pci_stop_ioeventfd(proxy);
966 virtio_net_exit(proxy->vdev);
967 virtio_exit_pci(pci_dev);
968 }
969
970 static int virtio_rng_init_pci(PCIDevice *pci_dev)
971 {
972 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
973 VirtIODevice *vdev;
974
975 if (proxy->rng.rng == NULL) {
976 proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));
977
978 object_property_add_child(OBJECT(pci_dev),
979 "default-backend",
980 OBJECT(proxy->rng.default_backend),
981 NULL);
982
983 object_property_set_link(OBJECT(pci_dev),
984 OBJECT(proxy->rng.default_backend),
985 "rng", NULL);
986 }
987
988 vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);
989 if (!vdev) {
990 return -1;
991 }
992 virtio_init_pci(proxy, vdev);
993 return 0;
994 }
995
996 static void virtio_rng_exit_pci(PCIDevice *pci_dev)
997 {
998 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
999
1000 virtio_pci_stop_ioeventfd(proxy);
1001 virtio_rng_exit(proxy->vdev);
1002 virtio_exit_pci(pci_dev);
1003 }
1004
1005 static Property virtio_net_properties[] = {
1006 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
1007 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
1008 DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
1009 DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
1010 DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
1011 DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
1012 DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
1013 DEFINE_PROP_END_OF_LIST(),
1014 };
1015
1016 static void virtio_net_class_init(ObjectClass *klass, void *data)
1017 {
1018 DeviceClass *dc = DEVICE_CLASS(klass);
1019 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1020
1021 k->init = virtio_net_init_pci;
1022 k->exit = virtio_net_exit_pci;
1023 k->romfile = "efi-virtio.rom";
1024 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1025 k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
1026 k->revision = VIRTIO_PCI_ABI_VERSION;
1027 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1028 dc->reset = virtio_pci_reset;
1029 dc->props = virtio_net_properties;
1030 }
1031
1032 static const TypeInfo virtio_net_info = {
1033 .name = "virtio-net-pci",
1034 .parent = TYPE_PCI_DEVICE,
1035 .instance_size = sizeof(VirtIOPCIProxy),
1036 .class_init = virtio_net_class_init,
1037 };
1038
1039 static void virtio_rng_initfn(Object *obj)
1040 {
1041 PCIDevice *pci_dev = PCI_DEVICE(obj);
1042 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1043
1044 object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
1045 (Object **)&proxy->rng.rng, NULL);
1046 }
1047
1048 static Property virtio_rng_properties[] = {
1049 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1050 /* Set a default rate limit of 2^47 bytes per minute or roughly 2TB/s. If
1051 you have an entropy source capable of generating more entropy than this
1052 and you can pass it through via virtio-rng, then hats off to you. Until
1053 then, this is unlimited for all practical purposes.
1054 */
1055 DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
1056 DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
1057 DEFINE_PROP_END_OF_LIST(),
1058 };
1059
1060 static void virtio_rng_class_init(ObjectClass *klass, void *data)
1061 {
1062 DeviceClass *dc = DEVICE_CLASS(klass);
1063 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1064
1065 k->init = virtio_rng_init_pci;
1066 k->exit = virtio_rng_exit_pci;
1067 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1068 k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
1069 k->revision = VIRTIO_PCI_ABI_VERSION;
1070 k->class_id = PCI_CLASS_OTHERS;
1071 dc->reset = virtio_pci_reset;
1072 dc->props = virtio_rng_properties;
1073 }
1074
1075 static const TypeInfo virtio_rng_info = {
1076 .name = "virtio-rng-pci",
1077 .parent = TYPE_PCI_DEVICE,
1078 .instance_size = sizeof(VirtIOPCIProxy),
1079 .instance_init = virtio_rng_initfn,
1080 .class_init = virtio_rng_class_init,
1081 };
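/* Example usage (illustrative; the "rng" link and the rate-limit properties
 * are the ones defined above):
 *
 *   -object rng-random,id=rng0,filename=/dev/urandom \
 *   -device virtio-rng-pci,rng=rng0,max-bytes=1024,period=1000
 *
 * If no backend is given, virtio_rng_init_pci() creates an rng-random
 * default backend and links it to "rng" itself. */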
1082
1083 #ifdef CONFIG_VIRTFS
1084 static int virtio_9p_init_pci(PCIDevice *pci_dev)
1085 {
1086 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1087 VirtIODevice *vdev;
1088
1089 vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
1090 vdev->nvectors = proxy->nvectors;
1091 virtio_init_pci(proxy, vdev);
1092 /* make the actual value visible */
1093 proxy->nvectors = vdev->nvectors;
1094 return 0;
1095 }
1096
1097 static Property virtio_9p_properties[] = {
1098 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1099 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1100 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1101 DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
1102 DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
1103 DEFINE_PROP_END_OF_LIST(),
1104 };
1105
1106 static void virtio_9p_class_init(ObjectClass *klass, void *data)
1107 {
1108 DeviceClass *dc = DEVICE_CLASS(klass);
1109 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1110
1111 k->init = virtio_9p_init_pci;
1112 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1113 k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
1114 k->revision = VIRTIO_PCI_ABI_VERSION;
1115 k->class_id = 0x2;
1116 dc->props = virtio_9p_properties;
1117 dc->reset = virtio_pci_reset;
1118 }
1119
1120 static const TypeInfo virtio_9p_info = {
1121 .name = "virtio-9p-pci",
1122 .parent = TYPE_PCI_DEVICE,
1123 .instance_size = sizeof(VirtIOPCIProxy),
1124 .class_init = virtio_9p_class_init,
1125 };
1126 #endif
1127
1128 /*
1129 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
1130 */
1131
1132 /* This is called by virtio-bus just after the device is plugged. */
1133 static void virtio_pci_device_plugged(DeviceState *d)
1134 {
1135 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1136 VirtioBusState *bus = &proxy->bus;
1137 uint8_t *config;
1138 uint32_t size;
1139
1140 proxy->vdev = bus->vdev;
1141
1142 config = proxy->pci_dev.config;
1143 if (proxy->class_code) {
1144 pci_config_set_class(config, proxy->class_code);
1145 }
1146 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
1147 pci_get_word(config + PCI_VENDOR_ID));
1148 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
1149 config[PCI_INTERRUPT_PIN] = 1;
1150
1151 if (proxy->nvectors &&
1152 msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
1153 proxy->nvectors = 0;
1154 }
1155
1156 proxy->pci_dev.config_write = virtio_write_config;
1157
1158 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
1159 + virtio_bus_get_vdev_config_len(bus);
1160 if (size & (size - 1)) {
1161 size = 1 << qemu_fls(size);
1162 }
1163
1164 memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
1165 "virtio-pci", size);
1166 pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
1167 &proxy->bar);
1168
1169 if (!kvm_has_many_ioeventfds()) {
1170 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
1171 }
1172
1173 proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
1174 proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
1175 proxy->host_features = virtio_bus_get_vdev_features(bus,
1176 proxy->host_features);
1177 }
1178
1179 static int virtio_pci_init(PCIDevice *pci_dev)
1180 {
1181 VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
1182 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
1183 virtio_pci_bus_new(&dev->bus, dev);
1184 if (k->init != NULL) {
1185 return k->init(dev);
1186 }
1187 return 0;
1188 }
1189
1190 static void virtio_pci_exit(PCIDevice *pci_dev)
1191 {
1192 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1193 virtio_pci_stop_ioeventfd(proxy);
1194 virtio_exit_pci(pci_dev);
1195 }
1196
1197 /*
1198 * This will be renamed virtio_pci_reset at the end of the series.
1199 * virtio_pci_reset is still in use at this moment.
1200 */
1201 static void virtio_pci_rst(DeviceState *qdev)
1202 {
1203 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1204 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
1205 virtio_pci_stop_ioeventfd(proxy);
1206 virtio_bus_reset(bus);
1207 msix_unuse_all_vectors(&proxy->pci_dev);
1208 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
1209 }
1210
1211 static void virtio_pci_class_init(ObjectClass *klass, void *data)
1212 {
1213 DeviceClass *dc = DEVICE_CLASS(klass);
1214 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1215
1216 k->init = virtio_pci_init;
1217 k->exit = virtio_pci_exit;
1218 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1219 k->revision = VIRTIO_PCI_ABI_VERSION;
1220 k->class_id = PCI_CLASS_OTHERS;
1221 dc->reset = virtio_pci_rst;
1222 }
1223
1224 static const TypeInfo virtio_pci_info = {
1225 .name = TYPE_VIRTIO_PCI,
1226 .parent = TYPE_PCI_DEVICE,
1227 .instance_size = sizeof(VirtIOPCIProxy),
1228 .class_init = virtio_pci_class_init,
1229 .class_size = sizeof(VirtioPCIClass),
1230 .abstract = true,
1231 };
1232
1233 /* virtio-blk-pci */
1234
1235 static Property virtio_blk_pci_properties[] = {
1236 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1237 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1238 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1239 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1240 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
1241 DEFINE_PROP_BIT("x-data-plane", VirtIOBlkPCI, blk.data_plane, 0, false),
1242 #endif
1243 DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
1244 DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlkPCI, blk),
1245 DEFINE_PROP_END_OF_LIST(),
1246 };
1247
1248 static int virtio_blk_pci_init(VirtIOPCIProxy *vpci_dev)
1249 {
1250 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
1251 DeviceState *vdev = DEVICE(&dev->vdev);
1252 virtio_blk_set_conf(vdev, &(dev->blk));
1253 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1254 if (qdev_init(vdev) < 0) {
1255 return -1;
1256 }
1257 return 0;
1258 }
1259
1260 static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
1261 {
1262 DeviceClass *dc = DEVICE_CLASS(klass);
1263 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1264 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1265
1266 dc->props = virtio_blk_pci_properties;
1267 k->init = virtio_blk_pci_init;
1268 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1269 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
1270 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1271 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1272 }
1273
1274 static void virtio_blk_pci_instance_init(Object *obj)
1275 {
1276 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
1277 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BLK);
1278 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1279 }
1280
1281 static const TypeInfo virtio_blk_pci_info = {
1282 .name = TYPE_VIRTIO_BLK_PCI,
1283 .parent = TYPE_VIRTIO_PCI,
1284 .instance_size = sizeof(VirtIOBlkPCI),
1285 .instance_init = virtio_blk_pci_instance_init,
1286 .class_init = virtio_blk_pci_class_init,
1287 };
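/* Example usage (illustrative; "drive" comes from the block properties pulled
 * in via DEFINE_VIRTIO_BLK_PROPERTIES above):
 *
 *   -drive if=none,id=drive0,file=disk.img \
 *   -device virtio-blk-pci,drive=drive0,ioeventfd=on,vectors=2
 */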
1288
1289 /* virtio-scsi-pci */
1290
1291 static Property virtio_scsi_pci_properties[] = {
1292 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1293 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1294 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
1295 DEV_NVECTORS_UNSPECIFIED),
1296 DEFINE_VIRTIO_SCSI_FEATURES(VirtIOPCIProxy, host_features),
1297 DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOSCSIPCI, vdev.conf),
1298 DEFINE_PROP_END_OF_LIST(),
1299 };
1300
1301 static int virtio_scsi_pci_init_pci(VirtIOPCIProxy *vpci_dev)
1302 {
1303 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
1304 DeviceState *vdev = DEVICE(&dev->vdev);
1305
1306 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1307 vpci_dev->nvectors = dev->vdev.conf.num_queues + 3;
1308 }
1309
1310 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1311 if (qdev_init(vdev) < 0) {
1312 return -1;
1313 }
1314 return 0;
1315 }
1316
1317 static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
1318 {
1319 DeviceClass *dc = DEVICE_CLASS(klass);
1320 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1321 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1322 k->init = virtio_scsi_pci_init_pci;
1323 dc->props = virtio_scsi_pci_properties;
1324 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1325 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
1326 pcidev_k->revision = 0x00;
1327 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1328 }
1329
1330 static void virtio_scsi_pci_instance_init(Object *obj)
1331 {
1332 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
1333 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SCSI);
1334 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1335 }
1336
1337 static const TypeInfo virtio_scsi_pci_info = {
1338 .name = TYPE_VIRTIO_SCSI_PCI,
1339 .parent = TYPE_VIRTIO_PCI,
1340 .instance_size = sizeof(VirtIOSCSIPCI),
1341 .instance_init = virtio_scsi_pci_instance_init,
1342 .class_init = virtio_scsi_pci_class_init,
1343 };
1344
1345 /* virtio-balloon-pci */
1346
1347 static Property virtio_balloon_pci_properties[] = {
1348 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1349 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1350 DEFINE_PROP_END_OF_LIST(),
1351 };
1352
1353 static int virtio_balloon_pci_init(VirtIOPCIProxy *vpci_dev)
1354 {
1355 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
1356 DeviceState *vdev = DEVICE(&dev->vdev);
1357
1358 if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
1359 vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
1360 vpci_dev->class_code = PCI_CLASS_OTHERS;
1361 }
1362
1363 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1364 if (qdev_init(vdev) < 0) {
1365 return -1;
1366 }
1367 return 0;
1368 }
1369
1370 static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
1371 {
1372 DeviceClass *dc = DEVICE_CLASS(klass);
1373 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1374 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1375 k->init = virtio_balloon_pci_init;
1376 dc->props = virtio_balloon_pci_properties;
1377 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1378 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
1379 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1380 pcidev_k->class_id = PCI_CLASS_OTHERS;
1381 }
1382
1383 static void virtio_balloon_pci_instance_init(Object *obj)
1384 {
1385 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
1386 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BALLOON);
1387 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1388 }
1389
1390 static const TypeInfo virtio_balloon_pci_info = {
1391 .name = TYPE_VIRTIO_BALLOON_PCI,
1392 .parent = TYPE_VIRTIO_PCI,
1393 .instance_size = sizeof(VirtIOBalloonPCI),
1394 .instance_init = virtio_balloon_pci_instance_init,
1395 .class_init = virtio_balloon_pci_class_init,
1396 };
1397
1398 /* virtio-serial-pci */
1399
1400 static int virtio_serial_pci_init(VirtIOPCIProxy *vpci_dev)
1401 {
1402 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(vpci_dev);
1403 DeviceState *vdev = DEVICE(&dev->vdev);
1404
1405 if (vpci_dev->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
1406 vpci_dev->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
1407 vpci_dev->class_code != PCI_CLASS_OTHERS) { /* qemu-kvm */
1408 vpci_dev->class_code = PCI_CLASS_COMMUNICATION_OTHER;
1409 }
1410
1411 /* backwards-compatibility with machines that were created with
1412 DEV_NVECTORS_UNSPECIFIED */
1413 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1414 vpci_dev->nvectors = dev->vdev.serial.max_virtserial_ports + 1;
1415 }
1416
1417 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1418 if (qdev_init(vdev) < 0) {
1419 return -1;
1420 }
1421 return 0;
1422 }
1423
1424 static Property virtio_serial_pci_properties[] = {
1425 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1426 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1427 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1428 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1429 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1430 DEFINE_VIRTIO_SERIAL_PROPERTIES(VirtIOSerialPCI, vdev.serial),
1431 DEFINE_PROP_END_OF_LIST(),
1432 };
1433
1434 static void virtio_serial_pci_class_init(ObjectClass *klass, void *data)
1435 {
1436 DeviceClass *dc = DEVICE_CLASS(klass);
1437 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1438 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1439 k->init = virtio_serial_pci_init;
1440 dc->props = virtio_serial_pci_properties;
1441 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1442 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
1443 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1444 pcidev_k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
1445 }
1446
1447 static void virtio_serial_pci_instance_init(Object *obj)
1448 {
1449 VirtIOSerialPCI *dev = VIRTIO_SERIAL_PCI(obj);
1450 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SERIAL);
1451 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1452 }
1453
1454 static const TypeInfo virtio_serial_pci_info = {
1455 .name = TYPE_VIRTIO_SERIAL_PCI,
1456 .parent = TYPE_VIRTIO_PCI,
1457 .instance_size = sizeof(VirtIOSerialPCI),
1458 .instance_init = virtio_serial_pci_instance_init,
1459 .class_init = virtio_serial_pci_class_init,
1460 };
1461
1462 /* virtio-pci-bus */
1463
1464 void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev)
1465 {
1466 DeviceState *qdev = DEVICE(dev);
1467 BusState *qbus;
1468 qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_PCI_BUS, qdev, NULL);
1469 qbus = BUS(bus);
1470 qbus->allow_hotplug = 1;
1471 }
1472
1473 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
1474 {
1475 BusClass *bus_class = BUS_CLASS(klass);
1476 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1477 bus_class->max_dev = 1;
1478 k->notify = virtio_pci_notify;
1479 k->save_config = virtio_pci_save_config;
1480 k->load_config = virtio_pci_load_config;
1481 k->save_queue = virtio_pci_save_queue;
1482 k->load_queue = virtio_pci_load_queue;
1483 k->get_features = virtio_pci_get_features;
1484 k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
1485 k->set_host_notifier = virtio_pci_set_host_notifier;
1486 k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
1487 k->vmstate_change = virtio_pci_vmstate_change;
1488 k->device_plugged = virtio_pci_device_plugged;
1489 }
1490
1491 static const TypeInfo virtio_pci_bus_info = {
1492 .name = TYPE_VIRTIO_PCI_BUS,
1493 .parent = TYPE_VIRTIO_BUS,
1494 .instance_size = sizeof(VirtioPCIBusState),
1495 .class_init = virtio_pci_bus_class_init,
1496 };
1497
1498 static void virtio_pci_register_types(void)
1499 {
1500 type_register_static(&virtio_net_info);
1501 type_register_static(&virtio_rng_info);
1502 type_register_static(&virtio_pci_bus_info);
1503 type_register_static(&virtio_pci_info);
1504 #ifdef CONFIG_VIRTFS
1505 type_register_static(&virtio_9p_info);
1506 #endif
1507 type_register_static(&virtio_blk_pci_info);
1508 type_register_static(&virtio_scsi_pci_info);
1509 type_register_static(&virtio_balloon_pci_info);
1510 type_register_static(&virtio_serial_pci_info);
1511 }
1512
1513 type_init(virtio_pci_register_types)