hw/virtio/virtio-pci.c
1 /*
2 * Virtio PCI Bindings
3 *
4 * Copyright IBM, Corp. 2007
5 * Copyright (c) 2009 CodeSourcery
6 *
7 * Authors:
8 * Anthony Liguori <aliguori@us.ibm.com>
9 * Paul Brook <paul@codesourcery.com>
10 *
11 * This work is licensed under the terms of the GNU GPL, version 2. See
12 * the COPYING file in the top-level directory.
13 *
14 * Contributions after 2012-01-13 are licensed under the terms of the
15 * GNU GPL, version 2 or (at your option) any later version.
16 */
17
18 #include <inttypes.h>
19
20 #include "hw/virtio/virtio.h"
21 #include "hw/virtio/virtio-blk.h"
22 #include "hw/virtio/virtio-net.h"
23 #include "hw/virtio/virtio-serial.h"
24 #include "hw/virtio/virtio-scsi.h"
25 #include "hw/virtio/virtio-balloon.h"
26 #include "hw/pci/pci.h"
27 #include "qemu/error-report.h"
28 #include "hw/pci/msi.h"
29 #include "hw/pci/msix.h"
30 #include "hw/loader.h"
31 #include "sysemu/kvm.h"
32 #include "sysemu/blockdev.h"
33 #include "virtio-pci.h"
34 #include "qemu/range.h"
35 #include "hw/virtio/virtio-bus.h"
36
37 /* from Linux's linux/virtio_pci.h */
38
39 /* A 32-bit r/o bitmask of the features supported by the host */
40 #define VIRTIO_PCI_HOST_FEATURES 0
41
42 /* A 32-bit r/w bitmask of features activated by the guest */
43 #define VIRTIO_PCI_GUEST_FEATURES 4
44
45 /* A 32-bit r/w PFN for the currently selected queue */
46 #define VIRTIO_PCI_QUEUE_PFN 8
47
48 /* A 16-bit r/o queue size for the currently selected queue */
49 #define VIRTIO_PCI_QUEUE_NUM 12
50
51 /* A 16-bit r/w queue selector */
52 #define VIRTIO_PCI_QUEUE_SEL 14
53
54 /* A 16-bit r/w queue notifier */
55 #define VIRTIO_PCI_QUEUE_NOTIFY 16
56
57 /* An 8-bit device status register. */
58 #define VIRTIO_PCI_STATUS 18
59
60 /* An 8-bit r/o interrupt status register. Reading the value will return the
61 * current contents of the ISR and will also clear it. This is effectively
62 * a read-and-acknowledge. */
63 #define VIRTIO_PCI_ISR 19
64
65 /* MSI-X registers: only enabled if MSI-X is enabled. */
66 /* A 16-bit vector for configuration changes. */
67 #define VIRTIO_MSI_CONFIG_VECTOR 20
68 /* A 16-bit vector for selected queue notifications. */
69 #define VIRTIO_MSI_QUEUE_VECTOR 22
70
71 /* Config space size */
72 #define VIRTIO_PCI_CONFIG_NOMSI 20
73 #define VIRTIO_PCI_CONFIG_MSI 24
74 #define VIRTIO_PCI_REGION_SIZE(dev) (msix_present(dev) ? \
75 VIRTIO_PCI_CONFIG_MSI : \
76 VIRTIO_PCI_CONFIG_NOMSI)
77
78 /* The remaining space is defined by each driver as the per-driver
79 * configuration space */
80 #define VIRTIO_PCI_CONFIG(dev) (msix_enabled(dev) ? \
81 VIRTIO_PCI_CONFIG_MSI : \
82 VIRTIO_PCI_CONFIG_NOMSI)
83
84 /* How many bits to shift physical queue address written to QUEUE_PFN.
85 * 12 is historical, and due to x86 page size. */
86 #define VIRTIO_PCI_QUEUE_ADDR_SHIFT 12
87
88 /* Flags track per-device state like workarounds for quirks in older guests. */
89 #define VIRTIO_PCI_FLAG_BUS_MASTER_BUG (1 << 0)
90
91 /* QEMU doesn't strictly need write barriers since everything runs in
92 * lock-step. We'll leave the calls to wmb() in though to make it obvious for
93 * KVM or if kqemu gets SMP support.
94 */
95 #define wmb() do { } while (0)
96
97 /* HACK for virtio to determine if it's running a big endian guest */
98 bool virtio_is_big_endian(void);
99
100 /* virtio device */
101 /* DeviceState to VirtIOPCIProxy. For use off data-path. TODO: use QOM. */
102 static inline VirtIOPCIProxy *to_virtio_pci_proxy(DeviceState *d)
103 {
104 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
105 }
106
107 /* DeviceState to VirtIOPCIProxy. Note: used on datapath,
108 * be careful and test performance if you change this.
109 */
110 static inline VirtIOPCIProxy *to_virtio_pci_proxy_fast(DeviceState *d)
111 {
112 return container_of(d, VirtIOPCIProxy, pci_dev.qdev);
113 }
114
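/* Interrupt the guest: through the per-vector MSI-X mechanism when the
 * guest has enabled MSI-X, otherwise by driving the legacy INTx line
 * from bit 0 of the ISR. */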
115 static void virtio_pci_notify(DeviceState *d, uint16_t vector)
116 {
117 VirtIOPCIProxy *proxy = to_virtio_pci_proxy_fast(d);
118 if (msix_enabled(&proxy->pci_dev))
119 msix_notify(&proxy->pci_dev, vector);
120 else
121 qemu_set_irq(proxy->pci_dev.irq[0], proxy->vdev->isr & 1);
122 }
123
124 static void virtio_pci_save_config(DeviceState *d, QEMUFile *f)
125 {
126 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
127 pci_device_save(&proxy->pci_dev, f);
128 msix_save(&proxy->pci_dev, f);
129 if (msix_present(&proxy->pci_dev))
130 qemu_put_be16(f, proxy->vdev->config_vector);
131 }
132
133 static void virtio_pci_save_queue(DeviceState *d, int n, QEMUFile *f)
134 {
135 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
136 if (msix_present(&proxy->pci_dev))
137 qemu_put_be16(f, virtio_queue_vector(proxy->vdev, n));
138 }
139
140 static int virtio_pci_load_config(DeviceState *d, QEMUFile *f)
141 {
142 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
143 int ret;
144 ret = pci_device_load(&proxy->pci_dev, f);
145 if (ret) {
146 return ret;
147 }
148 msix_unuse_all_vectors(&proxy->pci_dev);
149 msix_load(&proxy->pci_dev, f);
150 if (msix_present(&proxy->pci_dev)) {
151 qemu_get_be16s(f, &proxy->vdev->config_vector);
152 } else {
153 proxy->vdev->config_vector = VIRTIO_NO_VECTOR;
154 }
155 if (proxy->vdev->config_vector != VIRTIO_NO_VECTOR) {
156 return msix_vector_use(&proxy->pci_dev, proxy->vdev->config_vector);
157 }
158 return 0;
159 }
160
161 static int virtio_pci_load_queue(DeviceState *d, int n, QEMUFile *f)
162 {
163 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
164 uint16_t vector;
165 if (msix_present(&proxy->pci_dev)) {
166 qemu_get_be16s(f, &vector);
167 } else {
168 vector = VIRTIO_NO_VECTOR;
169 }
170 virtio_queue_set_vector(proxy->vdev, n, vector);
171 if (vector != VIRTIO_NO_VECTOR) {
172 return msix_vector_use(&proxy->pci_dev, vector);
173 }
174 return 0;
175 }
176
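/* Wire up (or tear down) the host notifier for queue n: guest writes of
 * the queue index to VIRTIO_PCI_QUEUE_NOTIFY then signal the eventfd
 * directly instead of being handled by the I/O port emulation. */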
177 static int virtio_pci_set_host_notifier_internal(VirtIOPCIProxy *proxy,
178 int n, bool assign, bool set_handler)
179 {
180 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
181 EventNotifier *notifier = virtio_queue_get_host_notifier(vq);
182 int r = 0;
183
184 if (assign) {
185 r = event_notifier_init(notifier, 1);
186 if (r < 0) {
187 error_report("%s: unable to init event notifier: %d",
188 __func__, r);
189 return r;
190 }
191 virtio_queue_set_host_notifier_fd_handler(vq, true, set_handler);
192 memory_region_add_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
193 true, n, notifier);
194 } else {
195 memory_region_del_eventfd(&proxy->bar, VIRTIO_PCI_QUEUE_NOTIFY, 2,
196 true, n, notifier);
197 virtio_queue_set_host_notifier_fd_handler(vq, false, false);
198 event_notifier_cleanup(notifier);
199 }
200 return r;
201 }
202
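/* Switch every populated queue over to ioeventfd-based kicks. On any
 * failure, roll back the queues converted so far and keep handling
 * notifications in userspace. */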
203 static void virtio_pci_start_ioeventfd(VirtIOPCIProxy *proxy)
204 {
205 int n, r;
206
207 if (!(proxy->flags & VIRTIO_PCI_FLAG_USE_IOEVENTFD) ||
208 proxy->ioeventfd_disabled ||
209 proxy->ioeventfd_started) {
210 return;
211 }
212
213 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
214 if (!virtio_queue_get_num(proxy->vdev, n)) {
215 continue;
216 }
217
218 r = virtio_pci_set_host_notifier_internal(proxy, n, true, true);
219 if (r < 0) {
220 goto assign_error;
221 }
222 }
223 proxy->ioeventfd_started = true;
224 return;
225
226 assign_error:
227 while (--n >= 0) {
228 if (!virtio_queue_get_num(proxy->vdev, n)) {
229 continue;
230 }
231
232 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
233 assert(r >= 0);
234 }
235 proxy->ioeventfd_started = false;
236 error_report("%s: failed. Falling back to userspace (slower).", __func__);
237 }
238
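/* Undo virtio_pci_start_ioeventfd() for every populated queue. */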
239 static void virtio_pci_stop_ioeventfd(VirtIOPCIProxy *proxy)
240 {
241 int r;
242 int n;
243
244 if (!proxy->ioeventfd_started) {
245 return;
246 }
247
248 for (n = 0; n < VIRTIO_PCI_QUEUE_MAX; n++) {
249 if (!virtio_queue_get_num(proxy->vdev, n)) {
250 continue;
251 }
252
253 r = virtio_pci_set_host_notifier_internal(proxy, n, false, false);
254 assert(r >= 0);
255 }
256 proxy->ioeventfd_started = false;
257 }
258
259 static void virtio_pci_reset(DeviceState *d)
260 {
261 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
262 virtio_pci_stop_ioeventfd(proxy);
263 virtio_reset(proxy->vdev);
264 msix_unuse_all_vectors(&proxy->pci_dev);
265 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
266 }
267
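/* Handle a guest write to one of the legacy virtio registers at the
 * start of BAR 0. */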
268 static void virtio_ioport_write(void *opaque, uint32_t addr, uint32_t val)
269 {
270 VirtIOPCIProxy *proxy = opaque;
271 VirtIODevice *vdev = proxy->vdev;
272 hwaddr pa;
273
274 switch (addr) {
275 case VIRTIO_PCI_GUEST_FEATURES:
276 /* Guest does not negotiate properly? We have to assume nothing. */
277 if (val & (1 << VIRTIO_F_BAD_FEATURE)) {
278 val = vdev->bad_features ? vdev->bad_features(vdev) : 0;
279 }
280 virtio_set_features(vdev, val);
281 break;
282 case VIRTIO_PCI_QUEUE_PFN:
283 pa = (hwaddr)val << VIRTIO_PCI_QUEUE_ADDR_SHIFT;
284 if (pa == 0) {
285 virtio_pci_stop_ioeventfd(proxy);
286 virtio_reset(proxy->vdev);
287 msix_unuse_all_vectors(&proxy->pci_dev);
288 }
289 else
290 virtio_queue_set_addr(vdev, vdev->queue_sel, pa);
291 break;
292 case VIRTIO_PCI_QUEUE_SEL:
293 if (val < VIRTIO_PCI_QUEUE_MAX)
294 vdev->queue_sel = val;
295 break;
296 case VIRTIO_PCI_QUEUE_NOTIFY:
297 if (val < VIRTIO_PCI_QUEUE_MAX) {
298 virtio_queue_notify(vdev, val);
299 }
300 break;
301 case VIRTIO_PCI_STATUS:
302 if (!(val & VIRTIO_CONFIG_S_DRIVER_OK)) {
303 virtio_pci_stop_ioeventfd(proxy);
304 }
305
306 virtio_set_status(vdev, val & 0xFF);
307
308 if (val & VIRTIO_CONFIG_S_DRIVER_OK) {
309 virtio_pci_start_ioeventfd(proxy);
310 }
311
312 if (vdev->status == 0) {
313 virtio_reset(proxy->vdev);
314 msix_unuse_all_vectors(&proxy->pci_dev);
315 }
316
317 /* Linux before 2.6.34 sets DRIVER_OK without enabling the PCI
318 device bus master bit. In this case we need to disable some
319 safety checks. */
320 if ((val & VIRTIO_CONFIG_S_DRIVER_OK) &&
321 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
322 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
323 }
324 break;
325 case VIRTIO_MSI_CONFIG_VECTOR:
326 msix_vector_unuse(&proxy->pci_dev, vdev->config_vector);
327 /* Make it possible for the guest to discover that an error took place. */
328 if (msix_vector_use(&proxy->pci_dev, val) < 0)
329 val = VIRTIO_NO_VECTOR;
330 vdev->config_vector = val;
331 break;
332 case VIRTIO_MSI_QUEUE_VECTOR:
333 msix_vector_unuse(&proxy->pci_dev,
334 virtio_queue_vector(vdev, vdev->queue_sel));
335 /* Make it possible for the guest to discover that an error took place. */
336 if (msix_vector_use(&proxy->pci_dev, val) < 0)
337 val = VIRTIO_NO_VECTOR;
338 virtio_queue_set_vector(vdev, vdev->queue_sel, val);
339 break;
340 default:
341 error_report("%s: unexpected address 0x%x value 0x%x",
342 __func__, addr, val);
343 break;
344 }
345 }
346
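/* Handle a guest read of the legacy virtio registers; unknown offsets
 * read as all-ones. */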
347 static uint32_t virtio_ioport_read(VirtIOPCIProxy *proxy, uint32_t addr)
348 {
349 VirtIODevice *vdev = proxy->vdev;
350 uint32_t ret = 0xFFFFFFFF;
351
352 switch (addr) {
353 case VIRTIO_PCI_HOST_FEATURES:
354 ret = proxy->host_features;
355 break;
356 case VIRTIO_PCI_GUEST_FEATURES:
357 ret = vdev->guest_features;
358 break;
359 case VIRTIO_PCI_QUEUE_PFN:
360 ret = virtio_queue_get_addr(vdev, vdev->queue_sel)
361 >> VIRTIO_PCI_QUEUE_ADDR_SHIFT;
362 break;
363 case VIRTIO_PCI_QUEUE_NUM:
364 ret = virtio_queue_get_num(vdev, vdev->queue_sel);
365 break;
366 case VIRTIO_PCI_QUEUE_SEL:
367 ret = vdev->queue_sel;
368 break;
369 case VIRTIO_PCI_STATUS:
370 ret = vdev->status;
371 break;
372 case VIRTIO_PCI_ISR:
373 /* reading from the ISR also clears it. */
374 ret = vdev->isr;
375 vdev->isr = 0;
376 qemu_set_irq(proxy->pci_dev.irq[0], 0);
377 break;
378 case VIRTIO_MSI_CONFIG_VECTOR:
379 ret = vdev->config_vector;
380 break;
381 case VIRTIO_MSI_QUEUE_VECTOR:
382 ret = virtio_queue_vector(vdev, vdev->queue_sel);
383 break;
384 default:
385 break;
386 }
387
388 return ret;
389 }
390
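/* BAR 0 access: offsets below the common register block go to the
 * legacy registers above, everything beyond that is the device-specific
 * config space, which is target-endian (hence the byte swaps for
 * big-endian guests). */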
391 static uint64_t virtio_pci_config_read(void *opaque, hwaddr addr,
392 unsigned size)
393 {
394 VirtIOPCIProxy *proxy = opaque;
395 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
396 uint64_t val = 0;
397 if (addr < config) {
398 return virtio_ioport_read(proxy, addr);
399 }
400 addr -= config;
401
402 switch (size) {
403 case 1:
404 val = virtio_config_readb(proxy->vdev, addr);
405 break;
406 case 2:
407 val = virtio_config_readw(proxy->vdev, addr);
408 if (virtio_is_big_endian()) {
409 val = bswap16(val);
410 }
411 break;
412 case 4:
413 val = virtio_config_readl(proxy->vdev, addr);
414 if (virtio_is_big_endian()) {
415 val = bswap32(val);
416 }
417 break;
418 }
419 return val;
420 }
421
422 static void virtio_pci_config_write(void *opaque, hwaddr addr,
423 uint64_t val, unsigned size)
424 {
425 VirtIOPCIProxy *proxy = opaque;
426 uint32_t config = VIRTIO_PCI_CONFIG(&proxy->pci_dev);
427 if (addr < config) {
428 virtio_ioport_write(proxy, addr, val);
429 return;
430 }
431 addr -= config;
432 /*
433 * Virtio-PCI is odd. Ioports are LE but config space is target native
434 * endian.
435 */
436 switch (size) {
437 case 1:
438 virtio_config_writeb(proxy->vdev, addr, val);
439 break;
440 case 2:
441 if (virtio_is_big_endian()) {
442 val = bswap16(val);
443 }
444 virtio_config_writew(proxy->vdev, addr, val);
445 break;
446 case 4:
447 if (virtio_is_big_endian()) {
448 val = bswap32(val);
449 }
450 virtio_config_writel(proxy->vdev, addr, val);
451 break;
452 }
453 }
454
455 static const MemoryRegionOps virtio_pci_config_ops = {
456 .read = virtio_pci_config_read,
457 .write = virtio_pci_config_write,
458 .impl = {
459 .min_access_size = 1,
460 .max_access_size = 4,
461 },
462 .endianness = DEVICE_LITTLE_ENDIAN,
463 };
464
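/* PCI config space write hook: when the guest clears bus mastering,
 * stop ioeventfd and drop DRIVER_OK, unless the bus-master-bug
 * workaround for old guests is active. */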
465 static void virtio_write_config(PCIDevice *pci_dev, uint32_t address,
466 uint32_t val, int len)
467 {
468 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
469
470 pci_default_write_config(pci_dev, address, val, len);
471
472 if (range_covers_byte(address, len, PCI_COMMAND) &&
473 !(pci_dev->config[PCI_COMMAND] & PCI_COMMAND_MASTER) &&
474 !(proxy->flags & VIRTIO_PCI_FLAG_BUS_MASTER_BUG)) {
475 virtio_pci_stop_ioeventfd(proxy);
476 virtio_set_status(proxy->vdev,
477 proxy->vdev->status & ~VIRTIO_CONFIG_S_DRIVER_OK);
478 }
479 }
480
481 static unsigned virtio_pci_get_features(DeviceState *d)
482 {
483 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
484 return proxy->host_features;
485 }
486
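/* Reference-counted KVM MSI routing for a vector: the first user
 * allocates the route, subsequent users only bump the refcount. */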
487 static int kvm_virtio_pci_vq_vector_use(VirtIOPCIProxy *proxy,
488 unsigned int queue_no,
489 unsigned int vector,
490 MSIMessage msg)
491 {
492 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
493 int ret;
494
495 if (irqfd->users == 0) {
496 ret = kvm_irqchip_add_msi_route(kvm_state, msg);
497 if (ret < 0) {
498 return ret;
499 }
500 irqfd->virq = ret;
501 }
502 irqfd->users++;
503 return 0;
504 }
505
506 static void kvm_virtio_pci_vq_vector_release(VirtIOPCIProxy *proxy,
507 unsigned int vector)
508 {
509 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
510 if (--irqfd->users == 0) {
511 kvm_irqchip_release_virq(kvm_state, irqfd->virq);
512 }
513 }
514
515 static int kvm_virtio_pci_irqfd_use(VirtIOPCIProxy *proxy,
516 unsigned int queue_no,
517 unsigned int vector)
518 {
519 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
520 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
521 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
522 int ret;
523 ret = kvm_irqchip_add_irqfd_notifier(kvm_state, n, irqfd->virq);
524 return ret;
525 }
526
527 static void kvm_virtio_pci_irqfd_release(VirtIOPCIProxy *proxy,
528 unsigned int queue_no,
529 unsigned int vector)
530 {
531 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
532 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
533 VirtIOIRQFD *irqfd = &proxy->vector_irqfd[vector];
534 int ret;
535
536 ret = kvm_irqchip_remove_irqfd_notifier(kvm_state, n, irqfd->virq);
537 assert(ret == 0);
538 }
539
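/* Allocate MSI routes (and, when the device can mask guest notifiers
 * itself, irqfds) for the first nvqs queues, undoing all of it on
 * failure. */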
540 static int kvm_virtio_pci_vector_use(VirtIOPCIProxy *proxy, int nvqs)
541 {
542 PCIDevice *dev = &proxy->pci_dev;
543 VirtIODevice *vdev = proxy->vdev;
544 unsigned int vector;
545 int ret, queue_no;
546 MSIMessage msg;
547
548 for (queue_no = 0; queue_no < nvqs; queue_no++) {
549 if (!virtio_queue_get_num(vdev, queue_no)) {
550 break;
551 }
552 vector = virtio_queue_vector(vdev, queue_no);
553 if (vector >= msix_nr_vectors_allocated(dev)) {
554 continue;
555 }
556 msg = msix_get_message(dev, vector);
557 ret = kvm_virtio_pci_vq_vector_use(proxy, queue_no, vector, msg);
558 if (ret < 0) {
559 goto undo;
560 }
561 /* If guest supports masking, set up irqfd now.
562 * Otherwise, delay until unmasked in the frontend.
563 */
564 if (proxy->vdev->guest_notifier_mask) {
565 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
566 if (ret < 0) {
567 kvm_virtio_pci_vq_vector_release(proxy, vector);
568 goto undo;
569 }
570 }
571 }
572 return 0;
573
574 undo:
575 while (--queue_no >= 0) {
576 vector = virtio_queue_vector(vdev, queue_no);
577 if (vector >= msix_nr_vectors_allocated(dev)) {
578 continue;
579 }
580 if (proxy->vdev->guest_notifier_mask) {
581 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
582 }
583 kvm_virtio_pci_vq_vector_release(proxy, vector);
584 }
585 return ret;
586 }
587
588 static void kvm_virtio_pci_vector_release(VirtIOPCIProxy *proxy, int nvqs)
589 {
590 PCIDevice *dev = &proxy->pci_dev;
591 VirtIODevice *vdev = proxy->vdev;
592 unsigned int vector;
593 int queue_no;
594
595 for (queue_no = 0; queue_no < nvqs; queue_no++) {
596 if (!virtio_queue_get_num(vdev, queue_no)) {
597 break;
598 }
599 vector = virtio_queue_vector(vdev, queue_no);
600 if (vector >= msix_nr_vectors_allocated(dev)) {
601 continue;
602 }
603 /* If guest supports masking, clean up irqfd now.
604 * Otherwise, it was cleaned up when the vector was masked in the frontend.
605 */
606 if (proxy->vdev->guest_notifier_mask) {
607 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
608 }
609 kvm_virtio_pci_vq_vector_release(proxy, vector);
610 }
611 }
612
613 static int virtio_pci_vq_vector_unmask(VirtIOPCIProxy *proxy,
614 unsigned int queue_no,
615 unsigned int vector,
616 MSIMessage msg)
617 {
618 VirtQueue *vq = virtio_get_queue(proxy->vdev, queue_no);
619 EventNotifier *n = virtio_queue_get_guest_notifier(vq);
620 VirtIOIRQFD *irqfd;
621 int ret = 0;
622
623 if (proxy->vector_irqfd) {
624 irqfd = &proxy->vector_irqfd[vector];
625 if (irqfd->msg.data != msg.data || irqfd->msg.address != msg.address) {
626 ret = kvm_irqchip_update_msi_route(kvm_state, irqfd->virq, msg);
627 if (ret < 0) {
628 return ret;
629 }
630 }
631 }
632
633 /* If guest supports masking, the irqfd is already set up; just unmask it.
634 * Otherwise, set it up now.
635 */
636 if (proxy->vdev->guest_notifier_mask) {
637 proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, false);
638 /* Test after unmasking to avoid losing events. */
639 if (proxy->vdev->guest_notifier_pending &&
640 proxy->vdev->guest_notifier_pending(proxy->vdev, queue_no)) {
641 event_notifier_set(n);
642 }
643 } else {
644 ret = kvm_virtio_pci_irqfd_use(proxy, queue_no, vector);
645 }
646 return ret;
647 }
648
649 static void virtio_pci_vq_vector_mask(VirtIOPCIProxy *proxy,
650 unsigned int queue_no,
651 unsigned int vector)
652 {
653 /* If guest supports masking, keep irqfd but mask it.
654 * Otherwise, clean it up now.
655 */
656 if (proxy->vdev->guest_notifier_mask) {
657 proxy->vdev->guest_notifier_mask(proxy->vdev, queue_no, true);
658 } else {
659 kvm_virtio_pci_irqfd_release(proxy, queue_no, vector);
660 }
661 }
662
663 static int virtio_pci_vector_unmask(PCIDevice *dev, unsigned vector,
664 MSIMessage msg)
665 {
666 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
667 VirtIODevice *vdev = proxy->vdev;
668 int ret, queue_no;
669
670 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
671 if (!virtio_queue_get_num(vdev, queue_no)) {
672 break;
673 }
674 if (virtio_queue_vector(vdev, queue_no) != vector) {
675 continue;
676 }
677 ret = virtio_pci_vq_vector_unmask(proxy, queue_no, vector, msg);
678 if (ret < 0) {
679 goto undo;
680 }
681 }
682 return 0;
683
684 undo:
685 while (--queue_no >= 0) {
686 if (virtio_queue_vector(vdev, queue_no) != vector) {
687 continue;
688 }
689 virtio_pci_vq_vector_mask(proxy, queue_no, vector);
690 }
691 return ret;
692 }
693
694 static void virtio_pci_vector_mask(PCIDevice *dev, unsigned vector)
695 {
696 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
697 VirtIODevice *vdev = proxy->vdev;
698 int queue_no;
699
700 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
701 if (!virtio_queue_get_num(vdev, queue_no)) {
702 break;
703 }
704 if (virtio_queue_vector(vdev, queue_no) != vector) {
705 continue;
706 }
707 virtio_pci_vq_vector_mask(proxy, queue_no, vector);
708 }
709 }
710
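/* msix poll callback: for vectors that are currently masked, re-raise
 * the pending bit if the corresponding queue has a notification
 * outstanding. */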
711 static void virtio_pci_vector_poll(PCIDevice *dev,
712 unsigned int vector_start,
713 unsigned int vector_end)
714 {
715 VirtIOPCIProxy *proxy = container_of(dev, VirtIOPCIProxy, pci_dev);
716 VirtIODevice *vdev = proxy->vdev;
717 int queue_no;
718 unsigned int vector;
719 EventNotifier *notifier;
720 VirtQueue *vq;
721
722 for (queue_no = 0; queue_no < proxy->nvqs_with_notifiers; queue_no++) {
723 if (!virtio_queue_get_num(vdev, queue_no)) {
724 break;
725 }
726 vector = virtio_queue_vector(vdev, queue_no);
727 if (vector < vector_start || vector >= vector_end ||
728 !msix_is_masked(dev, vector)) {
729 continue;
730 }
731 vq = virtio_get_queue(vdev, queue_no);
732 notifier = virtio_queue_get_guest_notifier(vq);
733 if (vdev->guest_notifier_pending) {
734 if (vdev->guest_notifier_pending(vdev, queue_no)) {
735 msix_set_pending(dev, vector);
736 }
737 } else if (event_notifier_test_and_clear(notifier)) {
738 msix_set_pending(dev, vector);
739 }
740 }
741 }
742
743 static int virtio_pci_set_guest_notifier(DeviceState *d, int n, bool assign,
744 bool with_irqfd)
745 {
746 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
747 VirtQueue *vq = virtio_get_queue(proxy->vdev, n);
748 EventNotifier *notifier = virtio_queue_get_guest_notifier(vq);
749
750 if (assign) {
751 int r = event_notifier_init(notifier, 0);
752 if (r < 0) {
753 return r;
754 }
755 virtio_queue_set_guest_notifier_fd_handler(vq, true, with_irqfd);
756 } else {
757 virtio_queue_set_guest_notifier_fd_handler(vq, false, with_irqfd);
758 event_notifier_cleanup(notifier);
759 }
760
761 return 0;
762 }
763
764 static bool virtio_pci_query_guest_notifiers(DeviceState *d)
765 {
766 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
767 return msix_enabled(&proxy->pci_dev);
768 }
769
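/* Assign or deassign guest notifiers for the first nvqs queues,
 * including the per-vector irqfd and mask/unmask plumbing when MSI-X
 * backed by KVM irqfds is available. */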
770 static int virtio_pci_set_guest_notifiers(DeviceState *d, int nvqs, bool assign)
771 {
772 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
773 VirtIODevice *vdev = proxy->vdev;
774 int r, n;
775 bool with_irqfd = msix_enabled(&proxy->pci_dev) &&
776 kvm_msi_via_irqfd_enabled();
777
778 nvqs = MIN(nvqs, VIRTIO_PCI_QUEUE_MAX);
779
780 /* When deassigning, pass a consistent nvqs value
781 * to avoid leaking notifiers.
782 */
783 assert(assign || nvqs == proxy->nvqs_with_notifiers);
784
785 proxy->nvqs_with_notifiers = nvqs;
786
787 /* Must unset vector notifier while guest notifier is still assigned */
788 if ((proxy->vector_irqfd || vdev->guest_notifier_mask) && !assign) {
789 msix_unset_vector_notifiers(&proxy->pci_dev);
790 if (proxy->vector_irqfd) {
791 kvm_virtio_pci_vector_release(proxy, nvqs);
792 g_free(proxy->vector_irqfd);
793 proxy->vector_irqfd = NULL;
794 }
795 }
796
797 for (n = 0; n < nvqs; n++) {
798 if (!virtio_queue_get_num(vdev, n)) {
799 break;
800 }
801
802 r = virtio_pci_set_guest_notifier(d, n, assign,
803 kvm_msi_via_irqfd_enabled());
804 if (r < 0) {
805 goto assign_error;
806 }
807 }
808
809 /* Must set vector notifier after guest notifier has been assigned */
810 if ((with_irqfd || vdev->guest_notifier_mask) && assign) {
811 if (with_irqfd) {
812 proxy->vector_irqfd =
813 g_malloc0(sizeof(*proxy->vector_irqfd) *
814 msix_nr_vectors_allocated(&proxy->pci_dev));
815 r = kvm_virtio_pci_vector_use(proxy, nvqs);
816 if (r < 0) {
817 goto assign_error;
818 }
819 }
820 r = msix_set_vector_notifiers(&proxy->pci_dev,
821 virtio_pci_vector_unmask,
822 virtio_pci_vector_mask,
823 virtio_pci_vector_poll);
824 if (r < 0) {
825 goto notifiers_error;
826 }
827 }
828
829 return 0;
830
831 notifiers_error:
832 if (with_irqfd) {
833 assert(assign);
834 kvm_virtio_pci_vector_release(proxy, nvqs);
835 }
836
837 assign_error:
838 /* We get here on assignment failure. Recover by undoing for VQs 0 .. n. */
839 assert(assign);
840 while (--n >= 0) {
841 virtio_pci_set_guest_notifier(d, n, !assign, with_irqfd);
842 }
843 return r;
844 }
845
846 static int virtio_pci_set_host_notifier(DeviceState *d, int n, bool assign)
847 {
848 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
849
850 /* Stop using ioeventfd for virtqueue kick if the device starts using host
851 * notifiers. This makes it easy to avoid stepping on each other's toes.
852 */
853 proxy->ioeventfd_disabled = assign;
854 if (assign) {
855 virtio_pci_stop_ioeventfd(proxy);
856 }
857 /* We don't need to start the ioeventfd here: the backend currently
858 * only stops it on a status change away from DRIVER_OK, on reset,
859 * on vmstop and the like. If we ever add code to start it here, we
860 * will need to check vmstate, device state etc. */
861 return virtio_pci_set_host_notifier_internal(proxy, n, assign, false);
862 }
863
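/* vmstate change handler: restart ioeventfd (and re-detect the
 * bus-master bug) when the VM resumes, stop ioeventfd when it stops. */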
864 static void virtio_pci_vmstate_change(DeviceState *d, bool running)
865 {
866 VirtIOPCIProxy *proxy = to_virtio_pci_proxy(d);
867
868 if (running) {
869 /* If the guest has bus mastering disabled but the device is in the
870 ready state, we are dealing with a buggy guest OS. */
871 if ((proxy->vdev->status & VIRTIO_CONFIG_S_DRIVER_OK) &&
872 !(proxy->pci_dev.config[PCI_COMMAND] & PCI_COMMAND_MASTER)) {
873 proxy->flags |= VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
874 }
875 virtio_pci_start_ioeventfd(proxy);
876 } else {
877 virtio_pci_stop_ioeventfd(proxy);
878 }
879 }
880
881 static const VirtIOBindings virtio_pci_bindings = {
882 .notify = virtio_pci_notify,
883 .save_config = virtio_pci_save_config,
884 .load_config = virtio_pci_load_config,
885 .save_queue = virtio_pci_save_queue,
886 .load_queue = virtio_pci_load_queue,
887 .get_features = virtio_pci_get_features,
888 .query_guest_notifiers = virtio_pci_query_guest_notifiers,
889 .set_host_notifier = virtio_pci_set_host_notifier,
890 .set_guest_notifiers = virtio_pci_set_guest_notifiers,
891 .vmstate_change = virtio_pci_vmstate_change,
892 };
893
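/* Legacy binding entry point: fill in the PCI config header, set up the
 * MSI-X BAR when vectors are requested, and register BAR 0 holding the
 * legacy register block followed by the device config space. */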
894 void virtio_init_pci(VirtIOPCIProxy *proxy, VirtIODevice *vdev)
895 {
896 uint8_t *config;
897 uint32_t size;
898
899 proxy->vdev = vdev;
900
901 config = proxy->pci_dev.config;
902
903 if (proxy->class_code) {
904 pci_config_set_class(config, proxy->class_code);
905 }
906 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
907 pci_get_word(config + PCI_VENDOR_ID));
908 pci_set_word(config + PCI_SUBSYSTEM_ID, vdev->device_id);
909 config[PCI_INTERRUPT_PIN] = 1;
910
911 if (vdev->nvectors &&
912 msix_init_exclusive_bar(&proxy->pci_dev, vdev->nvectors, 1)) {
913 vdev->nvectors = 0;
914 }
915
916 proxy->pci_dev.config_write = virtio_write_config;
917
918 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev) + vdev->config_len;
919 if (size & (size-1))
920 size = 1 << qemu_fls(size);
921
922 memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
923 "virtio-pci", size);
924 pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
925 &proxy->bar);
926
927 if (!kvm_has_many_ioeventfds()) {
928 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
929 }
930
931 virtio_bind_device(vdev, &virtio_pci_bindings, DEVICE(proxy));
932 proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
933 proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
934 proxy->host_features = vdev->get_features(vdev, proxy->host_features);
935 }
936
937 static void virtio_exit_pci(PCIDevice *pci_dev)
938 {
939 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
940
941 memory_region_destroy(&proxy->bar);
942 msix_uninit_exclusive_bar(pci_dev);
943 }
944
945 static int virtio_serial_init_pci(PCIDevice *pci_dev)
946 {
947 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
948 VirtIODevice *vdev;
949
950 if (proxy->class_code != PCI_CLASS_COMMUNICATION_OTHER &&
951 proxy->class_code != PCI_CLASS_DISPLAY_OTHER && /* qemu 0.10 */
952 proxy->class_code != PCI_CLASS_OTHERS) /* qemu-kvm */
953 proxy->class_code = PCI_CLASS_COMMUNICATION_OTHER;
954
955 vdev = virtio_serial_init(&pci_dev->qdev, &proxy->serial);
956 if (!vdev) {
957 return -1;
958 }
959
960 /* backwards-compatibility with machines that were created with
961 DEV_NVECTORS_UNSPECIFIED */
962 vdev->nvectors = proxy->nvectors == DEV_NVECTORS_UNSPECIFIED
963 ? proxy->serial.max_virtserial_ports + 1
964 : proxy->nvectors;
965 virtio_init_pci(proxy, vdev);
966 proxy->nvectors = vdev->nvectors;
967 return 0;
968 }
969
970 static void virtio_serial_exit_pci(PCIDevice *pci_dev)
971 {
972 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
973
974 virtio_pci_stop_ioeventfd(proxy);
975 virtio_serial_exit(proxy->vdev);
976 virtio_exit_pci(pci_dev);
977 }
978
979 static int virtio_net_init_pci(PCIDevice *pci_dev)
980 {
981 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
982 VirtIODevice *vdev;
983
984 vdev = virtio_net_init(&pci_dev->qdev, &proxy->nic, &proxy->net,
985 proxy->host_features);
986
987 vdev->nvectors = proxy->nvectors;
988 virtio_init_pci(proxy, vdev);
989
990 /* make the actual value visible */
991 proxy->nvectors = vdev->nvectors;
992 return 0;
993 }
994
995 static void virtio_net_exit_pci(PCIDevice *pci_dev)
996 {
997 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
998
999 virtio_pci_stop_ioeventfd(proxy);
1000 virtio_net_exit(proxy->vdev);
1001 virtio_exit_pci(pci_dev);
1002 }
1003
1004 static int virtio_rng_init_pci(PCIDevice *pci_dev)
1005 {
1006 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1007 VirtIODevice *vdev;
1008
1009 if (proxy->rng.rng == NULL) {
1010 proxy->rng.default_backend = RNG_RANDOM(object_new(TYPE_RNG_RANDOM));
1011
1012 object_property_add_child(OBJECT(pci_dev),
1013 "default-backend",
1014 OBJECT(proxy->rng.default_backend),
1015 NULL);
1016
1017 object_property_set_link(OBJECT(pci_dev),
1018 OBJECT(proxy->rng.default_backend),
1019 "rng", NULL);
1020 }
1021
1022 vdev = virtio_rng_init(&pci_dev->qdev, &proxy->rng);
1023 if (!vdev) {
1024 return -1;
1025 }
1026 virtio_init_pci(proxy, vdev);
1027 return 0;
1028 }
1029
1030 static void virtio_rng_exit_pci(PCIDevice *pci_dev)
1031 {
1032 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1033
1034 virtio_pci_stop_ioeventfd(proxy);
1035 virtio_rng_exit(proxy->vdev);
1036 virtio_exit_pci(pci_dev);
1037 }
1038
1039 static Property virtio_net_properties[] = {
1040 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, false),
1041 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 3),
1042 DEFINE_VIRTIO_NET_FEATURES(VirtIOPCIProxy, host_features),
1043 DEFINE_NIC_PROPERTIES(VirtIOPCIProxy, nic),
1044 DEFINE_PROP_UINT32("x-txtimer", VirtIOPCIProxy, net.txtimer, TX_TIMER_INTERVAL),
1045 DEFINE_PROP_INT32("x-txburst", VirtIOPCIProxy, net.txburst, TX_BURST),
1046 DEFINE_PROP_STRING("tx", VirtIOPCIProxy, net.tx),
1047 DEFINE_PROP_END_OF_LIST(),
1048 };
1049
1050 static void virtio_net_class_init(ObjectClass *klass, void *data)
1051 {
1052 DeviceClass *dc = DEVICE_CLASS(klass);
1053 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1054
1055 k->init = virtio_net_init_pci;
1056 k->exit = virtio_net_exit_pci;
1057 k->romfile = "efi-virtio.rom";
1058 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1059 k->device_id = PCI_DEVICE_ID_VIRTIO_NET;
1060 k->revision = VIRTIO_PCI_ABI_VERSION;
1061 k->class_id = PCI_CLASS_NETWORK_ETHERNET;
1062 dc->reset = virtio_pci_reset;
1063 dc->props = virtio_net_properties;
1064 }
1065
1066 static const TypeInfo virtio_net_info = {
1067 .name = "virtio-net-pci",
1068 .parent = TYPE_PCI_DEVICE,
1069 .instance_size = sizeof(VirtIOPCIProxy),
1070 .class_init = virtio_net_class_init,
1071 };
1072
1073 static Property virtio_serial_properties[] = {
1074 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1075 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1076 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1077 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1078 DEFINE_PROP_UINT32("max_ports", VirtIOPCIProxy, serial.max_virtserial_ports, 31),
1079 DEFINE_PROP_END_OF_LIST(),
1080 };
1081
1082 static void virtio_serial_class_init(ObjectClass *klass, void *data)
1083 {
1084 DeviceClass *dc = DEVICE_CLASS(klass);
1085 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1086
1087 k->init = virtio_serial_init_pci;
1088 k->exit = virtio_serial_exit_pci;
1089 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1090 k->device_id = PCI_DEVICE_ID_VIRTIO_CONSOLE;
1091 k->revision = VIRTIO_PCI_ABI_VERSION;
1092 k->class_id = PCI_CLASS_COMMUNICATION_OTHER;
1093 dc->reset = virtio_pci_reset;
1094 dc->props = virtio_serial_properties;
1095 }
1096
1097 static const TypeInfo virtio_serial_info = {
1098 .name = "virtio-serial-pci",
1099 .parent = TYPE_PCI_DEVICE,
1100 .instance_size = sizeof(VirtIOPCIProxy),
1101 .class_init = virtio_serial_class_init,
1102 };
1103
1104 static void virtio_rng_initfn(Object *obj)
1105 {
1106 PCIDevice *pci_dev = PCI_DEVICE(obj);
1107 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1108
1109 object_property_add_link(obj, "rng", TYPE_RNG_BACKEND,
1110 (Object **)&proxy->rng.rng, NULL);
1111 }
1112
1113 static Property virtio_rng_properties[] = {
1114 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1115 /* The default is INT64_MAX bytes per period, i.e. no rate limit at all. If
1116 you have an entropy source capable of generating more entropy than this
1117 and you can pass it through via virtio-rng, then hats off to you. Until
1118 then, this is unlimited for all practical purposes.
1119 */
1120 DEFINE_PROP_UINT64("max-bytes", VirtIOPCIProxy, rng.max_bytes, INT64_MAX),
1121 DEFINE_PROP_UINT32("period", VirtIOPCIProxy, rng.period_ms, 1 << 16),
1122 DEFINE_PROP_END_OF_LIST(),
1123 };
1124
1125 static void virtio_rng_class_init(ObjectClass *klass, void *data)
1126 {
1127 DeviceClass *dc = DEVICE_CLASS(klass);
1128 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1129
1130 k->init = virtio_rng_init_pci;
1131 k->exit = virtio_rng_exit_pci;
1132 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1133 k->device_id = PCI_DEVICE_ID_VIRTIO_RNG;
1134 k->revision = VIRTIO_PCI_ABI_VERSION;
1135 k->class_id = PCI_CLASS_OTHERS;
1136 dc->reset = virtio_pci_reset;
1137 dc->props = virtio_rng_properties;
1138 }
1139
1140 static const TypeInfo virtio_rng_info = {
1141 .name = "virtio-rng-pci",
1142 .parent = TYPE_PCI_DEVICE,
1143 .instance_size = sizeof(VirtIOPCIProxy),
1144 .instance_init = virtio_rng_initfn,
1145 .class_init = virtio_rng_class_init,
1146 };
1147
1148 #ifdef CONFIG_VIRTFS
1149 static int virtio_9p_init_pci(PCIDevice *pci_dev)
1150 {
1151 VirtIOPCIProxy *proxy = DO_UPCAST(VirtIOPCIProxy, pci_dev, pci_dev);
1152 VirtIODevice *vdev;
1153
1154 vdev = virtio_9p_init(&pci_dev->qdev, &proxy->fsconf);
1155 vdev->nvectors = proxy->nvectors;
1156 virtio_init_pci(proxy, vdev);
1157 /* make the actual value visible */
1158 proxy->nvectors = vdev->nvectors;
1159 return 0;
1160 }
1161
1162 static Property virtio_9p_properties[] = {
1163 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags, VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1164 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1165 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1166 DEFINE_PROP_STRING("mount_tag", VirtIOPCIProxy, fsconf.tag),
1167 DEFINE_PROP_STRING("fsdev", VirtIOPCIProxy, fsconf.fsdev_id),
1168 DEFINE_PROP_END_OF_LIST(),
1169 };
1170
1171 static void virtio_9p_class_init(ObjectClass *klass, void *data)
1172 {
1173 DeviceClass *dc = DEVICE_CLASS(klass);
1174 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1175
1176 k->init = virtio_9p_init_pci;
1177 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1178 k->device_id = PCI_DEVICE_ID_VIRTIO_9P;
1179 k->revision = VIRTIO_PCI_ABI_VERSION;
1180 k->class_id = 0x2;
1181 dc->props = virtio_9p_properties;
1182 dc->reset = virtio_pci_reset;
1183 }
1184
1185 static const TypeInfo virtio_9p_info = {
1186 .name = "virtio-9p-pci",
1187 .parent = TYPE_PCI_DEVICE,
1188 .instance_size = sizeof(VirtIOPCIProxy),
1189 .class_init = virtio_9p_class_init,
1190 };
1191 #endif
1192
1193 /*
1194 * virtio-pci: This is the PCIDevice which has a virtio-pci-bus.
1195 */
1196
1197 /* This is called by virtio-bus just after the device is plugged. */
1198 static void virtio_pci_device_plugged(DeviceState *d)
1199 {
1200 VirtIOPCIProxy *proxy = VIRTIO_PCI(d);
1201 VirtioBusState *bus = &proxy->bus;
1202 uint8_t *config;
1203 uint32_t size;
1204
1205 proxy->vdev = bus->vdev;
1206
1207 config = proxy->pci_dev.config;
1208 if (proxy->class_code) {
1209 pci_config_set_class(config, proxy->class_code);
1210 }
1211 pci_set_word(config + PCI_SUBSYSTEM_VENDOR_ID,
1212 pci_get_word(config + PCI_VENDOR_ID));
1213 pci_set_word(config + PCI_SUBSYSTEM_ID, virtio_bus_get_vdev_id(bus));
1214 config[PCI_INTERRUPT_PIN] = 1;
1215
1216 if (proxy->nvectors &&
1217 msix_init_exclusive_bar(&proxy->pci_dev, proxy->nvectors, 1)) {
1218 proxy->nvectors = 0;
1219 }
1220
1221 proxy->pci_dev.config_write = virtio_write_config;
1222
1223 size = VIRTIO_PCI_REGION_SIZE(&proxy->pci_dev)
1224 + virtio_bus_get_vdev_config_len(bus);
1225 if (size & (size - 1)) {
1226 size = 1 << qemu_fls(size);
1227 }
1228
1229 memory_region_init_io(&proxy->bar, &virtio_pci_config_ops, proxy,
1230 "virtio-pci", size);
1231 pci_register_bar(&proxy->pci_dev, 0, PCI_BASE_ADDRESS_SPACE_IO,
1232 &proxy->bar);
1233
1234 if (!kvm_has_many_ioeventfds()) {
1235 proxy->flags &= ~VIRTIO_PCI_FLAG_USE_IOEVENTFD;
1236 }
1237
1238 proxy->host_features |= 0x1 << VIRTIO_F_NOTIFY_ON_EMPTY;
1239 proxy->host_features |= 0x1 << VIRTIO_F_BAD_FEATURE;
1240 proxy->host_features = virtio_bus_get_vdev_features(bus,
1241 proxy->host_features);
1242 }
1243
1244 static int virtio_pci_init(PCIDevice *pci_dev)
1245 {
1246 VirtIOPCIProxy *dev = VIRTIO_PCI(pci_dev);
1247 VirtioPCIClass *k = VIRTIO_PCI_GET_CLASS(pci_dev);
1248 virtio_pci_bus_new(&dev->bus, dev);
1249 if (k->init != NULL) {
1250 return k->init(dev);
1251 }
1252 return 0;
1253 }
1254
1255 static void virtio_pci_exit(PCIDevice *pci_dev)
1256 {
1257 VirtIOPCIProxy *proxy = VIRTIO_PCI(pci_dev);
1258 virtio_pci_stop_ioeventfd(proxy);
1259 virtio_exit_pci(pci_dev);
1260 }
1261
1262 /*
1263 * This will be renamed virtio_pci_reset at the end of the series.
1264 * virtio_pci_reset is still in use at this moment.
1265 */
1266 static void virtio_pci_rst(DeviceState *qdev)
1267 {
1268 VirtIOPCIProxy *proxy = VIRTIO_PCI(qdev);
1269 VirtioBusState *bus = VIRTIO_BUS(&proxy->bus);
1270 virtio_pci_stop_ioeventfd(proxy);
1271 virtio_bus_reset(bus);
1272 msix_unuse_all_vectors(&proxy->pci_dev);
1273 proxy->flags &= ~VIRTIO_PCI_FLAG_BUS_MASTER_BUG;
1274 }
1275
1276 static void virtio_pci_class_init(ObjectClass *klass, void *data)
1277 {
1278 DeviceClass *dc = DEVICE_CLASS(klass);
1279 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1280
1281 k->init = virtio_pci_init;
1282 k->exit = virtio_pci_exit;
1283 k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1284 k->revision = VIRTIO_PCI_ABI_VERSION;
1285 k->class_id = PCI_CLASS_OTHERS;
1286 dc->reset = virtio_pci_rst;
1287 }
1288
1289 static const TypeInfo virtio_pci_info = {
1290 .name = TYPE_VIRTIO_PCI,
1291 .parent = TYPE_PCI_DEVICE,
1292 .instance_size = sizeof(VirtIOPCIProxy),
1293 .class_init = virtio_pci_class_init,
1294 .class_size = sizeof(VirtioPCIClass),
1295 .abstract = true,
1296 };
1297
1298 /* virtio-blk-pci */
1299
1300 static Property virtio_blk_pci_properties[] = {
1301 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1302 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1303 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1304 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors, 2),
1305 #ifdef CONFIG_VIRTIO_BLK_DATA_PLANE
1306 DEFINE_PROP_BIT("x-data-plane", VirtIOBlkPCI, blk.data_plane, 0, false),
1307 #endif
1308 DEFINE_VIRTIO_BLK_FEATURES(VirtIOPCIProxy, host_features),
1309 DEFINE_VIRTIO_BLK_PROPERTIES(VirtIOBlkPCI, blk),
1310 DEFINE_PROP_END_OF_LIST(),
1311 };
1312
1313 static int virtio_blk_pci_init(VirtIOPCIProxy *vpci_dev)
1314 {
1315 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(vpci_dev);
1316 DeviceState *vdev = DEVICE(&dev->vdev);
1317 virtio_blk_set_conf(vdev, &(dev->blk));
1318 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1319 if (qdev_init(vdev) < 0) {
1320 return -1;
1321 }
1322 return 0;
1323 }
1324
1325 static void virtio_blk_pci_class_init(ObjectClass *klass, void *data)
1326 {
1327 DeviceClass *dc = DEVICE_CLASS(klass);
1328 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1329 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1330
1331 dc->props = virtio_blk_pci_properties;
1332 k->init = virtio_blk_pci_init;
1333 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1334 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BLOCK;
1335 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1336 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1337 }
1338
1339 static void virtio_blk_pci_instance_init(Object *obj)
1340 {
1341 VirtIOBlkPCI *dev = VIRTIO_BLK_PCI(obj);
1342 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BLK);
1343 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1344 }
1345
1346 static const TypeInfo virtio_blk_pci_info = {
1347 .name = TYPE_VIRTIO_BLK_PCI,
1348 .parent = TYPE_VIRTIO_PCI,
1349 .instance_size = sizeof(VirtIOBlkPCI),
1350 .instance_init = virtio_blk_pci_instance_init,
1351 .class_init = virtio_blk_pci_class_init,
1352 };
1353
1354 /* virtio-scsi-pci */
1355
1356 static Property virtio_scsi_pci_properties[] = {
1357 DEFINE_PROP_BIT("ioeventfd", VirtIOPCIProxy, flags,
1358 VIRTIO_PCI_FLAG_USE_IOEVENTFD_BIT, true),
1359 DEFINE_PROP_UINT32("vectors", VirtIOPCIProxy, nvectors,
1360 DEV_NVECTORS_UNSPECIFIED),
1361 DEFINE_VIRTIO_SCSI_FEATURES(VirtIOPCIProxy, host_features),
1362 DEFINE_VIRTIO_SCSI_PROPERTIES(VirtIOSCSIPCI, vdev.conf),
1363 DEFINE_PROP_END_OF_LIST(),
1364 };
1365
1366 static int virtio_scsi_pci_init_pci(VirtIOPCIProxy *vpci_dev)
1367 {
1368 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(vpci_dev);
1369 DeviceState *vdev = DEVICE(&dev->vdev);
1370
1371 if (vpci_dev->nvectors == DEV_NVECTORS_UNSPECIFIED) {
1372 vpci_dev->nvectors = dev->vdev.conf.num_queues + 3;
1373 }
1374
1375 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1376 if (qdev_init(vdev) < 0) {
1377 return -1;
1378 }
1379 return 0;
1380 }
1381
1382 static void virtio_scsi_pci_class_init(ObjectClass *klass, void *data)
1383 {
1384 DeviceClass *dc = DEVICE_CLASS(klass);
1385 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1386 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1387 k->init = virtio_scsi_pci_init_pci;
1388 dc->props = virtio_scsi_pci_properties;
1389 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1390 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_SCSI;
1391 pcidev_k->revision = 0x00;
1392 pcidev_k->class_id = PCI_CLASS_STORAGE_SCSI;
1393 }
1394
1395 static void virtio_scsi_pci_instance_init(Object *obj)
1396 {
1397 VirtIOSCSIPCI *dev = VIRTIO_SCSI_PCI(obj);
1398 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_SCSI);
1399 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1400 }
1401
1402 static const TypeInfo virtio_scsi_pci_info = {
1403 .name = TYPE_VIRTIO_SCSI_PCI,
1404 .parent = TYPE_VIRTIO_PCI,
1405 .instance_size = sizeof(VirtIOSCSIPCI),
1406 .instance_init = virtio_scsi_pci_instance_init,
1407 .class_init = virtio_scsi_pci_class_init,
1408 };
1409
1410 /* virtio-balloon-pci */
1411
1412 static Property virtio_balloon_pci_properties[] = {
1413 DEFINE_VIRTIO_COMMON_FEATURES(VirtIOPCIProxy, host_features),
1414 DEFINE_PROP_HEX32("class", VirtIOPCIProxy, class_code, 0),
1415 DEFINE_PROP_END_OF_LIST(),
1416 };
1417
1418 static int virtio_balloon_pci_init(VirtIOPCIProxy *vpci_dev)
1419 {
1420 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(vpci_dev);
1421 DeviceState *vdev = DEVICE(&dev->vdev);
1422
1423 if (vpci_dev->class_code != PCI_CLASS_OTHERS &&
1424 vpci_dev->class_code != PCI_CLASS_MEMORY_RAM) { /* qemu < 1.1 */
1425 vpci_dev->class_code = PCI_CLASS_OTHERS;
1426 }
1427
1428 qdev_set_parent_bus(vdev, BUS(&vpci_dev->bus));
1429 if (qdev_init(vdev) < 0) {
1430 return -1;
1431 }
1432 return 0;
1433 }
1434
1435 static void virtio_balloon_pci_class_init(ObjectClass *klass, void *data)
1436 {
1437 DeviceClass *dc = DEVICE_CLASS(klass);
1438 VirtioPCIClass *k = VIRTIO_PCI_CLASS(klass);
1439 PCIDeviceClass *pcidev_k = PCI_DEVICE_CLASS(klass);
1440 k->init = virtio_balloon_pci_init;
1441 dc->props = virtio_balloon_pci_properties;
1442 pcidev_k->vendor_id = PCI_VENDOR_ID_REDHAT_QUMRANET;
1443 pcidev_k->device_id = PCI_DEVICE_ID_VIRTIO_BALLOON;
1444 pcidev_k->revision = VIRTIO_PCI_ABI_VERSION;
1445 pcidev_k->class_id = PCI_CLASS_OTHERS;
1446 }
1447
1448 static void virtio_balloon_pci_instance_init(Object *obj)
1449 {
1450 VirtIOBalloonPCI *dev = VIRTIO_BALLOON_PCI(obj);
1451 object_initialize(OBJECT(&dev->vdev), TYPE_VIRTIO_BALLOON);
1452 object_property_add_child(obj, "virtio-backend", OBJECT(&dev->vdev), NULL);
1453 }
1454
1455 static const TypeInfo virtio_balloon_pci_info = {
1456 .name = TYPE_VIRTIO_BALLOON_PCI,
1457 .parent = TYPE_VIRTIO_PCI,
1458 .instance_size = sizeof(VirtIOBalloonPCI),
1459 .instance_init = virtio_balloon_pci_instance_init,
1460 .class_init = virtio_balloon_pci_class_init,
1461 };
1462
1463 /* virtio-pci-bus */
1464
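/* Create the virtio-pci-bus in place inside the proxy device. */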
1465 void virtio_pci_bus_new(VirtioBusState *bus, VirtIOPCIProxy *dev)
1466 {
1467 DeviceState *qdev = DEVICE(dev);
1468 BusState *qbus;
1469 qbus_create_inplace((BusState *)bus, TYPE_VIRTIO_PCI_BUS, qdev, NULL);
1470 qbus = BUS(bus);
1471 qbus->allow_hotplug = 1;
1472 }
1473
1474 static void virtio_pci_bus_class_init(ObjectClass *klass, void *data)
1475 {
1476 BusClass *bus_class = BUS_CLASS(klass);
1477 VirtioBusClass *k = VIRTIO_BUS_CLASS(klass);
1478 bus_class->max_dev = 1;
1479 k->notify = virtio_pci_notify;
1480 k->save_config = virtio_pci_save_config;
1481 k->load_config = virtio_pci_load_config;
1482 k->save_queue = virtio_pci_save_queue;
1483 k->load_queue = virtio_pci_load_queue;
1484 k->get_features = virtio_pci_get_features;
1485 k->query_guest_notifiers = virtio_pci_query_guest_notifiers;
1486 k->set_host_notifier = virtio_pci_set_host_notifier;
1487 k->set_guest_notifiers = virtio_pci_set_guest_notifiers;
1488 k->vmstate_change = virtio_pci_vmstate_change;
1489 k->device_plugged = virtio_pci_device_plugged;
1490 }
1491
1492 static const TypeInfo virtio_pci_bus_info = {
1493 .name = TYPE_VIRTIO_PCI_BUS,
1494 .parent = TYPE_VIRTIO_BUS,
1495 .instance_size = sizeof(VirtioPCIBusState),
1496 .class_init = virtio_pci_bus_class_init,
1497 };
1498
1499 static void virtio_pci_register_types(void)
1500 {
1501 type_register_static(&virtio_net_info);
1502 type_register_static(&virtio_serial_info);
1503 type_register_static(&virtio_rng_info);
1504 type_register_static(&virtio_pci_bus_info);
1505 type_register_static(&virtio_pci_info);
1506 #ifdef CONFIG_VIRTFS
1507 type_register_static(&virtio_9p_info);
1508 #endif
1509 type_register_static(&virtio_blk_pci_info);
1510 type_register_static(&virtio_scsi_pci_info);
1511 type_register_static(&virtio_balloon_pci_info);
1512 }
1513
1514 type_init(virtio_pci_register_types)