]> git.proxmox.com Git - mirror_qemu.git/blame - hw/misc/ivshmem.c
ivshmem: Fix harmless misuse of Error
[mirror_qemu.git] / hw / misc / ivshmem.c
CommitLineData
6cbf4c8c
CM
1/*
2 * Inter-VM Shared Memory PCI device.
3 *
4 * Author:
5 * Cam Macdonell <cam@cs.ualberta.ca>
6 *
7 * Based On: cirrus_vga.c
8 * Copyright (c) 2004 Fabrice Bellard
9 * Copyright (c) 2004 Makoto Suzuki (suzu)
10 *
11 * and rtl8139.c
12 * Copyright (c) 2006 Igor Kovalenko
13 *
14 * This code is licensed under the GNU GPL v2.
6b620ca3
PB
15 *
16 * Contributions after 2012-01-13 are licensed under the terms of the
17 * GNU GPL, version 2 or (at your option) any later version.
6cbf4c8c 18 */
0d1c9782 19#include "qemu/osdep.h"
83c9f4ca 20#include "hw/hw.h"
0d09e41a 21#include "hw/i386/pc.h"
83c9f4ca 22#include "hw/pci/pci.h"
660c97ee 23#include "hw/pci/msi.h"
83c9f4ca 24#include "hw/pci/msix.h"
9c17d615 25#include "sysemu/kvm.h"
caf71f86 26#include "migration/migration.h"
d49b6836 27#include "qemu/error-report.h"
1de7afc9 28#include "qemu/event_notifier.h"
a2e9011b 29#include "qemu/fifo8.h"
dccfcd0e 30#include "sysemu/char.h"
d9453c93
MAL
31#include "sysemu/hostmem.h"
32#include "qapi/visitor.h"
56a571d9 33#include "exec/ram_addr.h"
6cbf4c8c 34
5105b1d8
DM
35#include "hw/misc/ivshmem.h"
36
6cbf4c8c 37#include <sys/mman.h>
6cbf4c8c 38
b8ef62a9
PB
/* PCI identity: Red Hat vendor ID, ivshmem device ID. */
#define PCI_VENDOR_ID_IVSHMEM   PCI_VENDOR_ID_REDHAT_QUMRANET
#define PCI_DEVICE_ID_IVSHMEM   0x1110

/* Peer IDs travel as 16-bit values in doorbell writes, so cap the table. */
#define IVSHMEM_MAX_PEERS G_MAXUINT16

/* Feature bits in IVShmemState::features. */
#define IVSHMEM_IOEVENTFD 0
#define IVSHMEM_MSI       1

/* Values for role_val ("role" property, pre-parsed). */
#define IVSHMEM_PEER    0
#define IVSHMEM_MASTER  1

/* Size of the register BAR (BAR 0). */
#define IVSHMEM_REG_BAR_SIZE 0x100

/* Debug tracing; compiled out unless IVSHMEM_DEBUG is set to 1. */
#define IVSHMEM_DEBUG 0
#define IVSHMEM_DPRINTF(fmt, ...)                       \
    do {                                                \
        if (IVSHMEM_DEBUG) {                            \
            printf("IVSHMEM: " fmt, ## __VA_ARGS__);    \
        }                                               \
    } while (0)

/* QOM boilerplate. */
#define TYPE_IVSHMEM "ivshmem"
#define IVSHMEM(obj) \
    OBJECT_CHECK(IVShmemState, (obj), TYPE_IVSHMEM)
6cbf4c8c
CM
63typedef struct Peer {
64 int nb_eventfds;
563027cc 65 EventNotifier *eventfds;
6cbf4c8c
CM
66} Peer;
67
0f57350e 68typedef struct MSIVector {
6cbf4c8c 69 PCIDevice *pdev;
660c97ee 70 int virq;
0f57350e 71} MSIVector;
6cbf4c8c
CM
72
73typedef struct IVShmemState {
b7578eaa
AF
74 /*< private >*/
75 PCIDevice parent_obj;
76 /*< public >*/
77
d9453c93 78 HostMemoryBackend *hostmem;
6cbf4c8c
CM
79 uint32_t intrmask;
80 uint32_t intrstatus;
6cbf4c8c 81
6cbf4c8c 82 CharDriverState *server_chr;
a2e9011b 83 Fifo8 incoming_fifo;
cb06608e 84 MemoryRegion ivshmem_mmio;
6cbf4c8c 85
cb06608e
AK
86 /* We might need to register the BAR before we actually have the memory.
87 * So prepare a container MemoryRegion for the BAR immediately and
88 * add a subregion when we have the memory.
89 */
90 MemoryRegion bar;
91 MemoryRegion ivshmem;
6cbf4c8c 92 uint64_t ivshmem_size; /* size of shared memory region */
c08ba66f 93 uint32_t ivshmem_64bit;
6cbf4c8c
CM
94
95 Peer *peers;
f456179f 96 int nb_peers; /* how many peers we have space for */
6cbf4c8c
CM
97
98 int vm_id;
99 uint32_t vectors;
100 uint32_t features;
0f57350e 101 MSIVector *msi_vectors;
6cbf4c8c 102
38e0735e
AL
103 Error *migration_blocker;
104
6cbf4c8c
CM
105 char * shmobj;
106 char * sizearg;
107 char * role;
108 int role_val; /* scalar to avoid multiple string comparisons */
109} IVShmemState;
110
111/* registers for the Inter-VM shared memory device */
112enum ivshmem_registers {
113 INTRMASK = 0,
114 INTRSTATUS = 4,
115 IVPOSITION = 8,
116 DOORBELL = 12,
117};
118
119static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
120 unsigned int feature) {
121 return (ivs->features & (1 << feature));
122}
123
6cbf4c8c 124/* accessing registers - based on rtl8139 */
d8a5da07 125static void ivshmem_update_irq(IVShmemState *s)
6cbf4c8c 126{
b7578eaa 127 PCIDevice *d = PCI_DEVICE(s);
6cbf4c8c
CM
128 int isr;
129 isr = (s->intrstatus & s->intrmask) & 0xffffffff;
130
131 /* don't print ISR resets */
132 if (isr) {
133 IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
dbc464d4 134 isr ? 1 : 0, s->intrstatus, s->intrmask);
6cbf4c8c
CM
135 }
136
9e64f8a3 137 pci_set_irq(d, (isr != 0));
6cbf4c8c
CM
138}
139
140static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
141{
142 IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);
143
144 s->intrmask = val;
145
d8a5da07 146 ivshmem_update_irq(s);
6cbf4c8c
CM
147}
148
149static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
150{
151 uint32_t ret = s->intrmask;
152
153 IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
154
155 return ret;
156}
157
158static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
159{
160 IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);
161
162 s->intrstatus = val;
163
d8a5da07 164 ivshmem_update_irq(s);
6cbf4c8c
CM
165}
166
167static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
168{
169 uint32_t ret = s->intrstatus;
170
171 /* reading ISR clears all interrupts */
172 s->intrstatus = 0;
173
d8a5da07 174 ivshmem_update_irq(s);
6cbf4c8c
CM
175
176 return ret;
177}
178
a8170e5e 179static void ivshmem_io_write(void *opaque, hwaddr addr,
cb06608e 180 uint64_t val, unsigned size)
6cbf4c8c
CM
181{
182 IVShmemState *s = opaque;
183
6cbf4c8c
CM
184 uint16_t dest = val >> 16;
185 uint16_t vector = val & 0xff;
186
187 addr &= 0xfc;
188
189 IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
190 switch (addr)
191 {
192 case INTRMASK:
193 ivshmem_IntrMask_write(s, val);
194 break;
195
196 case INTRSTATUS:
197 ivshmem_IntrStatus_write(s, val);
198 break;
199
200 case DOORBELL:
201 /* check that dest VM ID is reasonable */
95c8425c 202 if (dest >= s->nb_peers) {
6cbf4c8c
CM
203 IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
204 break;
205 }
206
207 /* check doorbell range */
1b27d7a1 208 if (vector < s->peers[dest].nb_eventfds) {
563027cc
PB
209 IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
210 event_notifier_set(&s->peers[dest].eventfds[vector]);
f59bb378
MAL
211 } else {
212 IVSHMEM_DPRINTF("Invalid destination vector %d on VM %d\n",
213 vector, dest);
6cbf4c8c
CM
214 }
215 break;
216 default:
f59bb378 217 IVSHMEM_DPRINTF("Unhandled write " TARGET_FMT_plx "\n", addr);
6cbf4c8c
CM
218 }
219}
220
a8170e5e 221static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
cb06608e 222 unsigned size)
6cbf4c8c
CM
223{
224
225 IVShmemState *s = opaque;
226 uint32_t ret;
227
228 switch (addr)
229 {
230 case INTRMASK:
231 ret = ivshmem_IntrMask_read(s);
232 break;
233
234 case INTRSTATUS:
235 ret = ivshmem_IntrStatus_read(s);
236 break;
237
238 case IVPOSITION:
239 /* return my VM ID if the memory is mapped */
f689d281 240 if (memory_region_is_mapped(&s->ivshmem)) {
6cbf4c8c
CM
241 ret = s->vm_id;
242 } else {
243 ret = -1;
244 }
245 break;
246
247 default:
248 IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
249 ret = 0;
250 }
251
252 return ret;
253}
254
cb06608e
AK
255static const MemoryRegionOps ivshmem_mmio_ops = {
256 .read = ivshmem_io_read,
257 .write = ivshmem_io_write,
258 .endianness = DEVICE_NATIVE_ENDIAN,
259 .impl = {
260 .min_access_size = 4,
261 .max_access_size = 4,
262 },
6cbf4c8c
CM
263};
264
/*
 * Chardev callback: number of bytes we are willing to accept.  The server
 * protocol is a stream of little-endian int64 messages, so always 8.
 */
static int ivshmem_can_receive(void *opaque)
{
    return sizeof(int64_t);
}
269
9940c323
MAL
270static void ivshmem_vector_notify(void *opaque)
271{
0f57350e 272 MSIVector *entry = opaque;
6cbf4c8c 273 PCIDevice *pdev = entry->pdev;
d160f3f7 274 IVShmemState *s = IVSHMEM(pdev);
0f57350e 275 int vector = entry - s->msi_vectors;
9940c323
MAL
276 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
277
278 if (!event_notifier_test_and_clear(n)) {
279 return;
280 }
6cbf4c8c 281
d160f3f7 282 IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, vector);
9940c323
MAL
283 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
284 msix_notify(pdev, vector);
285 } else {
286 ivshmem_IntrStatus_write(s, 1);
287 }
6cbf4c8c
CM
288}
289
660c97ee
MAL
290static int ivshmem_vector_unmask(PCIDevice *dev, unsigned vector,
291 MSIMessage msg)
292{
293 IVShmemState *s = IVSHMEM(dev);
294 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
295 MSIVector *v = &s->msi_vectors[vector];
296 int ret;
297
298 IVSHMEM_DPRINTF("vector unmask %p %d\n", dev, vector);
299
300 ret = kvm_irqchip_update_msi_route(kvm_state, v->virq, msg, dev);
301 if (ret < 0) {
302 return ret;
303 }
304
305 return kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL, v->virq);
306}
307
308static void ivshmem_vector_mask(PCIDevice *dev, unsigned vector)
309{
310 IVShmemState *s = IVSHMEM(dev);
311 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
312 int ret;
313
314 IVSHMEM_DPRINTF("vector mask %p %d\n", dev, vector);
315
316 ret = kvm_irqchip_remove_irqfd_notifier_gsi(kvm_state, n,
317 s->msi_vectors[vector].virq);
318 if (ret != 0) {
319 error_report("remove_irqfd_notifier_gsi failed");
320 }
321}
322
323static void ivshmem_vector_poll(PCIDevice *dev,
324 unsigned int vector_start,
325 unsigned int vector_end)
326{
327 IVShmemState *s = IVSHMEM(dev);
328 unsigned int vector;
329
330 IVSHMEM_DPRINTF("vector poll %p %d-%d\n", dev, vector_start, vector_end);
331
332 vector_end = MIN(vector_end, s->vectors);
333
334 for (vector = vector_start; vector < vector_end; vector++) {
335 EventNotifier *notifier = &s->peers[s->vm_id].eventfds[vector];
336
337 if (!msix_is_masked(dev, vector)) {
338 continue;
339 }
340
341 if (event_notifier_test_and_clear(notifier)) {
342 msix_set_pending(dev, vector);
343 }
344 }
345}
346
9940c323
MAL
347static void watch_vector_notifier(IVShmemState *s, EventNotifier *n,
348 int vector)
6cbf4c8c 349{
563027cc 350 int eventfd = event_notifier_get_fd(n);
6cbf4c8c
CM
351
352 /* if MSI is supported we need multiple interrupts */
9940c323 353 s->msi_vectors[vector].pdev = PCI_DEVICE(s);
6cbf4c8c 354
9940c323
MAL
355 qemu_set_fd_handler(eventfd, ivshmem_vector_notify,
356 NULL, &s->msi_vectors[vector]);
6cbf4c8c
CM
357}
358
d58d7e84
MAL
359static int check_shm_size(IVShmemState *s, int fd, Error **errp)
360{
6cbf4c8c
CM
361 /* check that the guest isn't going to try and map more memory than the
362 * the object has allocated return -1 to indicate error */
363
364 struct stat buf;
365
5edbdbcd 366 if (fstat(fd, &buf) < 0) {
d58d7e84
MAL
367 error_setg(errp, "exiting: fstat on fd %d failed: %s",
368 fd, strerror(errno));
5edbdbcd
HZ
369 return -1;
370 }
6cbf4c8c
CM
371
372 if (s->ivshmem_size > buf.st_size) {
d58d7e84
MAL
373 error_setg(errp, "Requested memory size greater"
374 " than shared object size (%" PRIu64 " > %" PRIu64")",
375 s->ivshmem_size, (uint64_t)buf.st_size);
6cbf4c8c
CM
376 return -1;
377 } else {
378 return 0;
379 }
380}
381
382/* create the shared memory BAR when we are not using the server, so we can
383 * create the BAR and map the memory immediately */
d58d7e84
MAL
384static int create_shared_memory_BAR(IVShmemState *s, int fd, uint8_t attr,
385 Error **errp)
386{
6cbf4c8c
CM
387 void * ptr;
388
6cbf4c8c 389 ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
d58d7e84
MAL
390 if (ptr == MAP_FAILED) {
391 error_setg_errno(errp, errno, "Failed to mmap shared memory");
392 return -1;
393 }
394
3c161542 395 memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s), "ivshmem.bar2",
cb06608e 396 s->ivshmem_size, ptr);
8e41fb63 397 qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem), fd);
eb3fedf3 398 vmstate_register_ram(&s->ivshmem, DEVICE(s));
cb06608e 399 memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
6cbf4c8c
CM
400
401 /* region for shared memory */
9113e3f3 402 pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);
d58d7e84
MAL
403
404 return 0;
6cbf4c8c
CM
405}
406
563027cc
PB
407static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
408{
409 memory_region_add_eventfd(&s->ivshmem_mmio,
410 DOORBELL,
411 4,
412 true,
413 (posn << 16) | i,
753d5e14 414 &s->peers[posn].eventfds[i]);
563027cc
PB
415}
416
417static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
418{
419 memory_region_del_eventfd(&s->ivshmem_mmio,
420 DOORBELL,
421 4,
422 true,
423 (posn << 16) | i,
753d5e14 424 &s->peers[posn].eventfds[i]);
563027cc
PB
425}
426
f456179f 427static void close_peer_eventfds(IVShmemState *s, int posn)
6cbf4c8c 428{
f456179f 429 int i, n;
6cbf4c8c 430
98609cd8
PB
431 if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
432 return;
433 }
363ba1c7 434 if (posn < 0 || posn >= s->nb_peers) {
ffa99afd 435 error_report("invalid peer %d", posn);
363ba1c7
SH
436 return;
437 }
98609cd8 438
f456179f 439 n = s->peers[posn].nb_eventfds;
6cbf4c8c 440
b6a1f3a5 441 memory_region_transaction_begin();
f456179f 442 for (i = 0; i < n; i++) {
563027cc 443 ivshmem_del_eventfd(s, posn, i);
b6a1f3a5
PB
444 }
445 memory_region_transaction_commit();
f456179f 446 for (i = 0; i < n; i++) {
563027cc 447 event_notifier_cleanup(&s->peers[posn].eventfds[i]);
6cbf4c8c
CM
448 }
449
7267c094 450 g_free(s->peers[posn].eventfds);
6cbf4c8c
CM
451 s->peers[posn].nb_eventfds = 0;
452}
453
6cbf4c8c 454/* this function increase the dynamic storage need to store data about other
f456179f 455 * peers */
1300b273 456static int resize_peers(IVShmemState *s, int new_min_size)
34bc07c5 457{
6cbf4c8c 458
1300b273 459 int j, old_size;
6cbf4c8c 460
61ea2d86
MAL
461 /* limit number of max peers */
462 if (new_min_size <= 0 || new_min_size > IVSHMEM_MAX_PEERS) {
34bc07c5
SK
463 return -1;
464 }
1300b273 465 if (new_min_size <= s->nb_peers) {
34bc07c5
SK
466 return 0;
467 }
6cbf4c8c 468
1300b273
MAL
469 old_size = s->nb_peers;
470 s->nb_peers = new_min_size;
471
f456179f 472 IVSHMEM_DPRINTF("bumping storage to %d peers\n", s->nb_peers);
1300b273 473
7267c094 474 s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
6cbf4c8c 475
1300b273 476 for (j = old_size; j < s->nb_peers; j++) {
81e507f0 477 s->peers[j].eventfds = g_new0(EventNotifier, s->vectors);
6cbf4c8c
CM
478 s->peers[j].nb_eventfds = 0;
479 }
34bc07c5
SK
480
481 return 0;
6cbf4c8c
CM
482}
483
0f14fd71
MAL
484static bool fifo_update_and_get(IVShmemState *s, const uint8_t *buf, int size,
485 void *data, size_t len)
486{
487 const uint8_t *p;
488 uint32_t num;
489
f7a199b2 490 assert(len <= sizeof(int64_t)); /* limitation of the fifo */
0f14fd71
MAL
491 if (fifo8_is_empty(&s->incoming_fifo) && size == len) {
492 memcpy(data, buf, size);
493 return true;
494 }
495
496 IVSHMEM_DPRINTF("short read of %d bytes\n", size);
497
f7a199b2 498 num = MIN(size, sizeof(int64_t) - fifo8_num_used(&s->incoming_fifo));
0f14fd71
MAL
499 fifo8_push_all(&s->incoming_fifo, buf, num);
500
501 if (fifo8_num_used(&s->incoming_fifo) < len) {
502 assert(num == 0);
503 return false;
504 }
505
506 size -= num;
507 buf += num;
508 p = fifo8_pop_buf(&s->incoming_fifo, len, &num);
509 assert(num == len);
510
511 memcpy(data, p, len);
512
513 if (size > 0) {
514 fifo8_push_all(&s->incoming_fifo, buf, size);
515 }
516
517 return true;
518}
519
f7a199b2
MAL
520static bool fifo_update_and_get_i64(IVShmemState *s,
521 const uint8_t *buf, int size, int64_t *i64)
522{
523 if (fifo_update_and_get(s, buf, size, i64, sizeof(*i64))) {
524 *i64 = GINT64_FROM_LE(*i64);
525 return true;
526 }
527
528 return false;
529}
530
660c97ee
MAL
531static int ivshmem_add_kvm_msi_virq(IVShmemState *s, int vector)
532{
533 PCIDevice *pdev = PCI_DEVICE(s);
534 MSIMessage msg = msix_get_message(pdev, vector);
535 int ret;
536
537 IVSHMEM_DPRINTF("ivshmem_add_kvm_msi_virq vector:%d\n", vector);
538
539 if (s->msi_vectors[vector].pdev != NULL) {
540 return 0;
541 }
542
543 ret = kvm_irqchip_add_msi_route(kvm_state, msg, pdev);
544 if (ret < 0) {
545 error_report("ivshmem: kvm_irqchip_add_msi_route failed");
546 return -1;
547 }
548
549 s->msi_vectors[vector].virq = ret;
550 s->msi_vectors[vector].pdev = pdev;
551
552 return 0;
553}
554
555static void setup_interrupt(IVShmemState *s, int vector)
556{
557 EventNotifier *n = &s->peers[s->vm_id].eventfds[vector];
558 bool with_irqfd = kvm_msi_via_irqfd_enabled() &&
559 ivshmem_has_feature(s, IVSHMEM_MSI);
560 PCIDevice *pdev = PCI_DEVICE(s);
561
562 IVSHMEM_DPRINTF("setting up interrupt for vector: %d\n", vector);
563
564 if (!with_irqfd) {
97553976 565 IVSHMEM_DPRINTF("with eventfd\n");
9940c323 566 watch_vector_notifier(s, n, vector);
660c97ee 567 } else if (msix_enabled(pdev)) {
97553976 568 IVSHMEM_DPRINTF("with irqfd\n");
660c97ee
MAL
569 if (ivshmem_add_kvm_msi_virq(s, vector) < 0) {
570 return;
571 }
572
573 if (!msix_is_masked(pdev, vector)) {
574 kvm_irqchip_add_irqfd_notifier_gsi(kvm_state, n, NULL,
575 s->msi_vectors[vector].virq);
576 }
577 } else {
578 /* it will be delayed until msix is enabled, in write_config */
97553976 579 IVSHMEM_DPRINTF("with irqfd, delayed until msix enabled\n");
660c97ee
MAL
580 }
581}
582
a2e9011b 583static void ivshmem_read(void *opaque, const uint8_t *buf, int size)
6cbf4c8c
CM
584{
585 IVShmemState *s = opaque;
dee2151e 586 int incoming_fd;
9a2f0e64 587 int new_eventfd;
f7a199b2 588 int64_t incoming_posn;
d58d7e84 589 Error *err = NULL;
9a2f0e64 590 Peer *peer;
6cbf4c8c 591
f7a199b2 592 if (!fifo_update_and_get_i64(s, buf, size, &incoming_posn)) {
0f14fd71 593 return;
a2e9011b
SH
594 }
595
363ba1c7 596 if (incoming_posn < -1) {
f7a199b2 597 IVSHMEM_DPRINTF("invalid incoming_posn %" PRId64 "\n", incoming_posn);
363ba1c7
SH
598 return;
599 }
600
6cbf4c8c 601 /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
dee2151e 602 incoming_fd = qemu_chr_fe_get_msgfd(s->server_chr);
f7a199b2
MAL
603 IVSHMEM_DPRINTF("posn is %" PRId64 ", fd is %d\n",
604 incoming_posn, incoming_fd);
6cbf4c8c 605
f456179f 606 /* make sure we have enough space for this peer */
6cbf4c8c 607 if (incoming_posn >= s->nb_peers) {
1300b273
MAL
608 if (resize_peers(s, incoming_posn + 1) < 0) {
609 error_report("failed to resize peers array");
dee2151e
MAL
610 if (incoming_fd != -1) {
611 close(incoming_fd);
34bc07c5
SK
612 }
613 return;
614 }
6cbf4c8c
CM
615 }
616
9a2f0e64
MAL
617 peer = &s->peers[incoming_posn];
618
dee2151e 619 if (incoming_fd == -1) {
6cbf4c8c 620 /* if posn is positive and unseen before then this is our posn*/
81e507f0 621 if (incoming_posn >= 0 && s->vm_id == -1) {
6cbf4c8c
CM
622 /* receive our posn */
623 s->vm_id = incoming_posn;
6cbf4c8c 624 } else {
f456179f 625 /* otherwise an fd == -1 means an existing peer has gone away */
f7a199b2 626 IVSHMEM_DPRINTF("posn %" PRId64 " has gone away\n", incoming_posn);
f456179f 627 close_peer_eventfds(s, incoming_posn);
6cbf4c8c 628 }
6f8a16d5 629 return;
6cbf4c8c
CM
630 }
631
6cbf4c8c
CM
632 /* if the position is -1, then it's shared memory region fd */
633 if (incoming_posn == -1) {
6cbf4c8c
CM
634 void * map_ptr;
635
f689d281 636 if (memory_region_is_mapped(&s->ivshmem)) {
945001a1
MAL
637 error_report("shm already initialized");
638 close(incoming_fd);
639 return;
640 }
641
d58d7e84
MAL
642 if (check_shm_size(s, incoming_fd, &err) == -1) {
643 error_report_err(err);
644 close(incoming_fd);
645 return;
6cbf4c8c
CM
646 }
647
648 /* mmap the region and map into the BAR2 */
649 map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
650 incoming_fd, 0);
d58d7e84
MAL
651 if (map_ptr == MAP_FAILED) {
652 error_report("Failed to mmap shared memory %s", strerror(errno));
653 close(incoming_fd);
654 return;
655 }
3c161542 656 memory_region_init_ram_ptr(&s->ivshmem, OBJECT(s),
cb06608e 657 "ivshmem.bar2", s->ivshmem_size, map_ptr);
8e41fb63
FZ
658 qemu_set_ram_fd(memory_region_get_ram_addr(&s->ivshmem),
659 incoming_fd);
eb3fedf3 660 vmstate_register_ram(&s->ivshmem, DEVICE(s));
6cbf4c8c 661
7f9efb6b 662 IVSHMEM_DPRINTF("guest h/w addr = %p, size = %" PRIu64 "\n",
dbc464d4 663 map_ptr, s->ivshmem_size);
6cbf4c8c 664
cb06608e 665 memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
6cbf4c8c 666
6cbf4c8c
CM
667 return;
668 }
669
9a2f0e64
MAL
670 /* each peer has an associated array of eventfds, and we keep
671 * track of how many eventfds received so far */
672 /* get a new eventfd: */
1ee57de4
MAL
673 if (peer->nb_eventfds >= s->vectors) {
674 error_report("Too many eventfd received, device has %d vectors",
675 s->vectors);
676 close(incoming_fd);
677 return;
678 }
679
9a2f0e64 680 new_eventfd = peer->nb_eventfds++;
6cbf4c8c 681
f456179f 682 /* this is an eventfd for a particular peer VM */
f7a199b2 683 IVSHMEM_DPRINTF("eventfds[%" PRId64 "][%d] = %d\n", incoming_posn,
9a2f0e64
MAL
684 new_eventfd, incoming_fd);
685 event_notifier_init_fd(&peer->eventfds[new_eventfd], incoming_fd);
660c97ee 686 fcntl_setfl(incoming_fd, O_NONBLOCK); /* msix/irqfd poll non block */
6cbf4c8c 687
6cbf4c8c 688 if (incoming_posn == s->vm_id) {
660c97ee 689 setup_interrupt(s, new_eventfd);
6cbf4c8c
CM
690 }
691
692 if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
9a2f0e64 693 ivshmem_add_eventfd(s, incoming_posn, new_eventfd);
6cbf4c8c 694 }
6cbf4c8c
CM
695}
696
5105b1d8
DM
697static void ivshmem_check_version(void *opaque, const uint8_t * buf, int size)
698{
699 IVShmemState *s = opaque;
700 int tmp;
f7a199b2 701 int64_t version;
5105b1d8 702
f7a199b2 703 if (!fifo_update_and_get_i64(s, buf, size, &version)) {
5105b1d8
DM
704 return;
705 }
706
707 tmp = qemu_chr_fe_get_msgfd(s->server_chr);
708 if (tmp != -1 || version != IVSHMEM_PROTOCOL_VERSION) {
709 fprintf(stderr, "incompatible version, you are connecting to a ivshmem-"
710 "server using a different protocol please check your setup\n");
71c26581 711 qemu_chr_add_handlers(s->server_chr, NULL, NULL, NULL, s);
5105b1d8
DM
712 return;
713 }
714
715 IVSHMEM_DPRINTF("version check ok, switch to real chardev handler\n");
716 qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
c20fc0c3 717 NULL, s);
5105b1d8
DM
718}
719
4490c711
MT
720/* Select the MSI-X vectors used by device.
721 * ivshmem maps events to vectors statically, so
722 * we just enable all vectors on init and after reset. */
723static void ivshmem_use_msix(IVShmemState * s)
724{
b7578eaa 725 PCIDevice *d = PCI_DEVICE(s);
4490c711
MT
726 int i;
727
f59bb378 728 IVSHMEM_DPRINTF("%s, msix present: %d\n", __func__, msix_present(d));
b7578eaa 729 if (!msix_present(d)) {
4490c711
MT
730 return;
731 }
732
733 for (i = 0; i < s->vectors; i++) {
b7578eaa 734 msix_vector_use(d, i);
4490c711
MT
735 }
736}
737
6cbf4c8c
CM
738static void ivshmem_reset(DeviceState *d)
739{
eb3fedf3 740 IVShmemState *s = IVSHMEM(d);
6cbf4c8c
CM
741
742 s->intrstatus = 0;
972ad215 743 s->intrmask = 0;
4490c711 744 ivshmem_use_msix(s);
6cbf4c8c
CM
745}
746
fd47bfe5 747static int ivshmem_setup_interrupts(IVShmemState *s)
4490c711 748{
fd47bfe5
MAL
749 /* allocate QEMU callback data for receiving interrupts */
750 s->msi_vectors = g_malloc0(s->vectors * sizeof(MSIVector));
6cbf4c8c 751
fd47bfe5
MAL
752 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
753 if (msix_init_exclusive_bar(PCI_DEVICE(s), s->vectors, 1)) {
754 return -1;
755 }
1116b539 756
fd47bfe5
MAL
757 IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
758 ivshmem_use_msix(s);
759 }
4490c711 760
d58d7e84 761 return 0;
6cbf4c8c
CM
762}
763
660c97ee
MAL
764static void ivshmem_enable_irqfd(IVShmemState *s)
765{
766 PCIDevice *pdev = PCI_DEVICE(s);
767 int i;
768
769 for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
770 ivshmem_add_kvm_msi_virq(s, i);
771 }
772
773 if (msix_set_vector_notifiers(pdev,
774 ivshmem_vector_unmask,
775 ivshmem_vector_mask,
776 ivshmem_vector_poll)) {
777 error_report("ivshmem: msix_set_vector_notifiers failed");
778 }
779}
780
781static void ivshmem_remove_kvm_msi_virq(IVShmemState *s, int vector)
782{
783 IVSHMEM_DPRINTF("ivshmem_remove_kvm_msi_virq vector:%d\n", vector);
784
785 if (s->msi_vectors[vector].pdev == NULL) {
786 return;
787 }
788
789 /* it was cleaned when masked in the frontend. */
790 kvm_irqchip_release_virq(kvm_state, s->msi_vectors[vector].virq);
791
792 s->msi_vectors[vector].pdev = NULL;
793}
794
795static void ivshmem_disable_irqfd(IVShmemState *s)
796{
797 PCIDevice *pdev = PCI_DEVICE(s);
798 int i;
799
800 for (i = 0; i < s->peers[s->vm_id].nb_eventfds; i++) {
801 ivshmem_remove_kvm_msi_virq(s, i);
802 }
803
804 msix_unset_vector_notifiers(pdev);
805}
806
807static void ivshmem_write_config(PCIDevice *pdev, uint32_t address,
d58d7e84 808 uint32_t val, int len)
4490c711 809{
660c97ee
MAL
810 IVShmemState *s = IVSHMEM(pdev);
811 int is_enabled, was_enabled = msix_enabled(pdev);
812
813 pci_default_write_config(pdev, address, val, len);
814 is_enabled = msix_enabled(pdev);
815
816 if (kvm_msi_via_irqfd_enabled() && s->vm_id != -1) {
817 if (!was_enabled && is_enabled) {
818 ivshmem_enable_irqfd(s);
819 } else if (was_enabled && !is_enabled) {
820 ivshmem_disable_irqfd(s);
821 }
822 }
4490c711
MT
823}
824
d58d7e84 825static void pci_ivshmem_realize(PCIDevice *dev, Error **errp)
6cbf4c8c 826{
eb3fedf3 827 IVShmemState *s = IVSHMEM(dev);
6cbf4c8c 828 uint8_t *pci_conf;
9113e3f3
MAL
829 uint8_t attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
830 PCI_BASE_ADDRESS_MEM_PREFETCH;
6cbf4c8c 831
d9453c93 832 if (!!s->server_chr + !!s->shmobj + !!s->hostmem != 1) {
1d649244
MA
833 error_setg(errp,
834 "You must specify either 'shm', 'chardev' or 'x-memdev'");
d9453c93
MAL
835 return;
836 }
837
838 if (s->hostmem) {
839 MemoryRegion *mr;
840
841 if (s->sizearg) {
842 g_warning("size argument ignored with hostmem");
843 }
844
9cf70c52 845 mr = host_memory_backend_get_memory(s->hostmem, &error_abort);
d9453c93
MAL
846 s->ivshmem_size = memory_region_size(mr);
847 } else if (s->sizearg == NULL) {
6cbf4c8c 848 s->ivshmem_size = 4 << 20; /* 4 MB default */
d58d7e84 849 } else {
2c04752c
MAL
850 char *end;
851 int64_t size = qemu_strtosz(s->sizearg, &end);
852 if (size < 0 || *end != '\0' || !is_power_of_2(size)) {
853 error_setg(errp, "Invalid size %s", s->sizearg);
d58d7e84
MAL
854 return;
855 }
2c04752c 856 s->ivshmem_size = size;
6cbf4c8c
CM
857 }
858
f7a199b2 859 fifo8_create(&s->incoming_fifo, sizeof(int64_t));
1f8552df 860
6cbf4c8c
CM
861 /* IRQFD requires MSI */
862 if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
863 !ivshmem_has_feature(s, IVSHMEM_MSI)) {
d58d7e84
MAL
864 error_setg(errp, "ioeventfd/irqfd requires MSI");
865 return;
6cbf4c8c
CM
866 }
867
868 /* check that role is reasonable */
869 if (s->role) {
870 if (strncmp(s->role, "peer", 5) == 0) {
871 s->role_val = IVSHMEM_PEER;
872 } else if (strncmp(s->role, "master", 7) == 0) {
873 s->role_val = IVSHMEM_MASTER;
874 } else {
d58d7e84
MAL
875 error_setg(errp, "'role' must be 'peer' or 'master'");
876 return;
6cbf4c8c
CM
877 }
878 } else {
879 s->role_val = IVSHMEM_MASTER; /* default */
880 }
881
882 if (s->role_val == IVSHMEM_PEER) {
f231b88d
CR
883 error_setg(&s->migration_blocker,
884 "Migration is disabled when using feature 'peer mode' in device 'ivshmem'");
38e0735e 885 migrate_add_blocker(s->migration_blocker);
6cbf4c8c
CM
886 }
887
b7578eaa 888 pci_conf = dev->config;
6cbf4c8c 889 pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
6cbf4c8c
CM
890
891 pci_config_set_interrupt_pin(pci_conf, 1);
892
3c161542 893 memory_region_init_io(&s->ivshmem_mmio, OBJECT(s), &ivshmem_mmio_ops, s,
cb06608e
AK
894 "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
895
6cbf4c8c 896 /* region for registers*/
b7578eaa 897 pci_register_bar(dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
e824b2cc 898 &s->ivshmem_mmio);
cb06608e 899
3c161542 900 memory_region_init(&s->bar, OBJECT(s), "ivshmem-bar2-container", s->ivshmem_size);
c08ba66f 901 if (s->ivshmem_64bit) {
9113e3f3 902 attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
c08ba66f 903 }
6cbf4c8c 904
d9453c93
MAL
905 if (s->hostmem != NULL) {
906 MemoryRegion *mr;
907
908 IVSHMEM_DPRINTF("using hostmem\n");
909
9cf70c52
MA
910 mr = host_memory_backend_get_memory(MEMORY_BACKEND(s->hostmem),
911 &error_abort);
d9453c93
MAL
912 vmstate_register_ram(mr, DEVICE(s));
913 memory_region_add_subregion(&s->bar, 0, mr);
914 pci_register_bar(PCI_DEVICE(s), 2, attr, &s->bar);
915 } else if (s->server_chr != NULL) {
2825717c 916 /* FIXME do not rely on what chr drivers put into filename */
36617792
MAL
917 if (strncmp(s->server_chr->filename, "unix:", 5)) {
918 error_setg(errp, "chardev is not a unix client socket");
919 return;
920 }
921
6cbf4c8c
CM
922 /* if we get a UNIX socket as the parameter we will talk
923 * to the ivshmem server to receive the memory region */
924
6cbf4c8c 925 IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
dbc464d4 926 s->server_chr->filename);
6cbf4c8c 927
fd47bfe5
MAL
928 if (ivshmem_setup_interrupts(s) < 0) {
929 error_setg(errp, "failed to initialize interrupts");
d58d7e84 930 return;
6cbf4c8c
CM
931 }
932
f456179f 933 /* we allocate enough space for 16 peers and grow as needed */
1300b273 934 resize_peers(s, 16);
6cbf4c8c
CM
935 s->vm_id = -1;
936
9113e3f3 937 pci_register_bar(dev, 2, attr, &s->bar);
6cbf4c8c 938
5105b1d8 939 qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive,
c20fc0c3 940 ivshmem_check_version, NULL, s);
6cbf4c8c
CM
941 } else {
942 /* just map the file immediately, we're not using a server */
943 int fd;
944
6cbf4c8c
CM
945 IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);
946
947 /* try opening with O_EXCL and if it succeeds zero the memory
948 * by truncating to 0 */
949 if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
950 S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
951 /* truncate file to length PCI device's memory */
952 if (ftruncate(fd, s->ivshmem_size) != 0) {
dbc464d4 953 error_report("could not truncate shared file");
6cbf4c8c
CM
954 }
955
956 } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
957 S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
d58d7e84
MAL
958 error_setg(errp, "could not open shared file");
959 return;
6cbf4c8c
CM
960 }
961
d58d7e84
MAL
962 if (check_shm_size(s, fd, errp) == -1) {
963 return;
6cbf4c8c
CM
964 }
965
d58d7e84 966 create_shared_memory_BAR(s, fd, attr, errp);
6cbf4c8c 967 }
6cbf4c8c
CM
968}
969
d58d7e84 970static void pci_ivshmem_exit(PCIDevice *dev)
6cbf4c8c 971{
eb3fedf3 972 IVShmemState *s = IVSHMEM(dev);
f64a078d
MAL
973 int i;
974
975 fifo8_destroy(&s->incoming_fifo);
6cbf4c8c 976
38e0735e
AL
977 if (s->migration_blocker) {
978 migrate_del_blocker(s->migration_blocker);
979 error_free(s->migration_blocker);
980 }
981
f689d281 982 if (memory_region_is_mapped(&s->ivshmem)) {
d9453c93
MAL
983 if (!s->hostmem) {
984 void *addr = memory_region_get_ram_ptr(&s->ivshmem);
56a571d9 985 int fd;
d9453c93
MAL
986
987 if (munmap(addr, s->ivshmem_size) == -1) {
988 error_report("Failed to munmap shared memory %s",
989 strerror(errno));
990 }
56a571d9 991
8e41fb63
FZ
992 fd = qemu_get_ram_fd(memory_region_get_ram_addr(&s->ivshmem));
993 if (fd != -1) {
56a571d9 994 close(fd);
8e41fb63 995 }
d9453c93 996 }
f64a078d
MAL
997
998 vmstate_unregister_ram(&s->ivshmem, DEVICE(dev));
999 memory_region_del_subregion(&s->bar, &s->ivshmem);
f64a078d
MAL
1000 }
1001
f64a078d
MAL
1002 if (s->peers) {
1003 for (i = 0; i < s->nb_peers; i++) {
f456179f 1004 close_peer_eventfds(s, i);
f64a078d
MAL
1005 }
1006 g_free(s->peers);
1007 }
1008
1009 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
1010 msix_uninit_exclusive_bar(dev);
1011 }
1012
0f57350e 1013 g_free(s->msi_vectors);
6cbf4c8c
CM
1014}
1015
1f8552df
MAL
1016static bool test_msix(void *opaque, int version_id)
1017{
1018 IVShmemState *s = opaque;
1019
1020 return ivshmem_has_feature(s, IVSHMEM_MSI);
1021}
1022
1023static bool test_no_msix(void *opaque, int version_id)
1024{
1025 return !test_msix(opaque, version_id);
1026}
1027
1028static int ivshmem_pre_load(void *opaque)
1029{
1030 IVShmemState *s = opaque;
1031
1032 if (s->role_val == IVSHMEM_PEER) {
1033 error_report("'peer' devices are not migratable");
1034 return -EINVAL;
1035 }
1036
1037 return 0;
1038}
1039
1040static int ivshmem_post_load(void *opaque, int version_id)
1041{
1042 IVShmemState *s = opaque;
1043
1044 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
1045 ivshmem_use_msix(s);
1046 }
1047
1048 return 0;
1049}
1050
1051static int ivshmem_load_old(QEMUFile *f, void *opaque, int version_id)
1052{
1053 IVShmemState *s = opaque;
1054 PCIDevice *pdev = PCI_DEVICE(s);
1055 int ret;
1056
1057 IVSHMEM_DPRINTF("ivshmem_load_old\n");
1058
1059 if (version_id != 0) {
1060 return -EINVAL;
1061 }
1062
1063 if (s->role_val == IVSHMEM_PEER) {
1064 error_report("'peer' devices are not migratable");
1065 return -EINVAL;
1066 }
1067
1068 ret = pci_device_load(pdev, f);
1069 if (ret) {
1070 return ret;
1071 }
1072
1073 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
1074 msix_load(pdev, f);
1075 ivshmem_use_msix(s);
1076 } else {
1077 s->intrstatus = qemu_get_be32(f);
1078 s->intrmask = qemu_get_be32(f);
1079 }
1080
1081 return 0;
1082}
1083
1084static const VMStateDescription ivshmem_vmsd = {
1085 .name = "ivshmem",
1086 .version_id = 1,
1087 .minimum_version_id = 1,
1088 .pre_load = ivshmem_pre_load,
1089 .post_load = ivshmem_post_load,
1090 .fields = (VMStateField[]) {
1091 VMSTATE_PCI_DEVICE(parent_obj, IVShmemState),
1092
1093 VMSTATE_MSIX_TEST(parent_obj, IVShmemState, test_msix),
1094 VMSTATE_UINT32_TEST(intrstatus, IVShmemState, test_no_msix),
1095 VMSTATE_UINT32_TEST(intrmask, IVShmemState, test_no_msix),
1096
1097 VMSTATE_END_OF_LIST()
1098 },
1099 .load_state_old = ivshmem_load_old,
1100 .minimum_version_id_old = 0
1101};
1102
40021f08
AL
1103static Property ivshmem_properties[] = {
1104 DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
1105 DEFINE_PROP_STRING("size", IVShmemState, sizearg),
1106 DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
1107 DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, false),
1108 DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
1109 DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
1110 DEFINE_PROP_STRING("role", IVShmemState, role),
c08ba66f 1111 DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
40021f08
AL
1112 DEFINE_PROP_END_OF_LIST(),
1113};
1114
1115static void ivshmem_class_init(ObjectClass *klass, void *data)
1116{
39bffca2 1117 DeviceClass *dc = DEVICE_CLASS(klass);
40021f08
AL
1118 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
1119
d58d7e84
MAL
1120 k->realize = pci_ivshmem_realize;
1121 k->exit = pci_ivshmem_exit;
1122 k->config_write = ivshmem_write_config;
b8ef62a9
PB
1123 k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
1124 k->device_id = PCI_DEVICE_ID_IVSHMEM;
40021f08 1125 k->class_id = PCI_CLASS_MEMORY_RAM;
39bffca2
AL
1126 dc->reset = ivshmem_reset;
1127 dc->props = ivshmem_properties;
1f8552df 1128 dc->vmsd = &ivshmem_vmsd;
125ee0ed 1129 set_bit(DEVICE_CATEGORY_MISC, dc->categories);
d383537d 1130 dc->desc = "Inter-VM shared memory";
40021f08
AL
1131}
1132
d9453c93
MAL
1133static void ivshmem_check_memdev_is_busy(Object *obj, const char *name,
1134 Object *val, Error **errp)
1135{
1136 MemoryRegion *mr;
1137
9cf70c52 1138 mr = host_memory_backend_get_memory(MEMORY_BACKEND(val), &error_abort);
d9453c93
MAL
1139 if (memory_region_is_mapped(mr)) {
1140 char *path = object_get_canonical_path_component(val);
1141 error_setg(errp, "can't use already busy memdev: %s", path);
1142 g_free(path);
1143 } else {
1144 qdev_prop_allow_set_link_before_realize(obj, name, val, errp);
1145 }
1146}
1147
1148static void ivshmem_init(Object *obj)
1149{
1150 IVShmemState *s = IVSHMEM(obj);
1151
1d649244 1152 object_property_add_link(obj, "x-memdev", TYPE_MEMORY_BACKEND,
d9453c93
MAL
1153 (Object **)&s->hostmem,
1154 ivshmem_check_memdev_is_busy,
1155 OBJ_PROP_LINK_UNREF_ON_RELEASE,
1156 &error_abort);
1157}
1158
8c43a6f0 1159static const TypeInfo ivshmem_info = {
eb3fedf3 1160 .name = TYPE_IVSHMEM,
39bffca2
AL
1161 .parent = TYPE_PCI_DEVICE,
1162 .instance_size = sizeof(IVShmemState),
d9453c93 1163 .instance_init = ivshmem_init,
39bffca2 1164 .class_init = ivshmem_class_init,
6cbf4c8c
CM
1165};
1166
83f7d43a 1167static void ivshmem_register_types(void)
6cbf4c8c 1168{
39bffca2 1169 type_register_static(&ivshmem_info);
6cbf4c8c
CM
1170}
1171
83f7d43a 1172type_init(ivshmem_register_types)