qemu.git / hw / ivshmem.c (blob afaf9b3bbfcb258d9e861253b37596c611d7f457)
1 /*
2 * Inter-VM Shared Memory PCI device.
3 *
4 * Author:
5 * Cam Macdonell <cam@cs.ualberta.ca>
6 *
7 * Based On: cirrus_vga.c
8 * Copyright (c) 2004 Fabrice Bellard
9 * Copyright (c) 2004 Makoto Suzuki (suzu)
10 *
11 * and rtl8139.c
12 * Copyright (c) 2006 Igor Kovalenko
13 *
14 * This code is licensed under the GNU GPL v2.
15 *
16 * Contributions after 2012-01-13 are licensed under the terms of the
17 * GNU GPL, version 2 or (at your option) any later version.
18 */
19 #include "hw.h"
20 #include "pc.h"
21 #include "pci/pci.h"
22 #include "pci/msix.h"
23 #include "sysemu/kvm.h"
24 #include "migration/migration.h"
25 #include "qapi/qmp/qerror.h"
26 #include "qemu/event_notifier.h"
27 #include "char/char.h"
28
29 #include <sys/mman.h>
30 #include <sys/types.h>
31
32 #define PCI_VENDOR_ID_IVSHMEM PCI_VENDOR_ID_REDHAT_QUMRANET
33 #define PCI_DEVICE_ID_IVSHMEM 0x1110
34
35 #define IVSHMEM_IOEVENTFD 0
36 #define IVSHMEM_MSI 1
37
38 #define IVSHMEM_PEER 0
39 #define IVSHMEM_MASTER 1
40
41 #define IVSHMEM_REG_BAR_SIZE 0x100
42
43 //#define DEBUG_IVSHMEM
44 #ifdef DEBUG_IVSHMEM
45 #define IVSHMEM_DPRINTF(fmt, ...) \
46 do {printf("IVSHMEM: " fmt, ## __VA_ARGS__); } while (0)
47 #else
48 #define IVSHMEM_DPRINTF(fmt, ...)
49 #endif
50
51 typedef struct Peer {
52 int nb_eventfds;
53 EventNotifier *eventfds;
54 } Peer;
55
56 typedef struct EventfdEntry {
57 PCIDevice *pdev;
58 int vector;
59 } EventfdEntry;
60
61 typedef struct IVShmemState {
62 PCIDevice dev;
63 uint32_t intrmask;
64 uint32_t intrstatus;
65 uint32_t doorbell;
66
67 CharDriverState **eventfd_chr;
68 CharDriverState *server_chr;
69 MemoryRegion ivshmem_mmio;
70
71 /* We might need to register the BAR before we actually have the memory.
72 * So prepare a container MemoryRegion for the BAR immediately and
73 * add a subregion when we have the memory.
74 */
75 MemoryRegion bar;
76 MemoryRegion ivshmem;
77 uint64_t ivshmem_size; /* size of shared memory region */
78 uint32_t ivshmem_attr;
79 uint32_t ivshmem_64bit;
80 int shm_fd; /* shared memory file descriptor */
81
82 Peer *peers;
83 int nb_peers; /* how many guests we have space for */
84 int max_peer; /* maximum numbered peer */
85
86 int vm_id;
87 uint32_t vectors;
88 uint32_t features;
89 EventfdEntry *eventfd_table;
90
91 Error *migration_blocker;
92
93 char * shmobj;
94 char * sizearg;
95 char * role;
96 int role_val; /* scalar to avoid multiple string comparisons */
97 } IVShmemState;
98
99 /* registers for the Inter-VM shared memory device */
100 enum ivshmem_registers {
101 INTRMASK = 0,
102 INTRSTATUS = 4,
103 IVPOSITION = 8,
104 DOORBELL = 12,
105 };
106
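/*
 * Illustrative summary of the register BAR (BAR0), based on the handlers
 * below and not part of the original source: INTRMASK and INTRSTATUS
 * implement the legacy pin-interrupt path (reading INTRSTATUS clears it),
 * IVPOSITION returns this guest's VM ID once the shared memory is mapped
 * (-1 before that), and a DOORBELL write of ((peer_id << 16) | vector)
 * kicks the eventfd registered for that peer/vector pair. A guest driver
 * might ring the doorbell like this (hypothetical `regs` pointer to the
 * mapped register BAR):
 *
 *     regs[DOORBELL / 4] = (peer_id << 16) | vector;
 */
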
107 static inline uint32_t ivshmem_has_feature(IVShmemState *ivs,
108 unsigned int feature) {
109 return (ivs->features & (1 << feature));
110 }
111
112 static inline bool is_power_of_two(uint64_t x) {
113 return (x & (x - 1)) == 0;
114 }
115
116 /* accessing registers - based on rtl8139 */
117 static void ivshmem_update_irq(IVShmemState *s, int val)
118 {
119 int isr;
120 isr = (s->intrstatus & s->intrmask) & 0xffffffff;
121
122 /* don't print ISR resets */
123 if (isr) {
124 IVSHMEM_DPRINTF("Set IRQ to %d (%04x %04x)\n",
125 isr ? 1 : 0, s->intrstatus, s->intrmask);
126 }
127
128 qemu_set_irq(s->dev.irq[0], (isr != 0));
129 }
130
131 static void ivshmem_IntrMask_write(IVShmemState *s, uint32_t val)
132 {
133 IVSHMEM_DPRINTF("IntrMask write(w) val = 0x%04x\n", val);
134
135 s->intrmask = val;
136
137 ivshmem_update_irq(s, val);
138 }
139
140 static uint32_t ivshmem_IntrMask_read(IVShmemState *s)
141 {
142 uint32_t ret = s->intrmask;
143
144 IVSHMEM_DPRINTF("intrmask read(w) val = 0x%04x\n", ret);
145
146 return ret;
147 }
148
149 static void ivshmem_IntrStatus_write(IVShmemState *s, uint32_t val)
150 {
151 IVSHMEM_DPRINTF("IntrStatus write(w) val = 0x%04x\n", val);
152
153 s->intrstatus = val;
154
155 ivshmem_update_irq(s, val);
156 }
157
158 static uint32_t ivshmem_IntrStatus_read(IVShmemState *s)
159 {
160 uint32_t ret = s->intrstatus;
161
162 /* reading ISR clears all interrupts */
163 s->intrstatus = 0;
164
165 ivshmem_update_irq(s, 0);
166
167 return ret;
168 }
169
170 static void ivshmem_io_write(void *opaque, hwaddr addr,
171 uint64_t val, unsigned size)
172 {
173 IVShmemState *s = opaque;
174
175 uint16_t dest = val >> 16;
176 uint16_t vector = val & 0xff;
177
178 addr &= 0xfc;
179
180 IVSHMEM_DPRINTF("writing to addr " TARGET_FMT_plx "\n", addr);
181 switch (addr)
182 {
183 case INTRMASK:
184 ivshmem_IntrMask_write(s, val);
185 break;
186
187 case INTRSTATUS:
188 ivshmem_IntrStatus_write(s, val);
189 break;
190
191 case DOORBELL:
192 /* check that dest VM ID is reasonable */
193 if (dest > s->max_peer) {
194 IVSHMEM_DPRINTF("Invalid destination VM ID (%d)\n", dest);
195 break;
196 }
197
198 /* check doorbell range */
199 if (vector < s->peers[dest].nb_eventfds) {
200 IVSHMEM_DPRINTF("Notifying VM %d on vector %d\n", dest, vector);
201 event_notifier_set(&s->peers[dest].eventfds[vector]);
202 }
203 break;
204 default:
205 IVSHMEM_DPRINTF("Invalid VM Doorbell VM %d\n", dest);
206 }
207 }
208
209 static uint64_t ivshmem_io_read(void *opaque, hwaddr addr,
210 unsigned size)
211 {
212
213 IVShmemState *s = opaque;
214 uint32_t ret;
215
216 switch (addr)
217 {
218 case INTRMASK:
219 ret = ivshmem_IntrMask_read(s);
220 break;
221
222 case INTRSTATUS:
223 ret = ivshmem_IntrStatus_read(s);
224 break;
225
226 case IVPOSITION:
227 /* return my VM ID if the memory is mapped */
228 if (s->shm_fd > 0) {
229 ret = s->vm_id;
230 } else {
231 ret = -1;
232 }
233 break;
234
235 default:
236 IVSHMEM_DPRINTF("why are we reading " TARGET_FMT_plx "\n", addr);
237 ret = 0;
238 }
239
240 return ret;
241 }
242
243 static const MemoryRegionOps ivshmem_mmio_ops = {
244 .read = ivshmem_io_read,
245 .write = ivshmem_io_write,
246 .endianness = DEVICE_NATIVE_ENDIAN,
247 .impl = {
248 .min_access_size = 4,
249 .max_access_size = 4,
250 },
251 };
252
253 static void ivshmem_receive(void *opaque, const uint8_t *buf, int size)
254 {
255 IVShmemState *s = opaque;
256
257 ivshmem_IntrStatus_write(s, *buf);
258
259 IVSHMEM_DPRINTF("ivshmem_receive 0x%02x\n", *buf);
260 }
261
262 static int ivshmem_can_receive(void * opaque)
263 {
264 return 8;
265 }
266
267 static void ivshmem_event(void *opaque, int event)
268 {
269 IVSHMEM_DPRINTF("ivshmem_event %d\n", event);
270 }
271
272 static void fake_irqfd(void *opaque, const uint8_t *buf, int size) {
273
274 EventfdEntry *entry = opaque;
275 PCIDevice *pdev = entry->pdev;
276
277 IVSHMEM_DPRINTF("interrupt on vector %p %d\n", pdev, entry->vector);
278 msix_notify(pdev, entry->vector);
279 }
280
281 static CharDriverState* create_eventfd_chr_device(void * opaque, EventNotifier *n,
282 int vector)
283 {
284 /* create an event character device based on the passed eventfd */
285 IVShmemState *s = opaque;
286 CharDriverState * chr;
287 int eventfd = event_notifier_get_fd(n);
288
289 chr = qemu_chr_open_eventfd(eventfd);
290
291 if (chr == NULL) {
292 fprintf(stderr, "creating chardev for eventfd %d failed\n", eventfd);
293 exit(-1);
294 }
295
296 /* if MSI is supported we need multiple interrupts */
297 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
298 s->eventfd_table[vector].pdev = &s->dev;
299 s->eventfd_table[vector].vector = vector;
300
301 qemu_chr_add_handlers(chr, ivshmem_can_receive, fake_irqfd,
302 ivshmem_event, &s->eventfd_table[vector]);
303 } else {
304 qemu_chr_add_handlers(chr, ivshmem_can_receive, ivshmem_receive,
305 ivshmem_event, s);
306 }
307
308 return chr;
309
310 }
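/*
 * Added commentary (not in the original source): the chardevs created
 * above wrap the eventfds that belong to this guest itself, so that a
 * doorbell rung by a peer is turned into an interrupt here: with MSI
 * enabled, fake_irqfd() raises the corresponding MSI-X vector; otherwise
 * ivshmem_receive() feeds the data into INTRSTATUS and the legacy pin
 * interrupt is raised via ivshmem_update_irq().
 */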
311
312 static int check_shm_size(IVShmemState *s, int fd) {
313 /* check that the guest isn't going to try and map more memory than the
314 * object has allocated; return -1 to indicate error */
315
316 struct stat buf;
317
318 fstat(fd, &buf);
319
320 if (s->ivshmem_size > buf.st_size) {
321 fprintf(stderr,
322 "IVSHMEM ERROR: Requested memory size greater"
323 " than shared object size (%" PRIu64 " > %" PRIu64")\n",
324 s->ivshmem_size, (uint64_t)buf.st_size);
325 return -1;
326 } else {
327 return 0;
328 }
329 }
330
331 /* create the shared memory BAR when we are not using the server; in that
332 * case we can create the BAR and map the memory immediately */
333 static void create_shared_memory_BAR(IVShmemState *s, int fd) {
334
335 void * ptr;
336
337 s->shm_fd = fd;
338
339 ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
340
341 memory_region_init_ram_ptr(&s->ivshmem, "ivshmem.bar2",
342 s->ivshmem_size, ptr);
343 vmstate_register_ram(&s->ivshmem, &s->dev.qdev);
344 memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
345
346 /* region for shared memory */
347 pci_register_bar(&s->dev, 2, s->ivshmem_attr, &s->bar);
348 }
349
350 static void ivshmem_add_eventfd(IVShmemState *s, int posn, int i)
351 {
352 memory_region_add_eventfd(&s->ivshmem_mmio,
353 DOORBELL,
354 4,
355 true,
356 (posn << 16) | i,
357 &s->peers[posn].eventfds[i]);
358 }
359
360 static void ivshmem_del_eventfd(IVShmemState *s, int posn, int i)
361 {
362 memory_region_del_eventfd(&s->ivshmem_mmio,
363 DOORBELL,
364 4,
365 true,
366 (posn << 16) | i,
367 &s->peers[posn].eventfds[i]);
368 }
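/*
 * Added commentary (not in the original source): with the IOEVENTFD
 * feature, memory_region_add_eventfd() arms a match on a 4-byte write of
 * exactly ((posn << 16) | i) to the DOORBELL register, so such a write is
 * routed straight to the peer's eventfd (a KVM ioeventfd when available)
 * without taking the slow path through ivshmem_io_write().
 */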
369
370 static void close_guest_eventfds(IVShmemState *s, int posn)
371 {
372 int i, guest_curr_max;
373
374 if (!ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
375 return;
376 }
377
378 guest_curr_max = s->peers[posn].nb_eventfds;
379
380 memory_region_transaction_begin();
381 for (i = 0; i < guest_curr_max; i++) {
382 ivshmem_del_eventfd(s, posn, i);
383 }
384 memory_region_transaction_commit();
385 for (i = 0; i < guest_curr_max; i++) {
386 event_notifier_cleanup(&s->peers[posn].eventfds[i]);
387 }
388
389 g_free(s->peers[posn].eventfds);
390 s->peers[posn].nb_eventfds = 0;
391 }
392
393 /* this function increases the dynamic storage needed to store data about
394 * other guests */
395 static void increase_dynamic_storage(IVShmemState *s, int new_min_size) {
396
397 int j, old_nb_alloc;
398
399 old_nb_alloc = s->nb_peers;
400
401 while (new_min_size >= s->nb_peers)
402 s->nb_peers = s->nb_peers * 2;
403
404 IVSHMEM_DPRINTF("bumping storage to %d guests\n", s->nb_peers);
405 s->peers = g_realloc(s->peers, s->nb_peers * sizeof(Peer));
406
407 /* zero out new pointers */
408 for (j = old_nb_alloc; j < s->nb_peers; j++) {
409 s->peers[j].eventfds = NULL;
410 s->peers[j].nb_eventfds = 0;
411 }
412 }
413
414 static void ivshmem_read(void *opaque, const uint8_t * buf, int flags)
415 {
416 IVShmemState *s = opaque;
417 int incoming_fd, tmp_fd;
418 int guest_max_eventfd;
419 long incoming_posn;
420
421 memcpy(&incoming_posn, buf, sizeof(long));
422 /* pick off s->server_chr->msgfd and store it, posn should accompany msg */
423 tmp_fd = qemu_chr_fe_get_msgfd(s->server_chr);
424 IVSHMEM_DPRINTF("posn is %ld, fd is %d\n", incoming_posn, tmp_fd);
425
426 /* make sure we have enough space for this guest */
427 if (incoming_posn >= s->nb_peers) {
428 increase_dynamic_storage(s, incoming_posn);
429 }
430
431 if (tmp_fd == -1) {
432 /* if posn is non-negative and unseen before then this is our posn */
433 if ((incoming_posn >= 0) &&
434 (s->peers[incoming_posn].eventfds == NULL)) {
435 /* receive our posn */
436 s->vm_id = incoming_posn;
437 return;
438 } else {
439 /* otherwise an fd == -1 means an existing guest has gone away */
440 IVSHMEM_DPRINTF("posn %ld has gone away\n", incoming_posn);
441 close_guest_eventfds(s, incoming_posn);
442 return;
443 }
444 }
445
446 /* because of the implementation of get_msgfd, we need a dup */
447 incoming_fd = dup(tmp_fd);
448
449 if (incoming_fd == -1) {
450 fprintf(stderr, "could not allocate file descriptor %s\n",
451 strerror(errno));
452 return;
453 }
454
455 /* if the position is -1, then it's the shared memory region fd */
456 if (incoming_posn == -1) {
457
458 void * map_ptr;
459
460 s->max_peer = 0;
461
462 if (check_shm_size(s, incoming_fd) == -1) {
463 exit(-1);
464 }
465
466 /* mmap the region and map into the BAR2 */
467 map_ptr = mmap(0, s->ivshmem_size, PROT_READ|PROT_WRITE, MAP_SHARED,
468 incoming_fd, 0);
469 memory_region_init_ram_ptr(&s->ivshmem,
470 "ivshmem.bar2", s->ivshmem_size, map_ptr);
471 vmstate_register_ram(&s->ivshmem, &s->dev.qdev);
472
473 IVSHMEM_DPRINTF("guest shared memory mapped at %p, size = %" PRIu64 "\n",
474 map_ptr, s->ivshmem_size);
475
476 memory_region_add_subregion(&s->bar, 0, &s->ivshmem);
477
478 /* only store the fd if it is successfully mapped */
479 s->shm_fd = incoming_fd;
480
481 return;
482 }
483
484 /* each guest has an array of eventfds, and we keep track of how many
485 * eventfds we have received for each guest so far */
486 guest_max_eventfd = s->peers[incoming_posn].nb_eventfds;
487
488 if (guest_max_eventfd == 0) {
489 /* one eventfd per MSI vector */
490 s->peers[incoming_posn].eventfds = g_new(EventNotifier, s->vectors);
491 }
492
493 /* this is an eventfd for a particular guest VM */
494 IVSHMEM_DPRINTF("eventfds[%ld][%d] = %d\n", incoming_posn,
495 guest_max_eventfd, incoming_fd);
496 event_notifier_init_fd(&s->peers[incoming_posn].eventfds[guest_max_eventfd],
497 incoming_fd);
498
499 /* increment count for particular guest */
500 s->peers[incoming_posn].nb_eventfds++;
501
502 /* keep track of the maximum VM ID */
503 if (incoming_posn > s->max_peer) {
504 s->max_peer = incoming_posn;
505 }
506
507 if (incoming_posn == s->vm_id) {
508 s->eventfd_chr[guest_max_eventfd] = create_eventfd_chr_device(s,
509 &s->peers[s->vm_id].eventfds[guest_max_eventfd],
510 guest_max_eventfd);
511 }
512
513 if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD)) {
514 ivshmem_add_eventfd(s, incoming_posn, guest_max_eventfd);
515 }
516 }
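/*
 * Summary of the server protocol as handled above (added commentary, not
 * part of the original source): each message is a long (the peer
 * position), optionally accompanied by a file descriptor passed over the
 * UNIX socket.  posn == -1 with an fd carries the shared memory region;
 * a new posn without an fd announces this guest's own ID; a known posn
 * without an fd means that peer has disconnected; and a posn with an fd
 * adds one more per-vector eventfd for that peer.
 */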
517
518 /* Select the MSI-X vectors used by device.
519 * ivshmem maps events to vectors statically, so
520 * we just enable all vectors on init and after reset. */
521 static void ivshmem_use_msix(IVShmemState * s)
522 {
523 int i;
524
525 if (!msix_present(&s->dev)) {
526 return;
527 }
528
529 for (i = 0; i < s->vectors; i++) {
530 msix_vector_use(&s->dev, i);
531 }
532 }
533
534 static void ivshmem_reset(DeviceState *d)
535 {
536 IVShmemState *s = DO_UPCAST(IVShmemState, dev.qdev, d);
537
538 s->intrstatus = 0;
539 ivshmem_use_msix(s);
540 }
541
542 static uint64_t ivshmem_get_size(IVShmemState * s) {
543
544 uint64_t value;
545 char *ptr;
546
547 value = strtoull(s->sizearg, &ptr, 10);
548 switch (*ptr) {
549 case 0: case 'M': case 'm':
550 value <<= 20;
551 break;
552 case 'G': case 'g':
553 value <<= 30;
554 break;
555 default:
556 fprintf(stderr, "qemu: invalid ram size: %s\n", s->sizearg);
557 exit(1);
558 }
559
560 /* BARs must be a power of 2 */
561 if (!is_power_of_two(value)) {
562 fprintf(stderr, "ivshmem: size must be power of 2\n");
563 exit(1);
564 }
565
566 return value;
567 }
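/*
 * Added example (not in the original source): per the suffix handling
 * above, "size=64" and "size=64M" both give 64 MiB, "size=1G" gives
 * 1 GiB, and any value that is not a power of two is rejected.
 */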
568
569 static void ivshmem_setup_msi(IVShmemState * s)
570 {
571 if (msix_init_exclusive_bar(&s->dev, s->vectors, 1)) {
572 IVSHMEM_DPRINTF("msix initialization failed\n");
573 exit(1);
574 }
575
576 IVSHMEM_DPRINTF("msix initialized (%d vectors)\n", s->vectors);
577
578 /* allocate QEMU char devices for receiving interrupts */
579 s->eventfd_table = g_malloc0(s->vectors * sizeof(EventfdEntry));
580
581 ivshmem_use_msix(s);
582 }
583
584 static void ivshmem_save(QEMUFile* f, void *opaque)
585 {
586 IVShmemState *proxy = opaque;
587
588 IVSHMEM_DPRINTF("ivshmem_save\n");
589 pci_device_save(&proxy->dev, f);
590
591 if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
592 msix_save(&proxy->dev, f);
593 } else {
594 qemu_put_be32(f, proxy->intrstatus);
595 qemu_put_be32(f, proxy->intrmask);
596 }
597
598 }
599
600 static int ivshmem_load(QEMUFile* f, void *opaque, int version_id)
601 {
602 IVSHMEM_DPRINTF("ivshmem_load\n");
603
604 IVShmemState *proxy = opaque;
605 int ret;
606
607 if (version_id > 0) {
608 return -EINVAL;
609 }
610
611 if (proxy->role_val == IVSHMEM_PEER) {
612 fprintf(stderr, "ivshmem: 'peer' devices are not migratable\n");
613 return -EINVAL;
614 }
615
616 ret = pci_device_load(&proxy->dev, f);
617 if (ret) {
618 return ret;
619 }
620
621 if (ivshmem_has_feature(proxy, IVSHMEM_MSI)) {
622 msix_load(&proxy->dev, f);
623 ivshmem_use_msix(proxy);
624 } else {
625 proxy->intrstatus = qemu_get_be32(f);
626 proxy->intrmask = qemu_get_be32(f);
627 }
628
629 return 0;
630 }
631
632 static void ivshmem_write_config(PCIDevice *pci_dev, uint32_t address,
633 uint32_t val, int len)
634 {
635 pci_default_write_config(pci_dev, address, val, len);
636 msix_write_config(pci_dev, address, val, len);
637 }
638
639 static int pci_ivshmem_init(PCIDevice *dev)
640 {
641 IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
642 uint8_t *pci_conf;
643
644 if (s->sizearg == NULL) {
645 s->ivshmem_size = 4 << 20; /* 4 MB default */
646 } else {
647 s->ivshmem_size = ivshmem_get_size(s);
648 }
649
650 register_savevm(&s->dev.qdev, "ivshmem", 0, 0, ivshmem_save, ivshmem_load,
651 dev);
652
653 /* IRQFD requires MSI */
654 if (ivshmem_has_feature(s, IVSHMEM_IOEVENTFD) &&
655 !ivshmem_has_feature(s, IVSHMEM_MSI)) {
656 fprintf(stderr, "ivshmem: ioeventfd/irqfd requires MSI\n");
657 exit(1);
658 }
659
660 /* check that role is reasonable */
661 if (s->role) {
662 if (strncmp(s->role, "peer", 5) == 0) {
663 s->role_val = IVSHMEM_PEER;
664 } else if (strncmp(s->role, "master", 7) == 0) {
665 s->role_val = IVSHMEM_MASTER;
666 } else {
667 fprintf(stderr, "ivshmem: 'role' must be 'peer' or 'master'\n");
668 exit(1);
669 }
670 } else {
671 s->role_val = IVSHMEM_MASTER; /* default */
672 }
673
674 if (s->role_val == IVSHMEM_PEER) {
675 error_set(&s->migration_blocker, QERR_DEVICE_FEATURE_BLOCKS_MIGRATION,
676 "peer mode", "ivshmem");
677 migrate_add_blocker(s->migration_blocker);
678 }
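/*
 * Added commentary (not in the original source): only the 'master' role
 * is migratable; a 'peer' device both registers a migration blocker here
 * and is rejected by ivshmem_load() on the incoming side.
 */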
679
680 pci_conf = s->dev.config;
681 pci_conf[PCI_COMMAND] = PCI_COMMAND_IO | PCI_COMMAND_MEMORY;
682
683 pci_config_set_interrupt_pin(pci_conf, 1);
684
685 s->shm_fd = 0;
686
687 memory_region_init_io(&s->ivshmem_mmio, &ivshmem_mmio_ops, s,
688 "ivshmem-mmio", IVSHMEM_REG_BAR_SIZE);
689
690 /* region for registers */
691 pci_register_bar(&s->dev, 0, PCI_BASE_ADDRESS_SPACE_MEMORY,
692 &s->ivshmem_mmio);
693
694 memory_region_init(&s->bar, "ivshmem-bar2-container", s->ivshmem_size);
695 s->ivshmem_attr = PCI_BASE_ADDRESS_SPACE_MEMORY |
696 PCI_BASE_ADDRESS_MEM_PREFETCH;
697 if (s->ivshmem_64bit) {
698 s->ivshmem_attr |= PCI_BASE_ADDRESS_MEM_TYPE_64;
699 }
700
701 if ((s->server_chr != NULL) &&
702 (strncmp(s->server_chr->filename, "unix:", 5) == 0)) {
703 /* if we get a UNIX socket as the parameter we will talk
704 * to the ivshmem server to receive the memory region */
705
706 if (s->shmobj != NULL) {
707 fprintf(stderr, "WARNING: do not specify both 'chardev' "
708 "and 'shm' with ivshmem\n");
709 }
710
711 IVSHMEM_DPRINTF("using shared memory server (socket = %s)\n",
712 s->server_chr->filename);
713
714 if (ivshmem_has_feature(s, IVSHMEM_MSI)) {
715 ivshmem_setup_msi(s);
716 }
717
718 /* we allocate enough space for 16 guests and grow as needed */
719 s->nb_peers = 16;
720 s->vm_id = -1;
721
722 /* allocate/initialize space for interrupt handling */
723 s->peers = g_malloc0(s->nb_peers * sizeof(Peer));
724
725 pci_register_bar(&s->dev, 2, s->ivshmem_attr, &s->bar);
726
727 s->eventfd_chr = g_malloc0(s->vectors * sizeof(CharDriverState *));
728
729 qemu_chr_add_handlers(s->server_chr, ivshmem_can_receive, ivshmem_read,
730 ivshmem_event, s);
731 } else {
732 /* just map the file immediately, we're not using a server */
733 int fd;
734
735 if (s->shmobj == NULL) {
736 fprintf(stderr, "Must specify 'chardev' or 'shm' to ivshmem\n");
exit(1); /* without an object name the shm_open() calls below cannot work */
737 }
738
739 IVSHMEM_DPRINTF("using shm_open (shm object = %s)\n", s->shmobj);
740
741 /* try opening with O_EXCL; if that succeeds the object is new, so
742 * extending it below leaves the memory zero-filled */
743 if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR|O_EXCL,
744 S_IRWXU|S_IRWXG|S_IRWXO)) > 0) {
745 /* size the file to the length of the PCI device's memory */
746 if (ftruncate(fd, s->ivshmem_size) != 0) {
747 fprintf(stderr, "ivshmem: could not truncate shared file\n");
748 }
749
750 } else if ((fd = shm_open(s->shmobj, O_CREAT|O_RDWR,
751 S_IRWXU|S_IRWXG|S_IRWXO)) < 0) {
752 fprintf(stderr, "ivshmem: could not open shared file\n");
753 exit(-1);
754
755 }
756
757 if (check_shm_size(s, fd) == -1) {
758 exit(-1);
759 }
760
761 create_shared_memory_BAR(s, fd);
762
763 }
764
765 s->dev.config_write = ivshmem_write_config;
766
767 return 0;
768 }
769
770 static void pci_ivshmem_uninit(PCIDevice *dev)
771 {
772 IVShmemState *s = DO_UPCAST(IVShmemState, dev, dev);
773
774 if (s->migration_blocker) {
775 migrate_del_blocker(s->migration_blocker);
776 error_free(s->migration_blocker);
777 }
778
779 memory_region_destroy(&s->ivshmem_mmio);
780 memory_region_del_subregion(&s->bar, &s->ivshmem);
781 vmstate_unregister_ram(&s->ivshmem, &s->dev.qdev);
782 memory_region_destroy(&s->ivshmem);
783 memory_region_destroy(&s->bar);
784 unregister_savevm(&dev->qdev, "ivshmem", s);
785 }
786
787 static Property ivshmem_properties[] = {
788 DEFINE_PROP_CHR("chardev", IVShmemState, server_chr),
789 DEFINE_PROP_STRING("size", IVShmemState, sizearg),
790 DEFINE_PROP_UINT32("vectors", IVShmemState, vectors, 1),
791 DEFINE_PROP_BIT("ioeventfd", IVShmemState, features, IVSHMEM_IOEVENTFD, false),
792 DEFINE_PROP_BIT("msi", IVShmemState, features, IVSHMEM_MSI, true),
793 DEFINE_PROP_STRING("shm", IVShmemState, shmobj),
794 DEFINE_PROP_STRING("role", IVShmemState, role),
795 DEFINE_PROP_UINT32("use64", IVShmemState, ivshmem_64bit, 1),
796 DEFINE_PROP_END_OF_LIST(),
797 };
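/*
 * Illustrative usage of the properties above (not part of the original
 * source; paths and IDs are examples only):
 *
 *   backed directly by a POSIX shared memory object, no server:
 *     -device ivshmem,shm=ivshmem_example,size=1G
 *
 *   through an ivshmem server on a UNIX socket, with MSI-X and ioeventfd:
 *     -chardev socket,path=/tmp/ivshmem_socket,id=ivshm
 *     -device ivshmem,chardev=ivshm,size=1G,vectors=4,msi=on,ioeventfd=on
 */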
798
799 static void ivshmem_class_init(ObjectClass *klass, void *data)
800 {
801 DeviceClass *dc = DEVICE_CLASS(klass);
802 PCIDeviceClass *k = PCI_DEVICE_CLASS(klass);
803
804 k->init = pci_ivshmem_init;
805 k->exit = pci_ivshmem_uninit;
806 k->vendor_id = PCI_VENDOR_ID_IVSHMEM;
807 k->device_id = PCI_DEVICE_ID_IVSHMEM;
808 k->class_id = PCI_CLASS_MEMORY_RAM;
809 dc->reset = ivshmem_reset;
810 dc->props = ivshmem_properties;
811 }
812
813 static const TypeInfo ivshmem_info = {
814 .name = "ivshmem",
815 .parent = TYPE_PCI_DEVICE,
816 .instance_size = sizeof(IVShmemState),
817 .class_init = ivshmem_class_init,
818 };
819
820 static void ivshmem_register_types(void)
821 {
822 type_register_static(&ivshmem_info);
823 }
824
825 type_init(ivshmem_register_types)