// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 */
#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>
#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"
#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1
static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");
static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");
static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;
struct vmci_guest_device {
	struct device *dev;	/* PCI device we are attached to */
	void __iomem *iobase;
	void __iomem *mmio_base;

	bool exclusive_vectors;

	struct tasklet_struct datagram_tasklet;
	struct tasklet_struct bm_tasklet;

	void *data_buffer;
	void *notification_bitmap;
	dma_addr_t notification_base;
};
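/* Set during probe from the VMCI_CAPS_PPN64 device capability. */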
static bool use_ppn64;

bool vmci_use_ppn64(void)
{
	return use_ppn64;
}
/* vmci_dev singleton device and supporting data */
struct pci_dev *vmci_pdev;
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);
bool vmci_guest_code_active(void)
{
	return atomic_read(&vmci_num_guest_devices) != 0;
}
u32 vmci_get_vm_context_id(void)
{
	if (vm_context_id == VMCI_INVALID_ID) {
		struct vmci_datagram get_cid_msg;

		get_cid_msg.dst =
			vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					 VMCI_GET_CONTEXT_ID);
		get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
		get_cid_msg.payload_size = 0;
		vm_context_id = vmci_send_datagram(&get_cid_msg);
	}
	return vm_context_id;
}
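/*
 * Register accessors: devices that expose the BAR1 MMIO window are accessed
 * with readl()/writel(); otherwise we fall back to the BAR0 I/O port window
 * via ioread32()/iowrite32(). A non-NULL mmio_base selects the former.
 */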
static unsigned int vmci_read_reg(struct vmci_guest_device *dev, u32 reg)
{
	if (dev->mmio_base != NULL)
		return readl(dev->mmio_base + reg);
	return ioread32(dev->iobase + reg);
}
static void vmci_write_reg(struct vmci_guest_device *dev, u32 val, u32 reg)
{
	if (dev->mmio_base != NULL)
		writel(val, dev->mmio_base + reg);
	else
		iowrite32(val, dev->iobase + reg);
}
/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
	unsigned long flags;
	int result;

	/* Check args. */
	if (dg == NULL)
		return VMCI_ERROR_INVALID_ARGS;

	/*
	 * Need to acquire spinlock on the device because the datagram
	 * data may be spread over multiple pages and the monitor may
	 * interleave device user rpc calls from multiple
	 * VCPUs. Acquiring the spinlock precludes that
	 * possibility. Disabling interrupts to avoid incoming
	 * datagrams during a "rep out" and possibly landing up in
	 * this function.
	 */
	spin_lock_irqsave(&vmci_dev_spinlock, flags);

	if (vmci_dev_g) {
		iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
			     dg, VMCI_DG_SIZE(dg));
		result = vmci_read_reg(vmci_dev_g, VMCI_RESULT_LOW_ADDR);
	} else {
		result = VMCI_ERROR_UNAVAILABLE;
	}

	spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

	return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);
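/*
 * A minimal sketch of how callers drive vmci_send_datagram(): fill in the
 * destination and source handles plus the payload size, as
 * vmci_get_vm_context_id() does above:
 *
 *	struct vmci_datagram dg;
 *
 *	dg.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 *				  VMCI_GET_CONTEXT_ID);
 *	dg.src = VMCI_ANON_SRC_HANDLE;
 *	dg.payload_size = 0;
 *	result = vmci_send_datagram(&dg);
 */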
/*
 * Gets called with the new context id if updated or resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
				  const struct vmci_event_data *event_data,
				  void *client_data)
{
	const struct vmci_event_payld_ctx *ev_payload =
			vmci_event_data_const_payload(event_data);

	if (sub_id != ctx_update_sub_id) {
		pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
		return;
	}

	if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
		pr_devel("Invalid event data\n");
		return;
	}

	pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
		 vm_context_id, ev_payload->context_id, event_data->event);

	vm_context_id = ev_payload->context_id;
}
/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns 0 if the
 * required hypercalls (or fallback hypercalls) are supported by the host,
 * an error code otherwise.
 */
static int vmci_check_host_caps(struct pci_dev *pdev)
{
	bool result;
	struct vmci_resource_query_msg *msg;
	u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
			VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
	struct vmci_datagram *check_msg;

	check_msg = kzalloc(msg_size, GFP_KERNEL);
	if (!check_msg) {
		dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
		return -ENOMEM;
	}

	check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
					  VMCI_RESOURCES_QUERY);
	check_msg->src = VMCI_ANON_SRC_HANDLE;
	check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
	msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

	msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
	msg->resources[0] = VMCI_GET_CONTEXT_ID;

	/* Checks that hyper calls are supported */
	result = vmci_send_datagram(check_msg) == 0x01;
	kfree(check_msg);

	dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
		__func__, result ? "PASSED" : "FAILED");

	/* We need the vector. There are no fallbacks. */
	return result ? 0 : -ENXIO;
}
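/*
 * Datagram receive path. vmci_dispatch_dgs() below runs in tasklet
 * (softirq) context; the interrupt handlers further down schedule it
 * whenever the device signals incoming datagrams.
 */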
/*
 * Reads datagrams from the data in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data
 * in port for the duration of the call.
 */
static void vmci_dispatch_dgs(unsigned long data)
{
	struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
	u8 *dg_in_buffer = vmci_dev->data_buffer;
	struct vmci_datagram *dg;
	size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
	size_t current_dg_in_buffer_size = PAGE_SIZE;
	size_t remaining_bytes;

	BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

	ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
		    vmci_dev->data_buffer, current_dg_in_buffer_size);
	dg = (struct vmci_datagram *)dg_in_buffer;
	remaining_bytes = current_dg_in_buffer_size;

	while (dg->dst.resource != VMCI_INVALID_ID ||
	       remaining_bytes > PAGE_SIZE) {
		unsigned int dg_in_size;

		/*
		 * When the input buffer spans multiple pages, a datagram can
		 * start on any page boundary in the buffer.
		 */
		if (dg->dst.resource == VMCI_INVALID_ID) {
			dg = (struct vmci_datagram *)roundup(
				(uintptr_t)dg + 1, PAGE_SIZE);
			remaining_bytes =
				(size_t)(dg_in_buffer +
					 current_dg_in_buffer_size -
					 (u8 *)dg);
			continue;
		}

		dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

		if (dg_in_size <= dg_in_buffer_size) {
			int result;

			/*
			 * If the remaining bytes in the datagram
			 * buffer don't contain the complete
			 * datagram, we first make sure we have enough
			 * room for it and then we read the remainder
			 * of the datagram and possibly any following
			 * datagrams.
			 */
			if (dg_in_size > remaining_bytes) {
				if (remaining_bytes !=
				    current_dg_in_buffer_size) {
					/*
					 * We move the partial
					 * datagram to the front and
					 * read the remainder of the
					 * datagram and possibly any
					 * following datagrams into the
					 * following bytes.
					 */
					memmove(dg_in_buffer, dg_in_buffer +
						current_dg_in_buffer_size -
						remaining_bytes,
						remaining_bytes);
					dg = (struct vmci_datagram *)
						dg_in_buffer;
				}

				if (current_dg_in_buffer_size !=
				    dg_in_buffer_size)
					current_dg_in_buffer_size =
						dg_in_buffer_size;

				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer +
						remaining_bytes,
					    current_dg_in_buffer_size -
						remaining_bytes);
			}

			/*
			 * We special case event datagrams from the
			 * hypervisor.
			 */
			if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
			    dg->dst.resource == VMCI_EVENT_HANDLER) {
				result = vmci_event_dispatch(dg);
			} else {
				result = vmci_datagram_invoke_guest_handler(dg);
			}
			if (result < VMCI_SUCCESS)
				dev_dbg(vmci_dev->dev,
					"Datagram with resource (ID=0x%x) failed (err=%d)\n",
					dg->dst.resource, result);

			/* On to the next datagram. */
			dg = (struct vmci_datagram *)((u8 *)dg +
						      dg_in_size);
		} else {
			size_t bytes_to_skip;

			/*
			 * Datagram doesn't fit in datagram buffer of maximal
			 * size. We drop it.
			 */
			dev_dbg(vmci_dev->dev,
				"Failed to receive datagram (size=%u bytes)\n",
				dg_in_size);

			bytes_to_skip = dg_in_size - remaining_bytes;
			if (current_dg_in_buffer_size != dg_in_buffer_size)
				current_dg_in_buffer_size = dg_in_buffer_size;

			for (;;) {
				ioread8_rep(vmci_dev->iobase +
						VMCI_DATA_IN_ADDR,
					    vmci_dev->data_buffer,
					    current_dg_in_buffer_size);
				if (bytes_to_skip <= current_dg_in_buffer_size)
					break;

				bytes_to_skip -= current_dg_in_buffer_size;
			}
			dg = (struct vmci_datagram *)(dg_in_buffer +
						      bytes_to_skip);
		}

		remaining_bytes =
			(size_t)(dg_in_buffer + current_dg_in_buffer_size -
				 (u8 *)dg);

		if (remaining_bytes < VMCI_DG_HEADERSIZE) {
			/* Get the next batch of datagrams. */
			ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
				    vmci_dev->data_buffer,
				    current_dg_in_buffer_size);
			dg = (struct vmci_datagram *)dg_in_buffer;
			remaining_bytes = current_dg_in_buffer_size;
		}
	}
}
/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
	struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

	if (!dev->notification_bitmap) {
		dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
		return;
	}

	vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}
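/*
 * Interrupt handling. With exclusive MSI-X vectors, vector
 * VMCI_INTR_DATAGRAM signals datagrams and vector VMCI_INTR_NOTIFICATION
 * signals doorbell notifications; otherwise a single (possibly shared)
 * interrupt is demultiplexed by reading the ICR register.
 */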
/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/*
	 * If we are using MSI-X with exclusive vectors then we simply schedule
	 * the datagram tasklet, since we know the interrupt was meant for us.
	 * Otherwise we must read the ICR to determine what to do.
	 */

	if (dev->exclusive_vectors) {
		tasklet_schedule(&dev->datagram_tasklet);
	} else {
		unsigned int icr;

		/* Acknowledge interrupt and determine what needs doing. */
		icr = vmci_read_reg(dev, VMCI_ICR_ADDR);
		if (icr == 0 || icr == ~0)
			return IRQ_NONE;

		if (icr & VMCI_ICR_DATAGRAM) {
			tasklet_schedule(&dev->datagram_tasklet);
			icr &= ~VMCI_ICR_DATAGRAM;
		}

		if (icr & VMCI_ICR_NOTIFICATION) {
			tasklet_schedule(&dev->bm_tasklet);
			icr &= ~VMCI_ICR_NOTIFICATION;
		}

		if (icr != 0)
			dev_warn(dev->dev,
				 "Ignoring unknown interrupt cause (%d)\n",
				 icr);
	}

	return IRQ_HANDLED;
}
/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
	struct vmci_guest_device *dev = _dev;

	/* For MSI-X we can just assume it was meant for us. */
	tasklet_schedule(&dev->bm_tasklet);

	return IRQ_HANDLED;
}
/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
				   const struct pci_device_id *id)
{
	struct vmci_guest_device *vmci_dev;
	void __iomem *iobase = NULL;
	void __iomem *mmio_base = NULL;
	unsigned int capabilities;
	unsigned int caps_in_use;
	unsigned long cmd;
	int vmci_err;
	int error;

	dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

	error = pcim_enable_device(pdev);
	if (error) {
		dev_err(&pdev->dev,
			"Failed to enable VMCI device: %d\n", error);
		return error;
	}
	/*
	 * The VMCI device with mmio access to registers requests 256KB
	 * for BAR1. If present, driver will use new VMCI device
	 * functionality for register access and datagram send/recv.
	 */

	if (pci_resource_len(pdev, 1) == VMCI_WITH_MMIO_ACCESS_BAR_SIZE) {
		dev_info(&pdev->dev, "MMIO register access is available\n");
		mmio_base = pci_iomap_range(pdev, 1, VMCI_MMIO_ACCESS_OFFSET,
					    VMCI_MMIO_ACCESS_SIZE);
		/* If the map fails, we fall back to IOIO access. */
		if (!mmio_base)
			dev_warn(&pdev->dev, "Failed to map MMIO register access\n");
	}

	if (!mmio_base) {
		error = pcim_iomap_regions(pdev, BIT(0), KBUILD_MODNAME);
		if (error) {
			dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
			return error;
		}
		iobase = pcim_iomap_table(pdev)[0];
	}
	vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
	if (!vmci_dev) {
		dev_err(&pdev->dev,
			"Can't allocate memory for VMCI device\n");
		return -ENOMEM;
	}

	vmci_dev->dev = &pdev->dev;
	vmci_dev->exclusive_vectors = false;
	vmci_dev->iobase = iobase;
	vmci_dev->mmio_base = mmio_base;

	tasklet_init(&vmci_dev->datagram_tasklet,
		     vmci_dispatch_dgs, (unsigned long)vmci_dev);
	tasklet_init(&vmci_dev->bm_tasklet,
		     vmci_process_bitmap, (unsigned long)vmci_dev);

	vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
	if (!vmci_dev->data_buffer) {
		dev_err(&pdev->dev,
			"Can't allocate memory for datagram buffer\n");
		return -ENOMEM;
	}

	pci_set_master(pdev);	/* To enable queue_pair functionality. */
	/*
	 * Verify that the VMCI Device supports the capabilities that
	 * we need. If the device is missing capabilities that we would
	 * like to use, check for fallback capabilities and use those
	 * instead (so we can run a new VM on old hosts). Fail the load if
	 * a required capability is missing and there is no fallback.
	 *
	 * Right now, we need datagrams. There are no fallbacks.
	 */
	capabilities = vmci_read_reg(vmci_dev, VMCI_CAPS_ADDR);
	if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
		dev_err(&pdev->dev, "Device does not support datagrams\n");
		error = -ENXIO;
		goto err_free_data_buffer;
	}
	caps_in_use = VMCI_CAPS_DATAGRAM;
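	/*
	 * caps_in_use accumulates the capabilities this driver will actually
	 * use; the final set is acknowledged to the device through
	 * VMCI_CAPS_ADDR further down.
	 */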
	/*
	 * Use 64-bit PPNs if the device supports.
	 *
	 * There is no check for the return value of dma_set_mask_and_coherent
	 * since this driver can handle the default mask values if
	 * dma_set_mask_and_coherent fails.
	 */
	if (capabilities & VMCI_CAPS_PPN64) {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
		use_ppn64 = true;
		caps_in_use |= VMCI_CAPS_PPN64;
	} else {
		dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(44));
		use_ppn64 = false;
	}
	/*
	 * If the hardware supports notifications, we will use that as
	 * well.
	 */
	if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
		vmci_dev->notification_bitmap = dma_alloc_coherent(
			&pdev->dev, PAGE_SIZE, &vmci_dev->notification_base,
			GFP_KERNEL);
		if (!vmci_dev->notification_bitmap) {
			dev_warn(&pdev->dev,
				 "Unable to allocate notification bitmap\n");
		} else {
			memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
			caps_in_use |= VMCI_CAPS_NOTIFICATIONS;
		}
	}
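	/*
	 * The notification bitmap allocated above is shared with the device:
	 * the device raises flags in the dma-coherent page and
	 * vmci_process_bitmap() scans and clears them.
	 */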
	if (mmio_base != NULL) {
		if (capabilities & VMCI_CAPS_DMA_DATAGRAM) {
			caps_in_use |= VMCI_CAPS_DMA_DATAGRAM;
		} else {
			dev_err(&pdev->dev,
				"Missing capability: VMCI_CAPS_DMA_DATAGRAM\n");
			error = -ENXIO;
			goto err_free_data_buffer;
		}
	}

	dev_info(&pdev->dev, "Using capabilities 0x%x\n", caps_in_use);
	/* Let the host know which capabilities we intend to use. */
	vmci_write_reg(vmci_dev, caps_in_use, VMCI_CAPS_ADDR);

	/* Let the device know the size for pages passed down. */
	if (caps_in_use & VMCI_CAPS_DMA_DATAGRAM)
		vmci_write_reg(vmci_dev, PAGE_SHIFT, VMCI_GUEST_PAGE_SHIFT);

	/* Set up global device so that we can start sending datagrams */
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = vmci_dev;
	vmci_pdev = pdev;
	spin_unlock_irq(&vmci_dev_spinlock);
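	/*
	 * From this point on vmci_send_datagram() can reach the device; the
	 * bitmap registration and host capability check below depend on it.
	 */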
	/*
	 * Register notification bitmap with device if that capability is
	 * used.
	 */
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS) {
		unsigned long bitmap_ppn =
			vmci_dev->notification_base >> PAGE_SHIFT;
		if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
			dev_warn(&pdev->dev,
				 "VMCI device unable to register notification bitmap with PPN 0x%lx\n",
				 bitmap_ppn);
			error = -ENXIO;
			goto err_remove_vmci_dev_g;
		}
	}

	/* Check host capabilities. */
	error = vmci_check_host_caps(pdev);
	if (error)
		goto err_remove_bitmap;
	/*
	 * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
	 * update the internal context id when needed.
	 */
	vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
					vmci_guest_cid_update, NULL,
					&ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to subscribe to event (type=%d): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, vmci_err);
	/*
	 * Enable interrupts. Try MSI-X first, then MSI, and then fall back on
	 * legacy interrupts.
	 */
	error = pci_alloc_irq_vectors(pdev, VMCI_MAX_INTRS, VMCI_MAX_INTRS,
				      PCI_IRQ_MSIX);
	if (error < 0) {
		error = pci_alloc_irq_vectors(pdev, 1, 1,
				PCI_IRQ_MSIX | PCI_IRQ_MSI | PCI_IRQ_LEGACY);
		if (error < 0)
			goto err_remove_bitmap;
	} else {
		vmci_dev->exclusive_vectors = true;
	}
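	/*
	 * exclusive_vectors is only set when the device granted all
	 * VMCI_MAX_INTRS MSI-X vectors; in the fallback case a single
	 * vector is shared between datagrams and notifications.
	 */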
	/*
	 * Request IRQ for legacy or MSI interrupts, or for first
	 * MSI-X vector.
	 */
	error = request_irq(pci_irq_vector(pdev, 0), vmci_interrupt,
			    IRQF_SHARED, KBUILD_MODNAME, vmci_dev);
	if (error) {
		dev_err(&pdev->dev, "Irq %u in use: %d\n",
			pci_irq_vector(pdev, 0), error);
		goto err_disable_msi;
	}
	/*
	 * For MSI-X with exclusive vectors we need to request an
	 * interrupt for each vector so that we get a separate
	 * interrupt handler routine. This allows us to distinguish
	 * between the vectors.
	 */
	if (vmci_dev->exclusive_vectors) {
		error = request_irq(pci_irq_vector(pdev, 1),
				    vmci_interrupt_bm, 0, KBUILD_MODNAME,
				    vmci_dev);
		if (error) {
			dev_err(&pdev->dev,
				"Failed to allocate irq %u: %d\n",
				pci_irq_vector(pdev, 1), error);
			goto err_free_irq;
		}
	}
	dev_dbg(&pdev->dev, "Registered device\n");

	atomic_inc(&vmci_num_guest_devices);

	/* Enable specific interrupt bits. */
	cmd = VMCI_IMR_DATAGRAM;
	if (caps_in_use & VMCI_CAPS_NOTIFICATIONS)
		cmd |= VMCI_IMR_NOTIFICATION;
	vmci_write_reg(vmci_dev, cmd, VMCI_IMR_ADDR);

	/* Enable interrupts. */
	vmci_write_reg(vmci_dev, VMCI_CONTROL_INT_ENABLE, VMCI_CONTROL_ADDR);

	pci_set_drvdata(pdev, vmci_dev);

	vmci_call_vsock_callback(false);
	return 0;
err_free_irq:
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
	pci_free_irq_vectors(pdev);

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
	if (vmci_dev->notification_bitmap) {
		vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

err_remove_vmci_dev_g:
	spin_lock_irq(&vmci_dev_spinlock);
	vmci_pdev = NULL;
	vmci_dev_g = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
	return error;
}
static void vmci_guest_remove_device(struct pci_dev *pdev)
{
	struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
	int vmci_err;

	dev_dbg(&pdev->dev, "Removing device\n");

	atomic_dec(&vmci_num_guest_devices);

	vmci_qp_guest_endpoints_exit();

	vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
	if (vmci_err < VMCI_SUCCESS)
		dev_warn(&pdev->dev,
			 "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
			 VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

	spin_lock_irq(&vmci_dev_spinlock);
	vmci_dev_g = NULL;
	vmci_pdev = NULL;
	spin_unlock_irq(&vmci_dev_spinlock);

	dev_dbg(&pdev->dev, "Resetting vmci device\n");
	vmci_write_reg(vmci_dev, VMCI_CONTROL_RESET, VMCI_CONTROL_ADDR);

	/*
	 * Free IRQ and then disable MSI/MSI-X as appropriate. For
	 * MSI-X, we might have multiple vectors, each with their own
	 * IRQ, which we must free too.
	 */
	if (vmci_dev->exclusive_vectors)
		free_irq(pci_irq_vector(pdev, 1), vmci_dev);
	free_irq(pci_irq_vector(pdev, 0), vmci_dev);
	pci_free_irq_vectors(pdev);

	tasklet_kill(&vmci_dev->datagram_tasklet);
	tasklet_kill(&vmci_dev->bm_tasklet);

	if (vmci_dev->notification_bitmap) {
		/*
		 * The device reset above cleared the bitmap state of the
		 * device, so we can safely free it here.
		 */
		dma_free_coherent(&pdev->dev, PAGE_SIZE,
				  vmci_dev->notification_bitmap,
				  vmci_dev->notification_base);
	}

	vfree(vmci_dev->data_buffer);

	/* The rest are managed resources and will be freed by PCI core */
}
static const struct pci_device_id vmci_ids[] = {
	{ PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
	{ 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);
static struct pci_driver vmci_guest_driver = {
	.name		= KBUILD_MODNAME,
	.id_table	= vmci_ids,
	.probe		= vmci_guest_probe_device,
	.remove		= vmci_guest_remove_device,
};
int __init vmci_guest_init(void)
{
	return pci_register_driver(&vmci_guest_driver);
}
void __exit vmci_guest_exit(void)
{
	pci_unregister_driver(&vmci_guest_driver);
}