/*
 * VMware VMCI Driver
 *
 * Copyright (C) 2012 VMware, Inc. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License as published by the
 * Free Software Foundation version 2 and no later version.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
 * or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
 * for more details.
 */

#include <linux/vmw_vmci_defs.h>
#include <linux/vmw_vmci_api.h>
#include <linux/moduleparam.h>
#include <linux/interrupt.h>
#include <linux/highmem.h>
#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/module.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/init.h>
#include <linux/pci.h>
#include <linux/smp.h>
#include <linux/io.h>
#include <linux/vmalloc.h>

#include "vmci_datagram.h"
#include "vmci_doorbell.h"
#include "vmci_context.h"
#include "vmci_driver.h"
#include "vmci_event.h"

#define PCI_VENDOR_ID_VMWARE		0x15AD
#define PCI_DEVICE_ID_VMWARE_VMCI	0x0740

#define VMCI_UTIL_NUM_RESOURCES 1

static bool vmci_disable_msi;
module_param_named(disable_msi, vmci_disable_msi, bool, 0);
MODULE_PARM_DESC(disable_msi, "Disable MSI use in driver - (default=0)");

static bool vmci_disable_msix;
module_param_named(disable_msix, vmci_disable_msix, bool, 0);
MODULE_PARM_DESC(disable_msix, "Disable MSI-X use in driver - (default=0)");

static u32 ctx_update_sub_id = VMCI_INVALID_ID;
static u32 vm_context_id = VMCI_INVALID_ID;

struct vmci_guest_device {
        struct device *dev;	/* PCI device we are attached to */
        void __iomem *iobase;

        unsigned int irq;
        unsigned int intr_type;
        bool exclusive_vectors;
        struct msix_entry msix_entries[VMCI_MAX_INTRS];

        struct tasklet_struct datagram_tasklet;
        struct tasklet_struct bm_tasklet;

        void *data_buffer;
        void *notification_bitmap;
};

/* vmci_dev singleton device and supporting data */
static struct vmci_guest_device *vmci_dev_g;
static DEFINE_SPINLOCK(vmci_dev_spinlock);

static atomic_t vmci_num_guest_devices = ATOMIC_INIT(0);

bool vmci_guest_code_active(void)
{
        return atomic_read(&vmci_num_guest_devices) != 0;
}

u32 vmci_get_vm_context_id(void)
{
        if (vm_context_id == VMCI_INVALID_ID) {
                struct vmci_datagram get_cid_msg;
                get_cid_msg.dst =
                        vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                         VMCI_GET_CONTEXT_ID);
                get_cid_msg.src = VMCI_ANON_SRC_HANDLE;
                get_cid_msg.payload_size = 0;
                vm_context_id = vmci_send_datagram(&get_cid_msg);
        }
        return vm_context_id;
}

/*
 * VM to hypervisor call mechanism. We use the standard VMware naming
 * convention since shared code is calling this function as well.
 */
int vmci_send_datagram(struct vmci_datagram *dg)
{
        unsigned long flags;
        int result;

        /* Check args. */
        if (dg == NULL)
                return VMCI_ERROR_INVALID_ARGS;

        /*
         * Need to acquire spinlock on the device because the datagram
         * data may be spread over multiple pages and the monitor may
         * interleave device user rpc calls from multiple
         * VCPUs. Acquiring the spinlock precludes that
         * possibility. Disabling interrupts to avoid incoming
         * datagrams during a "rep out" and possibly landing up in
         * this function.
         */
        spin_lock_irqsave(&vmci_dev_spinlock, flags);

        if (vmci_dev_g) {
                iowrite8_rep(vmci_dev_g->iobase + VMCI_DATA_OUT_ADDR,
                             dg, VMCI_DG_SIZE(dg));
                result = ioread32(vmci_dev_g->iobase + VMCI_RESULT_LOW_ADDR);
        } else {
                result = VMCI_ERROR_UNAVAILABLE;
        }

        spin_unlock_irqrestore(&vmci_dev_spinlock, flags);

        return result;
}
EXPORT_SYMBOL_GPL(vmci_send_datagram);
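
/*
 * Illustrative sketch of the send path above, using only what this
 * file shows: the caller builds a struct vmci_datagram and hands it
 * over; the datagram bytes go out through the VMCI_DATA_OUT_ADDR port
 * and the hypervisor's reply is read back from VMCI_RESULT_LOW_ADDR.
 * vmci_get_vm_context_id() is the in-file example of a payload-less
 * datagram:
 *
 *        struct vmci_datagram dg;
 *
 *        dg.dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
 *                                  VMCI_GET_CONTEXT_ID);
 *        dg.src = VMCI_ANON_SRC_HANDLE;
 *        dg.payload_size = 0;
 *        result = vmci_send_datagram(&dg);
 */
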
/*
 * Gets called with the new context id if updated or resumed.
 */
static void vmci_guest_cid_update(u32 sub_id,
                                  const struct vmci_event_data *event_data,
                                  void *client_data)
{
        const struct vmci_event_payld_ctx *ev_payload =
                        vmci_event_data_const_payload(event_data);

        if (sub_id != ctx_update_sub_id) {
                pr_devel("Invalid subscriber (ID=0x%x)\n", sub_id);
                return;
        }

        if (!event_data || ev_payload->context_id == VMCI_INVALID_ID) {
                pr_devel("Invalid event data\n");
                return;
        }

        pr_devel("Updating context from (ID=0x%x) to (ID=0x%x) on event (type=%d)\n",
                 vm_context_id, ev_payload->context_id, event_data->event);

        vm_context_id = ev_payload->context_id;
}

/*
 * Verify that the host supports the hypercalls we need. If it does not,
 * try to find fallback hypercalls and use those instead. Returns
 * true if required hypercalls (or fallback hypercalls) are
 * supported by the host, false otherwise.
 */
static bool vmci_check_host_caps(struct pci_dev *pdev)
{
        bool result;
        struct vmci_resource_query_msg *msg;
        u32 msg_size = sizeof(struct vmci_resource_query_hdr) +
                        VMCI_UTIL_NUM_RESOURCES * sizeof(u32);
        struct vmci_datagram *check_msg;

        check_msg = kmalloc(msg_size, GFP_KERNEL);
        if (!check_msg) {
                dev_err(&pdev->dev, "%s: Insufficient memory\n", __func__);
                return false;
        }

        check_msg->dst = vmci_make_handle(VMCI_HYPERVISOR_CONTEXT_ID,
                                          VMCI_RESOURCES_QUERY);
        check_msg->src = VMCI_ANON_SRC_HANDLE;
        check_msg->payload_size = msg_size - VMCI_DG_HEADERSIZE;
        msg = (struct vmci_resource_query_msg *)VMCI_DG_PAYLOAD(check_msg);

        msg->num_resources = VMCI_UTIL_NUM_RESOURCES;
        msg->resources[0] = VMCI_GET_CONTEXT_ID;

        /* Checks that hyper calls are supported */
        result = vmci_send_datagram(check_msg) == 0x01;
        kfree(check_msg);

        dev_dbg(&pdev->dev, "%s: Host capability check: %s\n",
                __func__, result ? "PASSED" : "FAILED");

        /* We need the vector. There are no fallbacks. */
        return result;
}
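
/*
 * For reference, the wire layout of the query built above, as implied
 * by the code (field definitions live in vmw_vmci_defs.h):
 *
 *        struct vmci_datagram header    VMCI_DG_HEADERSIZE bytes
 *        u32 num_resources              VMCI_UTIL_NUM_RESOURCES (1)
 *        u32 resources[1]               { VMCI_GET_CONTEXT_ID }
 *
 * The check treats a reply of 0x01 as "the single queried resource is
 * supported".
 */
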
/*
 * Reads datagrams from the data in port and dispatches them. We
 * always start reading datagrams into only the first page of the
 * datagram buffer. If the datagrams don't fit into one page, we
 * use the maximum datagram buffer size for the remainder of the
 * invocation. This is a simple heuristic for not penalizing
 * small datagrams.
 *
 * This function assumes that it has exclusive access to the data
 * in port for the duration of the call.
 */
static void vmci_dispatch_dgs(unsigned long data)
{
        struct vmci_guest_device *vmci_dev = (struct vmci_guest_device *)data;
        u8 *dg_in_buffer = vmci_dev->data_buffer;
        struct vmci_datagram *dg;
        size_t dg_in_buffer_size = VMCI_MAX_DG_SIZE;
        size_t current_dg_in_buffer_size = PAGE_SIZE;
        size_t remaining_bytes;

        BUILD_BUG_ON(VMCI_MAX_DG_SIZE < PAGE_SIZE);

        ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
                    vmci_dev->data_buffer, current_dg_in_buffer_size);
        dg = (struct vmci_datagram *)dg_in_buffer;
        remaining_bytes = current_dg_in_buffer_size;

        while (dg->dst.resource != VMCI_INVALID_ID ||
               remaining_bytes > PAGE_SIZE) {
                unsigned dg_in_size;

                /*
                 * When the input buffer spans multiple pages, a datagram can
                 * start on any page boundary in the buffer.
                 */
                if (dg->dst.resource == VMCI_INVALID_ID) {
                        dg = (struct vmci_datagram *)roundup(
                                (uintptr_t)dg + 1, PAGE_SIZE);
                        remaining_bytes =
                                (size_t)(dg_in_buffer +
                                         current_dg_in_buffer_size -
                                         (u8 *)dg);
                        continue;
                }

                dg_in_size = VMCI_DG_SIZE_ALIGNED(dg);

                if (dg_in_size <= dg_in_buffer_size) {
                        int result;

                        /*
                         * If the remaining bytes in the datagram
                         * buffer doesn't contain the complete
                         * datagram, we first make sure we have enough
                         * room for it and then we read the reminder
                         * of the datagram and possibly any following
                         * datagrams.
                         */
                        if (dg_in_size > remaining_bytes) {
                                if (remaining_bytes !=
                                    current_dg_in_buffer_size) {

                                        /*
                                         * We move the partial
                                         * datagram to the front and
                                         * read the reminder of the
                                         * datagram and possibly
                                         * following calls into the
                                         * following bytes.
                                         */
                                        memmove(dg_in_buffer, dg_in_buffer +
                                                current_dg_in_buffer_size -
                                                remaining_bytes,
                                                remaining_bytes);
                                        dg = (struct vmci_datagram *)
                                                dg_in_buffer;
                                }

                                if (current_dg_in_buffer_size !=
                                    dg_in_buffer_size)
                                        current_dg_in_buffer_size =
                                                dg_in_buffer_size;

                                ioread8_rep(vmci_dev->iobase +
                                                VMCI_DATA_IN_ADDR,
                                            vmci_dev->data_buffer +
                                                remaining_bytes,
                                            current_dg_in_buffer_size -
                                                remaining_bytes);
                        }

                        /*
                         * We special case event datagrams from the
                         * hypervisor.
                         */
                        if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
                            dg->dst.resource == VMCI_EVENT_HANDLER) {
                                result = vmci_event_dispatch(dg);
                        } else {
                                result = vmci_datagram_invoke_guest_handler(dg);
                        }
                        if (result < VMCI_SUCCESS)
                                dev_dbg(vmci_dev->dev,
                                        "Datagram with resource (ID=0x%x) failed (err=%d)\n",
                                        dg->dst.resource, result);

                        /* On to the next datagram. */
                        dg = (struct vmci_datagram *)((u8 *)dg +
                                                      dg_in_size);
                } else {
                        size_t bytes_to_skip;

                        /*
                         * Datagram doesn't fit in datagram buffer of maximal
                         * size. We drop it.
                         */
                        dev_dbg(vmci_dev->dev,
                                "Failed to receive datagram (size=%u bytes)\n",
                                dg_in_size);

                        bytes_to_skip = dg_in_size - remaining_bytes;
                        if (current_dg_in_buffer_size != dg_in_buffer_size)
                                current_dg_in_buffer_size = dg_in_buffer_size;

                        for (;;) {
                                ioread8_rep(vmci_dev->iobase +
                                                VMCI_DATA_IN_ADDR,
                                            vmci_dev->data_buffer,
                                            current_dg_in_buffer_size);
                                if (bytes_to_skip <= current_dg_in_buffer_size)
                                        break;

                                bytes_to_skip -= current_dg_in_buffer_size;
                        }
                        dg = (struct vmci_datagram *)(dg_in_buffer +
                                                      bytes_to_skip);
                }

                remaining_bytes =
                        (size_t)(dg_in_buffer + current_dg_in_buffer_size -
                                 (u8 *)dg);

                if (remaining_bytes < VMCI_DG_HEADERSIZE) {
                        /* Get the next batch of datagrams. */

                        ioread8_rep(vmci_dev->iobase + VMCI_DATA_IN_ADDR,
                                    vmci_dev->data_buffer,
                                    current_dg_in_buffer_size);
                        dg = (struct vmci_datagram *)dg_in_buffer;
                        remaining_bytes = current_dg_in_buffer_size;
                }
        }
}
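
/*
 * Worked example of the buffer heuristic above, assuming a 4 KiB
 * PAGE_SIZE: the initial ioread8_rep() pulls in exactly one page. If
 * a datagram's aligned size then exceeds the unread bytes, the working
 * buffer size is bumped once from PAGE_SIZE to VMCI_MAX_DG_SIZE and
 * only the missing tail is fetched, so batches of small datagrams
 * never pay for the full-size read.
 */
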
/*
 * Scans the notification bitmap for raised flags, clears them
 * and handles the notifications.
 */
static void vmci_process_bitmap(unsigned long data)
{
        struct vmci_guest_device *dev = (struct vmci_guest_device *)data;

        if (!dev->notification_bitmap) {
                dev_dbg(dev->dev, "No bitmap present in %s\n", __func__);
                return;
        }

        vmci_dbell_scan_notification_entries(dev->notification_bitmap);
}

/*
 * Enable MSI-X. Try exclusive vectors first, then shared vectors.
 */
static int vmci_enable_msix(struct pci_dev *pdev,
                            struct vmci_guest_device *vmci_dev)
{
        int i;
        int result;

        for (i = 0; i < VMCI_MAX_INTRS; ++i) {
                vmci_dev->msix_entries[i].entry = i;
                vmci_dev->msix_entries[i].vector = i;
        }

        result = pci_enable_msix(pdev, vmci_dev->msix_entries, VMCI_MAX_INTRS);
        if (result == 0)
                vmci_dev->exclusive_vectors = true;
        else if (result > 0)
                result = pci_enable_msix(pdev, vmci_dev->msix_entries, 1);

        return result;
}
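
/*
 * Note on the fallback above: the legacy pci_enable_msix() returns 0
 * on success, a negative errno on failure, and a positive count when
 * fewer vectors are available than requested. Only the positive case
 * triggers the retry with a single, shared vector.
 */
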
/*
 * Interrupt handler for legacy or MSI interrupt, or for first MSI-X
 * interrupt (vector VMCI_INTR_DATAGRAM).
 */
static irqreturn_t vmci_interrupt(int irq, void *_dev)
{
        struct vmci_guest_device *dev = _dev;

        /*
         * If we are using MSI-X with exclusive vectors then we simply schedule
         * the datagram tasklet, since we know the interrupt was meant for us.
         * Otherwise we must read the ICR to determine what to do.
         */

        if (dev->intr_type == VMCI_INTR_TYPE_MSIX && dev->exclusive_vectors) {
                tasklet_schedule(&dev->datagram_tasklet);
        } else {
                unsigned int icr;

                /* Acknowledge interrupt and determine what needs doing. */
                icr = ioread32(dev->iobase + VMCI_ICR_ADDR);
                if (icr == 0 || icr == ~0)
                        return IRQ_NONE;

                if (icr & VMCI_ICR_DATAGRAM) {
                        tasklet_schedule(&dev->datagram_tasklet);
                        icr &= ~VMCI_ICR_DATAGRAM;
                }

                if (icr & VMCI_ICR_NOTIFICATION) {
                        tasklet_schedule(&dev->bm_tasklet);
                        icr &= ~VMCI_ICR_NOTIFICATION;
                }

                if (icr != 0)
                        dev_warn(dev->dev,
                                 "Ignoring unknown interrupt cause (%d)\n",
                                 icr);
        }

        return IRQ_HANDLED;
}
/*
 * Interrupt handler for MSI-X interrupt vector VMCI_INTR_NOTIFICATION,
 * which is for the notification bitmap. Will only get called if we are
 * using MSI-X with exclusive vectors.
 */
static irqreturn_t vmci_interrupt_bm(int irq, void *_dev)
{
        struct vmci_guest_device *dev = _dev;

        /* For MSI-X we can just assume it was meant for us. */
        tasklet_schedule(&dev->bm_tasklet);

        return IRQ_HANDLED;
}

/*
 * Most of the initialization at module load time is done here.
 */
static int vmci_guest_probe_device(struct pci_dev *pdev,
                                   const struct pci_device_id *id)
{
        struct vmci_guest_device *vmci_dev;
        void __iomem *iobase;
        unsigned int capabilities;
        u32 cmd;
        int vmci_err;
        int error;

        dev_dbg(&pdev->dev, "Probing for vmci/PCI guest device\n");

        error = pcim_enable_device(pdev);
        if (error) {
                dev_err(&pdev->dev,
                        "Failed to enable VMCI device: %d\n", error);
                return error;
        }

        error = pcim_iomap_regions(pdev, 1 << 0, KBUILD_MODNAME);
        if (error) {
                dev_err(&pdev->dev, "Failed to reserve/map IO regions\n");
                return error;
        }

        iobase = pcim_iomap_table(pdev)[0];

        dev_info(&pdev->dev, "Found VMCI PCI device at %#lx, irq %u\n",
                 (unsigned long)iobase, pdev->irq);

        vmci_dev = devm_kzalloc(&pdev->dev, sizeof(*vmci_dev), GFP_KERNEL);
        if (!vmci_dev) {
                dev_err(&pdev->dev,
                        "Can't allocate memory for VMCI device\n");
                return -ENOMEM;
        }

        vmci_dev->dev = &pdev->dev;
        vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
        vmci_dev->exclusive_vectors = false;
        vmci_dev->iobase = iobase;

        tasklet_init(&vmci_dev->datagram_tasklet,
                     vmci_dispatch_dgs, (unsigned long)vmci_dev);
        tasklet_init(&vmci_dev->bm_tasklet,
                     vmci_process_bitmap, (unsigned long)vmci_dev);

        vmci_dev->data_buffer = vmalloc(VMCI_MAX_DG_SIZE);
        if (!vmci_dev->data_buffer) {
                dev_err(&pdev->dev,
                        "Can't allocate memory for datagram buffer\n");
                return -ENOMEM;
        }

        pci_set_master(pdev);	/* To enable queue_pair functionality. */

        /*
         * Verify that the VMCI Device supports the capabilities that
         * we need. If the device is missing capabilities that we would
         * like to use, check for fallback capabilities and use those
         * instead (so we can run a new VM on old hosts). Fail the load if
         * a required capability is missing and there is no fallback.
         *
         * Right now, we need datagrams. There are no fallbacks.
         */
        capabilities = ioread32(vmci_dev->iobase + VMCI_CAPS_ADDR);
        if (!(capabilities & VMCI_CAPS_DATAGRAM)) {
                dev_err(&pdev->dev, "Device does not support datagrams\n");
                error = -ENXIO;
                goto err_free_data_buffer;
        }

        /*
         * If the hardware supports notifications, we will use that as
         * well.
         */
        if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
                vmci_dev->notification_bitmap = vmalloc(PAGE_SIZE);
                if (!vmci_dev->notification_bitmap) {
                        dev_warn(&pdev->dev,
                                 "Unable to allocate notification bitmap\n");
                        capabilities &= ~VMCI_CAPS_NOTIFICATIONS;
                } else {
                        memset(vmci_dev->notification_bitmap, 0, PAGE_SIZE);
                        capabilities |= VMCI_CAPS_NOTIFICATIONS;
                }
        }

        dev_info(&pdev->dev, "Using capabilities 0x%x\n", capabilities);

        /* Let the host know which capabilities we intend to use. */
        iowrite32(capabilities, vmci_dev->iobase + VMCI_CAPS_ADDR);

        /* Set up global device so that we can start sending datagrams */
        spin_lock_irq(&vmci_dev_spinlock);
        vmci_dev_g = vmci_dev;
        spin_unlock_irq(&vmci_dev_spinlock);

        /*
         * Register notification bitmap with device if that capability is
         * used.
         */
        if (capabilities & VMCI_CAPS_NOTIFICATIONS) {
                struct page *page =
                        vmalloc_to_page(vmci_dev->notification_bitmap);
                unsigned long bitmap_ppn = page_to_pfn(page);
                if (!vmci_dbell_register_notification_bitmap(bitmap_ppn)) {
                        dev_warn(&pdev->dev,
                                 "VMCI device unable to register notification bitmap with PPN 0x%x\n",
                                 (u32) bitmap_ppn);
                        error = -ENXIO;
                        goto err_remove_vmci_dev_g;
                }
        }

        /* Check host capabilities. */
        if (!vmci_check_host_caps(pdev)) {
                error = -EIO;
                goto err_remove_bitmap;
        }

        /*
         * We subscribe to the VMCI_EVENT_CTX_ID_UPDATE here so we can
         * update the internal context id when needed.
         */
        vmci_err = vmci_event_subscribe(VMCI_EVENT_CTX_ID_UPDATE,
                                        vmci_guest_cid_update, NULL,
                                        &ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
                dev_warn(&pdev->dev,
                         "Failed to subscribe to event (type=%d): %d\n",
                         VMCI_EVENT_CTX_ID_UPDATE, vmci_err);

        /*
         * Enable interrupts. Try MSI-X first, then MSI, and then fallback on
         * legacy interrupts.
         */
        if (!vmci_disable_msix && !vmci_enable_msix(pdev, vmci_dev)) {
                vmci_dev->intr_type = VMCI_INTR_TYPE_MSIX;
                vmci_dev->irq = vmci_dev->msix_entries[0].vector;
        } else if (!vmci_disable_msi && !pci_enable_msi(pdev)) {
                vmci_dev->intr_type = VMCI_INTR_TYPE_MSI;
                vmci_dev->irq = pdev->irq;
        } else {
                vmci_dev->intr_type = VMCI_INTR_TYPE_INTX;
                vmci_dev->irq = pdev->irq;
        }

        /*
         * Request IRQ for legacy or MSI interrupts, or for first
         * MSI-X vector.
         */
        error = request_irq(vmci_dev->irq, vmci_interrupt, IRQF_SHARED,
                            KBUILD_MODNAME, vmci_dev);
        if (error) {
                dev_err(&pdev->dev, "Irq %u in use: %d\n",
                        vmci_dev->irq, error);
                goto err_disable_msi;
        }
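
        /*
         * A note on the flags above: IRQF_SHARED is required because the
         * legacy INTx line may be shared with other devices; it is
         * harmless for MSI/MSI-X. The dedicated notification vector
         * requested below passes no flags, since an exclusive MSI-X
         * vector is never shared.
         */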

        /*
         * For MSI-X with exclusive vectors we need to request an
         * interrupt for each vector so that we get a separate
         * interrupt handler routine. This allows us to distinguish
         * between the vectors.
         */
        if (vmci_dev->exclusive_vectors) {
                error = request_irq(vmci_dev->msix_entries[1].vector,
                                    vmci_interrupt_bm, 0, KBUILD_MODNAME,
                                    vmci_dev);
                if (error) {
                        dev_err(&pdev->dev,
                                "Failed to allocate irq %u: %d\n",
                                vmci_dev->msix_entries[1].vector, error);
                        goto err_free_irq;
                }
        }

        dev_dbg(&pdev->dev, "Registered device\n");

        atomic_inc(&vmci_num_guest_devices);

        /* Enable specific interrupt bits. */
        cmd = VMCI_IMR_DATAGRAM;
        if (capabilities & VMCI_CAPS_NOTIFICATIONS)
                cmd |= VMCI_IMR_NOTIFICATION;
        iowrite32(cmd, vmci_dev->iobase + VMCI_IMR_ADDR);

        /* Enable interrupts. */
        iowrite32(VMCI_CONTROL_INT_ENABLE,
                  vmci_dev->iobase + VMCI_CONTROL_ADDR);

        pci_set_drvdata(pdev, vmci_dev);
        return 0;
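
        /*
         * Error unwind: control flows downward from the label matching
         * the failed step, releasing resources in roughly reverse order
         * of their setup above.
         */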
err_free_irq:
        free_irq(vmci_dev->irq, vmci_dev);
        tasklet_kill(&vmci_dev->datagram_tasklet);
        tasklet_kill(&vmci_dev->bm_tasklet);

err_disable_msi:
        if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX)
                pci_disable_msix(pdev);
        else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI)
                pci_disable_msi(pdev);

        vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
                dev_warn(&pdev->dev,
                         "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
                         VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

err_remove_bitmap:
        if (vmci_dev->notification_bitmap) {
                iowrite32(VMCI_CONTROL_RESET,
                          vmci_dev->iobase + VMCI_CONTROL_ADDR);
                vfree(vmci_dev->notification_bitmap);
        }

err_remove_vmci_dev_g:
        spin_lock_irq(&vmci_dev_spinlock);
        vmci_dev_g = NULL;
        spin_unlock_irq(&vmci_dev_spinlock);

err_free_data_buffer:
        vfree(vmci_dev->data_buffer);

        /* The rest are managed resources and will be freed by PCI core */
        return error;
}

static void vmci_guest_remove_device(struct pci_dev *pdev)
{
        struct vmci_guest_device *vmci_dev = pci_get_drvdata(pdev);
        int vmci_err;

        dev_dbg(&pdev->dev, "Removing device\n");

        atomic_dec(&vmci_num_guest_devices);

        vmci_qp_guest_endpoints_exit();

        vmci_err = vmci_event_unsubscribe(ctx_update_sub_id);
        if (vmci_err < VMCI_SUCCESS)
                dev_warn(&pdev->dev,
                         "Failed to unsubscribe from event (type=%d) with subscriber (ID=0x%x): %d\n",
                         VMCI_EVENT_CTX_ID_UPDATE, ctx_update_sub_id, vmci_err);

        spin_lock_irq(&vmci_dev_spinlock);
        vmci_dev_g = NULL;
        spin_unlock_irq(&vmci_dev_spinlock);

        dev_dbg(&pdev->dev, "Resetting vmci device\n");
        iowrite32(VMCI_CONTROL_RESET, vmci_dev->iobase + VMCI_CONTROL_ADDR);

        /*
         * Free IRQ and then disable MSI/MSI-X as appropriate. For
         * MSI-X, we might have multiple vectors, each with their own
         * IRQ, which we must free too.
         */
        free_irq(vmci_dev->irq, vmci_dev);
        if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSIX) {
                if (vmci_dev->exclusive_vectors)
                        free_irq(vmci_dev->msix_entries[1].vector, vmci_dev);
                pci_disable_msix(pdev);
        } else if (vmci_dev->intr_type == VMCI_INTR_TYPE_MSI) {
                pci_disable_msi(pdev);
        }

        tasklet_kill(&vmci_dev->datagram_tasklet);
        tasklet_kill(&vmci_dev->bm_tasklet);

        if (vmci_dev->notification_bitmap) {
                /*
                 * The device reset above cleared the bitmap state of the
                 * device, so we can safely free it here.
                 */

                vfree(vmci_dev->notification_bitmap);
        }

        vfree(vmci_dev->data_buffer);

        /* The rest are managed resources and will be freed by PCI core */
}

static DEFINE_PCI_DEVICE_TABLE(vmci_ids) = {
        { PCI_DEVICE(PCI_VENDOR_ID_VMWARE, PCI_DEVICE_ID_VMWARE_VMCI), },
        { 0 },
};
MODULE_DEVICE_TABLE(pci, vmci_ids);

static struct pci_driver vmci_guest_driver = {
        .name		= KBUILD_MODNAME,
        .id_table	= vmci_ids,
        .probe		= vmci_guest_probe_device,
        .remove		= vmci_guest_remove_device,
};

int __init vmci_guest_init(void)
{
        return pci_register_driver(&vmci_guest_driver);
}

void __exit vmci_guest_exit(void)
{
        pci_unregister_driver(&vmci_guest_driver);
}