/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/slab.h>

#include "iodev.h"
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts.  The interrupt is asserted on eventfd
 * trigger.  On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd.  All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user.  We can also therefore share the same irq source ID.
 */
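/*
 * Illustrative sketch, not part of this file: how userspace might set up
 * a resampling irqfd. The ioctl, struct and flag names are the real KVM
 * UAPI (<linux/kvm.h>, <sys/eventfd.h>); "vmfd" and "gsi" are assumed to
 * come from the surrounding VMM (KVM_CREATE_VM, guest irq routing).
 *
 *	int irq_fd = eventfd(0, EFD_CLOEXEC);
 *	int resample_fd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd assign = {
 *		.fd         = irq_fd,
 *		.gsi        = gsi,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *		.resamplefd = resample_fd,
 *	};
 *
 *	if (ioctl(vmfd, KVM_IRQFD, &assign) < 0)
 *		err(1, "KVM_IRQFD");
 *
 * Writing to irq_fd asserts the level interrupt; on guest EOI, KVM
 * de-asserts it and signals resample_fd, telling the VMM to re-check its
 * device model and re-assert if the source is still pending.
 */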
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in list of kvm->irqfd.resampler_list.  Use for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};
struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry __rcu *irq_entry;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};
static struct workqueue_struct *irqfd_cleanup_wq;
static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1);
}
/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct _irqfd *irqfd;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);

	kvm_set_irq(resampler->kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0);

	rcu_read_lock();

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	rcu_read_unlock();
}
static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_rcu();

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}
/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry *irq;
	struct kvm *kvm = irqfd->kvm;

	if (flags & POLLIN) {
		rcu_read_lock();
		irq = rcu_dereference(irqfd->irq_entry);
		/* An event has been signaled, inject an interrupt */
		if (irq)
			kvm_set_msi(irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1);
		else
			schedule_work(&irqfd->inject);
		rcu_read_unlock();
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue.  If it is already deactivated, we can
		 * simply return knowing the other side will cleanup for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd,
			 struct kvm_irq_routing_table *irq_rt)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct hlist_node *n;

	if (irqfd->gsi >= irq_rt->nr_rt_entries) {
		rcu_assign_pointer(irqfd->irq_entry, NULL);
		return;
	}

	hlist_for_each_entry(e, n, &irq_rt->map[irqfd->gsi], link) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			rcu_assign_pointer(irqfd->irq_entry, e);
		else
			rcu_assign_pointer(irqfd->irq_entry, NULL);
	}
}
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct kvm_irq_routing_table *irq_rt;
	struct _irqfd *irqfd, *tmp;
	struct file *file = NULL;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);

	file = eventfd_fget(args->fd);
	if (IS_ERR(file)) {
		ret = PTR_ERR(file);
		goto fail;
	}

	eventfd = eventfd_ctx_fileget(file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, list) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_rcu();

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	irq_rt = rcu_dereference_protected(kvm->irq_routing,
					   lockdep_is_held(&kvm->irqfds.lock));
	irqfd_update(kvm, irqfd, irq_rt);

	events = file->f_op->poll(file, &irqfd->pt);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fput(file);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	if (!IS_ERR(file))
		fput(file);

	kfree(irqfd);
	return ret;
}
void
kvm_eventfd_init(struct kvm *kvm)
{
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
	INIT_LIST_HEAD(&kvm->ioeventfds);
}
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This rcu_assign_pointer is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 * It is paired with synchronize_rcu done by caller
			 * of that function.
			 */
			rcu_assign_pointer(irqfd->irq_entry, NULL);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}
/*
 * Change irq_routing and irqfd.
 * Caller must invoke synchronize_rcu afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm,
			    struct kvm_irq_routing_table *irq_rt)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	rcu_assign_pointer(kvm->irq_routing, irq_rt);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd, irq_rt);

	spin_unlock_irq(&kvm->irqfds.lock);
}
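/*
 * Sketch of the expected calling pattern (hypothetical caller shown; the
 * real one is KVM's KVM_SET_GSI_ROUTING path):
 *
 *	old = kvm->irq_routing;
 *	kvm_irq_routing_update(kvm, new);
 *	synchronize_rcu();
 *	kfree(old);
 *
 * The synchronize_rcu() guarantees that irqfd_wakeup, which reads
 * irqfd->irq_entry under rcu_read_lock, can no longer observe entries
 * from the old table by the time it is freed.
 */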
/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
static int __init irqfd_module_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

static void __exit irqfd_module_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}

module_init(irqfd_module_init);
module_exit(irqfd_module_exit);
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
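/*
 * Illustrative sketch, not part of this file: hypothetical userspace
 * registering an ioeventfd so that guest 4-byte writes of the value 1
 * to an MMIO doorbell at 0xd0000000 complete in-kernel as eventfd
 * signals. The ioctl, struct and flag names are the real KVM UAPI
 * (<linux/kvm.h>, <sys/eventfd.h>); "vmfd" is assumed to come from
 * KVM_CREATE_VM.
 *
 *	int fd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd io = {
 *		.datamatch = 1,
 *		.addr      = 0xd0000000,
 *		.len       = 4,
 *		.fd        = fd,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *
 *	if (ioctl(vmfd, KVM_IOEVENTFD, &io) < 0)
 *		err(1, "KVM_IOEVENTFD");
 *
 * Omitting KVM_IOEVENTFD_FLAG_DATAMATCH instead registers a wildcard
 * that fires on any value written to the address range.
 */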
struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	bool                 wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}
static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (!(addr == p->addr && len == p->length))
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_io_device *this, gpa_t addr, int len,
		const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}
static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->addr == p->addr && _p->length == p->length &&
		    (_p->wildcard || p->wildcard ||
		     _p->datamatch == p->datamatch))
			return true;

	return false;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	int                       pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
	enum kvm_bus              bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
	struct _ioeventfd        *p;
	struct eventfd_ctx       *eventfd;
	int                       ret;

	/* must be natural-word sized */
	switch (args->len) {
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}
static int
kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	int                       pio = args->flags & KVM_IOEVENTFD_FLAG_PIO;
	enum kvm_bus              bus_idx = pio ? KVM_PIO_BUS : KVM_MMIO_BUS;
	struct _ioeventfd        *p, *tmp;
	struct eventfd_ctx       *eventfd;
	int                       ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->eventfd != eventfd  ||
		    p->addr != args->addr  ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}
);