/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell.  All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *      Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */
#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/kvm_irqfd.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <linux/irqbypass.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>
#ifdef CONFIG_HAVE_KVM_IRQFD

static struct workqueue_struct *irqfd_cleanup_wq;
bool __attribute__((weak))
kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args)
{
        return true;
}
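/*
 * Work item handler: runs in process context when the eventfd has been
 * signalled but the interrupt could not be injected atomically from
 * irqfd_wakeup().  A non-resampled irqfd generates an edge (assert then
 * de-assert); a resampled irqfd only asserts here and is de-asserted from
 * irqfd_resampler_ack() once the guest acknowledges the interrupt.
 */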
static void
irqfd_inject(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, inject);
        struct kvm *kvm = irqfd->kvm;

        if (!irqfd->resampler) {
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
                            false);
                kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
                            false);
        } else
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            irqfd->gsi, 1, false);
}
/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI.  We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
        struct kvm_kernel_irqfd_resampler *resampler;
        struct kvm *kvm;
        struct kvm_kernel_irqfd *irqfd;
        int idx;

        resampler = container_of(kian,
                        struct kvm_kernel_irqfd_resampler, notifier);
        kvm = resampler->kvm;

        kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                    resampler->notifier.gsi, 0, false);

        idx = srcu_read_lock(&kvm->irq_srcu);

        list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
                eventfd_signal(irqfd->resamplefd, 1);

        srcu_read_unlock(&kvm->irq_srcu, idx);
}
static void
irqfd_resampler_shutdown(struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irqfd_resampler *resampler = irqfd->resampler;
        struct kvm *kvm = resampler->kvm;

        mutex_lock(&kvm->irqfds.resampler_lock);

        list_del_rcu(&irqfd->resampler_link);
        synchronize_srcu(&kvm->irq_srcu);

        if (list_empty(&resampler->list)) {
                list_del(&resampler->link);
                kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
                kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
                            resampler->notifier.gsi, 0, false);
                kfree(resampler);
        }

        mutex_unlock(&kvm->irqfds.resampler_lock);
}
/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(work, struct kvm_kernel_irqfd, shutdown);
        struct kvm *kvm = irqfd->kvm;
        u64 cnt;

        /* Make sure irqfd has been initialized in assign path. */
        synchronize_srcu(&kvm->irq_srcu);

        /*
         * Synchronize with the wait-queue and unhook ourselves to prevent
         * further events.
         */
        eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

        /*
         * We know no new events will be scheduled at this point, so block
         * until all previously outstanding events have completed
         */
        flush_work(&irqfd->inject);

        if (irqfd->resampler) {
                irqfd_resampler_shutdown(irqfd);
                eventfd_ctx_put(irqfd->resamplefd);
        }

        /*
         * It is now safe to release the object's resources
         */
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        irq_bypass_unregister_consumer(&irqfd->consumer);
#endif
        eventfd_ctx_put(irqfd->eventfd);
        kfree(irqfd);
}
/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct kvm_kernel_irqfd *irqfd)
{
        return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct kvm_kernel_irqfd *irqfd)
{
        BUG_ON(!irqfd_is_active(irqfd));

        list_del_init(&irqfd->list);

        queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}
int __attribute__((weak)) kvm_arch_set_irq_inatomic(
                                struct kvm_kernel_irq_routing_entry *irq,
                                struct kvm *kvm, int irq_source_id,
                                int level,
                                bool line_status)
{
        return -EWOULDBLOCK;
}
/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_entry_t *wait, unsigned mode, int sync, void *key)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(wait, struct kvm_kernel_irqfd, wait);
        unsigned long flags = (unsigned long)key;
        struct kvm_kernel_irq_routing_entry irq;
        struct kvm *kvm = irqfd->kvm;
        unsigned seq;
        int idx;

        if (flags & POLLIN) {
                idx = srcu_read_lock(&kvm->irq_srcu);
                do {
                        seq = read_seqcount_begin(&irqfd->irq_entry_sc);
                        irq = irqfd->irq_entry;
                } while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
                /* An event has been signaled, inject an interrupt */
                if (kvm_arch_set_irq_inatomic(&irq, kvm,
                                              KVM_USERSPACE_IRQ_SOURCE_ID, 1,
                                              false) == -EWOULDBLOCK)
                        schedule_work(&irqfd->inject);
                srcu_read_unlock(&kvm->irq_srcu, idx);
        }

        if (flags & POLLHUP) {
                /* The eventfd is closing, detach from KVM */
                unsigned long flags;

                spin_lock_irqsave(&kvm->irqfds.lock, flags);

                /*
                 * We must check if someone deactivated the irqfd before
                 * we could acquire the irqfds.lock since the item is
                 * deactivated from the KVM side before it is unhooked from
                 * the wait-queue.  If it is already deactivated, we can
                 * simply return knowing the other side will cleanup for us.
                 * We cannot race against the irqfd going away since the
                 * other side is required to acquire wqh->lock, which we hold.
                 */
                if (irqfd_is_active(irqfd))
                        irqfd_deactivate(irqfd);

                spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
        }

        return 0;
}
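/*
 * poll_table callback: invoked from the eventfd's poll() method during
 * kvm_irqfd_assign() to hook irqfd->wait (and thus irqfd_wakeup()) into the
 * eventfd's wait queue.
 */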
static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
                        poll_table *pt)
{
        struct kvm_kernel_irqfd *irqfd =
                container_of(pt, struct kvm_kernel_irqfd, pt);
        add_wait_queue(wqh, &irqfd->wait);
}
/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct kvm_kernel_irqfd *irqfd)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
        int n_entries;

        n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

        write_seqcount_begin(&irqfd->irq_entry_sc);

        e = entries;
        if (n_entries == 1)
                irqfd->irq_entry = *e;
        else
                irqfd->irq_entry.type = 0;

        write_seqcount_end(&irqfd->irq_entry_sc);
}
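/*
 * Note: irqfd_update() publishes the routing entry under irq_entry_sc so
 * that irqfd_wakeup() can read a consistent snapshot with
 * read_seqcount_begin()/read_seqcount_retry() without taking irqfds.lock
 * in the wakeup path.
 */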
#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
void __attribute__((weak)) kvm_arch_irq_bypass_stop(
                                struct irq_bypass_consumer *cons)
{
}

void __attribute__((weak)) kvm_arch_irq_bypass_start(
                                struct irq_bypass_consumer *cons)
{
}

int __attribute__((weak)) kvm_arch_update_irqfd_routing(
                                struct kvm *kvm, unsigned int host_irq,
                                uint32_t guest_irq, bool set)
{
        return 0;
}
#endif
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;
        struct fd f;
        struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
        int ret;
        unsigned int events;
        int idx;

        if (!kvm_arch_intc_initialized(kvm))
                return -EAGAIN;

        if (!kvm_arch_irqfd_allowed(kvm, args))
                return -EINVAL;

        irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
        if (!irqfd)
                return -ENOMEM;

        irqfd->kvm = kvm;
        irqfd->gsi = args->gsi;
        INIT_LIST_HEAD(&irqfd->list);
        INIT_WORK(&irqfd->inject, irqfd_inject);
        INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
        seqcount_init(&irqfd->irq_entry_sc);

        f = fdget(args->fd);
        if (!f.file) {
                ret = -EBADF;
                goto out;
        }

        eventfd = eventfd_ctx_fileget(f.file);
        if (IS_ERR(eventfd)) {
                ret = PTR_ERR(eventfd);
                goto fail;
        }

        irqfd->eventfd = eventfd;

        if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
                struct kvm_kernel_irqfd_resampler *resampler;

                resamplefd = eventfd_ctx_fdget(args->resamplefd);
                if (IS_ERR(resamplefd)) {
                        ret = PTR_ERR(resamplefd);
                        goto fail;
                }

                irqfd->resamplefd = resamplefd;
                INIT_LIST_HEAD(&irqfd->resampler_link);

                mutex_lock(&kvm->irqfds.resampler_lock);

                list_for_each_entry(resampler,
                                    &kvm->irqfds.resampler_list, link) {
                        if (resampler->notifier.gsi == irqfd->gsi) {
                                irqfd->resampler = resampler;
                                break;
                        }
                }

                if (!irqfd->resampler) {
                        resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
                        if (!resampler) {
                                ret = -ENOMEM;
                                mutex_unlock(&kvm->irqfds.resampler_lock);
                                goto fail;
                        }

                        resampler->kvm = kvm;
                        INIT_LIST_HEAD(&resampler->list);
                        resampler->notifier.gsi = irqfd->gsi;
                        resampler->notifier.irq_acked = irqfd_resampler_ack;
                        INIT_LIST_HEAD(&resampler->link);

                        list_add(&resampler->link, &kvm->irqfds.resampler_list);
                        kvm_register_irq_ack_notifier(kvm,
                                                      &resampler->notifier);
                        irqfd->resampler = resampler;
                }

                list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
                synchronize_srcu(&kvm->irq_srcu);

                mutex_unlock(&kvm->irqfds.resampler_lock);
        }
        /*
         * Install our own custom wake-up handling so we are notified via
         * a callback whenever someone signals the underlying eventfd
         */
        init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
        init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

        spin_lock_irq(&kvm->irqfds.lock);

        ret = 0;
        list_for_each_entry(tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd != tmp->eventfd)
                        continue;
                /* This fd is used for another irq already. */
                ret = -EBUSY;
                spin_unlock_irq(&kvm->irqfds.lock);
                goto fail;
        }

        idx = srcu_read_lock(&kvm->irq_srcu);
        irqfd_update(kvm, irqfd);

        list_add_tail(&irqfd->list, &kvm->irqfds.items);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Check if there was an event already pending on the eventfd
         * before we registered, and trigger it as if we didn't miss it.
         */
        events = f.file->f_op->poll(f.file, &irqfd->pt);

        if (events & POLLIN)
                schedule_work(&irqfd->inject);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
        if (kvm_arch_has_irq_bypass()) {
                irqfd->consumer.token = (void *)irqfd->eventfd;
                irqfd->consumer.add_producer = kvm_arch_irq_bypass_add_producer;
                irqfd->consumer.del_producer = kvm_arch_irq_bypass_del_producer;
                irqfd->consumer.stop = kvm_arch_irq_bypass_stop;
                irqfd->consumer.start = kvm_arch_irq_bypass_start;
                ret = irq_bypass_register_consumer(&irqfd->consumer);
                if (ret)
                        pr_info("irq bypass consumer (token %p) registration fails: %d\n",
                                irqfd->consumer.token, ret);
        }
#endif

        srcu_read_unlock(&kvm->irq_srcu, idx);

        /*
         * do not drop the file until the irqfd is fully initialized, otherwise
         * we might race against the EPOLLHUP
         */
        fdput(f);
        return 0;

fail:
        if (irqfd->resampler)
                irqfd_resampler_shutdown(irqfd);

        if (resamplefd && !IS_ERR(resamplefd))
                eventfd_ctx_put(resamplefd);

        if (eventfd && !IS_ERR(eventfd))
                eventfd_ctx_put(eventfd);

        fdput(f);
out:
        kfree(irqfd);
        return ret;
}
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        struct kvm_irq_ack_notifier *kian;
        int gsi, idx;

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                         link)
                        if (kian->gsi == gsi) {
                                srcu_read_unlock(&kvm->irq_srcu, idx);
                                return true;
                        }

        srcu_read_unlock(&kvm->irq_srcu, idx);

        return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);
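/*
 * Ack notification: the helpers below run the registered
 * kvm_irq_ack_notifier callbacks (for example irqfd_resampler_ack()) when
 * the guest acknowledges an interrupt on the given GSI or irqchip/pin.
 */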
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi)
{
        struct kvm_irq_ack_notifier *kian;

        hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
                                 link)
                if (kian->gsi == gsi)
                        kian->irq_acked(kian);
}
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
        int gsi, idx;

        trace_kvm_ack_irq(irqchip, pin);

        idx = srcu_read_lock(&kvm->irq_srcu);
        gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
        if (gsi != -1)
                kvm_notify_acked_gsi(kvm, gsi);
        srcu_read_unlock(&kvm->irq_srcu, idx);
}
void kvm_register_irq_ack_notifier(struct kvm *kvm,
                                   struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
        mutex_unlock(&kvm->irq_lock);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
                                     struct kvm_irq_ack_notifier *kian)
{
        mutex_lock(&kvm->irq_lock);
        hlist_del_init_rcu(&kian->link);
        mutex_unlock(&kvm->irq_lock);
        synchronize_srcu(&kvm->irq_srcu);
        kvm_arch_post_irq_ack_notifier_list_update(kvm);
}
#endif /* CONFIG_HAVE_KVM_IRQFD */

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
        spin_lock_init(&kvm->irqfds.lock);
        INIT_LIST_HEAD(&kvm->irqfds.items);
        INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
        mutex_init(&kvm->irqfds.resampler_lock);
#endif
        INIT_LIST_HEAD(&kvm->ioeventfds);
}
#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * shutdown any irqfd's that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;
        struct eventfd_ctx *eventfd;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
                if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
                        /*
                         * This clearing of irq_entry.type is needed for when
                         * another thread calls kvm_irq_routing_update before
                         * we flush workqueue below (we synchronize with
                         * kvm_irq_routing_update using irqfds.lock).
                         */
                        write_seqcount_begin(&irqfd->irq_entry_sc);
                        irqfd->irq_entry.type = 0;
                        write_seqcount_end(&irqfd->irq_entry_sc);
                        irqfd_deactivate(irqfd);
                }
        }

        spin_unlock_irq(&kvm->irqfds.lock);
        eventfd_ctx_put(eventfd);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * so that we guarantee there will not be any more interrupts on this
         * gsi once this deassign function returns.
         */
        flush_workqueue(irqfd_cleanup_wq);

        return 0;
}
int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
        if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
                return -EINVAL;

        if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
                return kvm_irqfd_deassign(kvm, args);

        return kvm_irqfd_assign(kvm, args);
}
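/*
 * Usage sketch (userspace side, not part of this file; vm_fd, event_fd and
 * gsi are illustrative names): an irqfd is wired up with the KVM_IRQFD
 * ioctl on the VM fd, e.g.
 *
 *      struct kvm_irqfd irqfd = {
 *              .fd  = event_fd,
 *              .gsi = gsi,
 *      };
 *      ioctl(vm_fd, KVM_IRQFD, &irqfd);
 *
 * Passing KVM_IRQFD_FLAG_DEASSIGN with the same fd/gsi removes the binding.
 * KVM_IRQFD_FLAG_RESAMPLE additionally takes a .resamplefd, which is
 * signalled from irqfd_resampler_ack() when the guest acknowledges the
 * interrupt.
 */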
/*
 * This function is called as the kvm VM fd is being released. Shutdown all
 * irqfds that still remain open
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd, *tmp;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
                irqfd_deactivate(irqfd);

        spin_unlock_irq(&kvm->irqfds.lock);

        /*
         * Block until we know all outstanding shutdown jobs have completed
         * since we do not take a kvm* reference.
         */
        flush_workqueue(irqfd_cleanup_wq);
}
/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
        struct kvm_kernel_irqfd *irqfd;

        spin_lock_irq(&kvm->irqfds.lock);

        list_for_each_entry(irqfd, &kvm->irqfds.items, list) {
                irqfd_update(kvm, irqfd);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
                if (irqfd->producer) {
                        int ret = kvm_arch_update_irqfd_routing(
                                        irqfd->kvm, irqfd->producer->irq,
                                        irqfd->gsi, 1);
                        WARN_ON(ret);
                }
#endif
        }

        spin_unlock_irq(&kvm->irqfds.lock);
}
/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated
 * queue to ease flushing work items when a VM exits.
 */
int kvm_irqfd_init(void)
{
        irqfd_cleanup_wq = alloc_workqueue("kvm-irqfd-cleanup", 0, 0);
        if (!irqfd_cleanup_wq)
                return -ENOMEM;

        return 0;
}

void kvm_irqfd_exit(void)
{
        destroy_workqueue(irqfd_cleanup_wq);
}
#endif /* CONFIG_HAVE_KVM_IRQFD */
/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */

struct _ioeventfd {
        struct list_head     list;
        u64                  addr;
        int                  length;
        struct eventfd_ctx  *eventfd;
        u64                  datamatch;
        struct kvm_io_device dev;
        u8                   bus_idx;
        bool                 wildcard;
};
static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
        return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
        eventfd_ctx_put(p->eventfd);
        list_del(&p->list);
        kfree(p);
}
static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
        u64 _val;

        if (addr != p->addr)
                /* address must be precise for a hit */
                return false;

        if (!p->length)
                /* length = 0 means only look at the address, so always a hit */
                return true;

        if (len != p->length)
                /* address-range must be precise for a hit */
                return false;

        if (p->wildcard)
                /* all else equal, wildcard is always a hit */
                return true;

        /* otherwise, we have to actually compare the data */

        BUG_ON(!IS_ALIGNED((unsigned long)val, len));

        switch (len) {
        case 1:
                _val = *(u8 *)val;
                break;
        case 2:
                _val = *(u16 *)val;
                break;
        case 4:
                _val = *(u32 *)val;
                break;
        case 8:
                _val = *(u64 *)val;
                break;
        default:
                return false;
        }

        return _val == p->datamatch ? true : false;
}
/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
                int len, const void *val)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        if (!ioeventfd_in_range(p, addr, len, val))
                return -EOPNOTSUPP;

        eventfd_signal(p->eventfd, 1);
        return 0;
}
/*
 * This function is called as KVM is completely shutting down.  We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
        struct _ioeventfd *p = to_ioeventfd(this);

        ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
        .write      = ioeventfd_write,
        .destructor = ioeventfd_destructor,
};
/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
        struct _ioeventfd *_p;

        list_for_each_entry(_p, &kvm->ioeventfds, list)
                if (_p->bus_idx == p->bus_idx &&
                    _p->addr == p->addr &&
                    (!_p->length || !p->length ||
                     (_p->length == p->length &&
                      (_p->wildcard || p->wildcard ||
                       _p->datamatch == p->datamatch))))
                        return true;

        return false;
}
static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
        if (flags & KVM_IOEVENTFD_FLAG_PIO)
                return KVM_PIO_BUS;
        if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
                return KVM_VIRTIO_CCW_NOTIFY_BUS;
        return KVM_MMIO_BUS;
}
static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
                                enum kvm_bus bus_idx,
                                struct kvm_ioeventfd *args)
{
        struct eventfd_ctx *eventfd;
        struct _ioeventfd *p;
        int ret;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        p = kzalloc(sizeof(*p), GFP_KERNEL);
        if (!p) {
                ret = -ENOMEM;
                goto fail;
        }

        INIT_LIST_HEAD(&p->list);
        p->addr    = args->addr;
        p->bus_idx = bus_idx;
        p->length  = args->len;
        p->eventfd = eventfd;

        /* The datamatch feature is optional, otherwise this is a wildcard */
        if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
                p->datamatch = args->datamatch;
        else
                p->wildcard = true;

        mutex_lock(&kvm->slots_lock);

        /* Verify that there isn't a match already */
        if (ioeventfd_check_collision(kvm, p)) {
                ret = -EEXIST;
                goto unlock_fail;
        }

        kvm_iodevice_init(&p->dev, &ioeventfd_ops);

        ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
                                      &p->dev);
        if (ret < 0)
                goto unlock_fail;

        kvm_get_bus(kvm, bus_idx)->ioeventfd_count++;
        list_add_tail(&p->list, &kvm->ioeventfds);

        mutex_unlock(&kvm->slots_lock);

        return 0;

unlock_fail:
        mutex_unlock(&kvm->slots_lock);

fail:
        kfree(p);
        eventfd_ctx_put(eventfd);

        return ret;
}
static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
                           struct kvm_ioeventfd *args)
{
        struct _ioeventfd *p, *tmp;
        struct eventfd_ctx *eventfd;
        struct kvm_io_bus *bus;
        int ret = -ENOENT;

        eventfd = eventfd_ctx_fdget(args->fd);
        if (IS_ERR(eventfd))
                return PTR_ERR(eventfd);

        mutex_lock(&kvm->slots_lock);

        list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
                bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

                if (p->bus_idx != bus_idx ||
                    p->eventfd != eventfd  ||
                    p->addr != args->addr  ||
                    p->length != args->len ||
                    p->wildcard != wildcard)
                        continue;

                if (!p->wildcard && p->datamatch != args->datamatch)
                        continue;

                kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
                bus = kvm_get_bus(kvm, bus_idx);
                if (bus)
                        bus->ioeventfd_count--;
                ioeventfd_release(p);
                ret = 0;
                break;
        }

        mutex_unlock(&kvm->slots_lock);

        eventfd_ctx_put(eventfd);

        return ret;
}
static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
        int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

        if (!args->len && bus_idx == KVM_MMIO_BUS)
                kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

        return ret;
}
static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        enum kvm_bus bus_idx;
        int ret;

        bus_idx = ioeventfd_bus_from_flags(args->flags);
        /* must be natural-word sized, or 0 to ignore length */
        switch (args->len) {
        case 0:
        case 1:
        case 2:
        case 4:
        case 8:
                break;
        default:
                return -EINVAL;
        }

        /* check for range overflow */
        if (args->addr + args->len < args->addr)
                return -EINVAL;

        /* check for extra flags that we don't understand */
        if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
                return -EINVAL;

        /* ioeventfd with no length can't be combined with DATAMATCH */
        if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
                return -EINVAL;

        ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
        if (ret)
                goto fail;

        /* When length is ignored, MMIO is also put on a separate bus, for
         * faster lookups.
         */
        if (!args->len && bus_idx == KVM_MMIO_BUS) {
                ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
                if (ret < 0)
                        goto fast_fail;
        }

        return 0;

fast_fail:
        kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
        return ret;
}
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
        if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
                return kvm_deassign_ioeventfd(kvm, args);

        return kvm_assign_ioeventfd(kvm, args);
}
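/*
 * Usage sketch (userspace side, not part of this file; vm_fd, event_fd and
 * doorbell_gpa are illustrative names): an ioeventfd is registered with the
 * KVM_IOEVENTFD ioctl on the VM fd, e.g. a 4-byte MMIO doorbell with
 * datamatch:
 *
 *      struct kvm_ioeventfd io = {
 *              .addr      = doorbell_gpa,
 *              .len       = 4,
 *              .fd        = event_fd,
 *              .datamatch = 1,
 *              .flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *      };
 *      ioctl(vm_fd, KVM_IOEVENTFD, &io);
 *
 * A guest write of the matching value to that address then signals event_fd
 * via ioeventfd_write() instead of exiting to userspace.
 * KVM_IOEVENTFD_FLAG_DEASSIGN with the same parameters removes it.
 */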