/*
 * kvm eventfd support - use eventfd objects to signal various KVM events
 *
 * Copyright 2009 Novell. All Rights Reserved.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Author:
 *	Gregory Haskins <ghaskins@novell.com>
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License
 * as published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software Foundation,
 * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/kvm.h>
#include <linux/workqueue.h>
#include <linux/syscalls.h>
#include <linux/wait.h>
#include <linux/poll.h>
#include <linux/file.h>
#include <linux/list.h>
#include <linux/eventfd.h>
#include <linux/kernel.h>
#include <linux/srcu.h>
#include <linux/slab.h>
#include <linux/seqlock.h>
#include <trace/events/kvm.h>

#include <kvm/iodev.h>

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * --------------------------------------------------------------------
 * irqfd: Allows an fd to be used to inject an interrupt to the guest
 *
 * Credit goes to Avi Kivity for the original idea.
 * --------------------------------------------------------------------
 */
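
/*
 * For reference, a minimal userspace sketch of driving this path (not
 * part of this file; it assumes "vm_fd" is a VM fd obtained via
 * KVM_CREATE_VM on a host with an in-kernel irqchip, and GSI 5 is an
 * arbitrary example):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_irqfd req = { .fd = efd, .gsi = 5 };
 *	uint64_t one = 1;
 *
 *	ioctl(vm_fd, KVM_IRQFD, &req);		(assign the irqfd)
 *	write(efd, &one, sizeof(one));		(injects GSI 5 in the guest)
 *
 *	req.flags = KVM_IRQFD_FLAG_DEASSIGN;
 *	ioctl(vm_fd, KVM_IRQFD, &req);		(tear the irqfd down again)
 */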

/*
 * Resampling irqfds are a special variety of irqfds used to emulate
 * level triggered interrupts. The interrupt is asserted on eventfd
 * trigger. On acknowledgement through the irq ack notifier, the
 * interrupt is de-asserted and userspace is notified through the
 * resamplefd. All resamplers on the same gsi are de-asserted
 * together, so we don't need to track the state of each individual
 * user. We can also therefore share the same irq source ID.
 */
struct _irqfd_resampler {
	struct kvm *kvm;
	/*
	 * List of resampling struct _irqfd objects sharing this gsi.
	 * RCU list modified under kvm->irqfds.resampler_lock
	 */
	struct list_head list;
	struct kvm_irq_ack_notifier notifier;
	/*
	 * Entry in list of kvm->irqfd.resampler_list. Used for sharing
	 * resamplers among irqfds on the same gsi.
	 * Accessed and modified under kvm->irqfds.resampler_lock
	 */
	struct list_head link;
};

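/*
 * A sketch of how userspace might use the resampling variety described
 * above (again illustrative only; "vm_fd" and the GSI are placeholders):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);	(assert side)
 *	int rfd = eventfd(0, EFD_CLOEXEC);	(resample notification)
 *	struct kvm_irqfd req = {
 *		.fd         = efd,
 *		.gsi        = 5,
 *		.flags      = KVM_IRQFD_FLAG_RESAMPLE,
 *		.resamplefd = rfd,
 *	};
 *
 *	ioctl(vm_fd, KVM_IRQFD, &req);
 *
 * Writing to efd asserts the level-triggered line; when the guest acks
 * the interrupt, the line is de-asserted and rfd becomes readable,
 * telling userspace to re-check its device and re-assert if needed.
 */
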
struct _irqfd {
	/* Used for MSI fast-path */
	struct kvm *kvm;
	wait_queue_t wait;
	/* Update side is protected by irqfds.lock */
	struct kvm_kernel_irq_routing_entry irq_entry;
	seqcount_t irq_entry_sc;
	/* Used for level IRQ fast-path */
	int gsi;
	struct work_struct inject;
	/* The resampler used by this irqfd (resampler-only) */
	struct _irqfd_resampler *resampler;
	/* Eventfd notified on resample (resampler-only) */
	struct eventfd_ctx *resamplefd;
	/* Entry in list of irqfds for a resampler (resampler-only) */
	struct list_head resampler_link;
	/* Used for setup/shutdown */
	struct eventfd_ctx *eventfd;
	struct list_head list;
	poll_table pt;
	struct work_struct shutdown;
};

static struct workqueue_struct *irqfd_cleanup_wq;

static void
irqfd_inject(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, inject);
	struct kvm *kvm = irqfd->kvm;

	if (!irqfd->resampler) {
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 1,
			    false);
		kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID, irqfd->gsi, 0,
			    false);
	} else
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    irqfd->gsi, 1, false);
}

/*
 * Since resampler irqfds share an IRQ source ID, we de-assert once
 * then notify all of the resampler irqfds using this GSI. We can't
 * do multiple de-asserts or we risk racing with incoming re-asserts.
 */
static void
irqfd_resampler_ack(struct kvm_irq_ack_notifier *kian)
{
	struct _irqfd_resampler *resampler;
	struct kvm *kvm;
	struct _irqfd *irqfd;
	int idx;

	resampler = container_of(kian, struct _irqfd_resampler, notifier);
	kvm = resampler->kvm;

	kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
		    resampler->notifier.gsi, 0, false);

	idx = srcu_read_lock(&kvm->irq_srcu);

	list_for_each_entry_rcu(irqfd, &resampler->list, resampler_link)
		eventfd_signal(irqfd->resamplefd, 1);

	srcu_read_unlock(&kvm->irq_srcu, idx);
}

static void
irqfd_resampler_shutdown(struct _irqfd *irqfd)
{
	struct _irqfd_resampler *resampler = irqfd->resampler;
	struct kvm *kvm = resampler->kvm;

	mutex_lock(&kvm->irqfds.resampler_lock);

	list_del_rcu(&irqfd->resampler_link);
	synchronize_srcu(&kvm->irq_srcu);

	if (list_empty(&resampler->list)) {
		list_del(&resampler->link);
		kvm_unregister_irq_ack_notifier(kvm, &resampler->notifier);
		kvm_set_irq(kvm, KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID,
			    resampler->notifier.gsi, 0, false);
		kfree(resampler);
	}

	mutex_unlock(&kvm->irqfds.resampler_lock);
}

/*
 * Race-free decouple logic (ordering is critical)
 */
static void
irqfd_shutdown(struct work_struct *work)
{
	struct _irqfd *irqfd = container_of(work, struct _irqfd, shutdown);
	u64 cnt;

	/*
	 * Synchronize with the wait-queue and unhook ourselves to prevent
	 * further events.
	 */
	eventfd_ctx_remove_wait_queue(irqfd->eventfd, &irqfd->wait, &cnt);

	/*
	 * We know no new events will be scheduled at this point, so block
	 * until all previously outstanding events have completed
	 */
	flush_work(&irqfd->inject);

	if (irqfd->resampler) {
		irqfd_resampler_shutdown(irqfd);
		eventfd_ctx_put(irqfd->resamplefd);
	}

	/*
	 * It is now safe to release the object's resources
	 */
	eventfd_ctx_put(irqfd->eventfd);
	kfree(irqfd);
}

/* assumes kvm->irqfds.lock is held */
static bool
irqfd_is_active(struct _irqfd *irqfd)
{
	return list_empty(&irqfd->list) ? false : true;
}

/*
 * Mark the irqfd as inactive and schedule it for removal
 *
 * assumes kvm->irqfds.lock is held
 */
static void
irqfd_deactivate(struct _irqfd *irqfd)
{
	BUG_ON(!irqfd_is_active(irqfd));

	list_del_init(&irqfd->list);

	queue_work(irqfd_cleanup_wq, &irqfd->shutdown);
}

/*
 * Called with wqh->lock held and interrupts disabled
 */
static int
irqfd_wakeup(wait_queue_t *wait, unsigned mode, int sync, void *key)
{
	struct _irqfd *irqfd = container_of(wait, struct _irqfd, wait);
	unsigned long flags = (unsigned long)key;
	struct kvm_kernel_irq_routing_entry irq;
	struct kvm *kvm = irqfd->kvm;
	unsigned seq;
	int idx;

	if (flags & POLLIN) {
		idx = srcu_read_lock(&kvm->irq_srcu);
		do {
			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
			irq = irqfd->irq_entry;
		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
		/* An event has been signaled, inject an interrupt */
		if (irq.type == KVM_IRQ_ROUTING_MSI)
			kvm_set_msi(&irq, kvm, KVM_USERSPACE_IRQ_SOURCE_ID, 1,
				    false);
		else
			schedule_work(&irqfd->inject);
		srcu_read_unlock(&kvm->irq_srcu, idx);
	}

	if (flags & POLLHUP) {
		/* The eventfd is closing, detach from KVM */
		unsigned long flags;

		spin_lock_irqsave(&kvm->irqfds.lock, flags);

		/*
		 * We must check if someone deactivated the irqfd before
		 * we could acquire the irqfds.lock since the item is
		 * deactivated from the KVM side before it is unhooked from
		 * the wait-queue. If it is already deactivated, we can
		 * simply return knowing the other side will clean up for us.
		 * We cannot race against the irqfd going away since the
		 * other side is required to acquire wqh->lock, which we hold
		 */
		if (irqfd_is_active(irqfd))
			irqfd_deactivate(irqfd);

		spin_unlock_irqrestore(&kvm->irqfds.lock, flags);
	}

	return 0;
}

static void
irqfd_ptable_queue_proc(struct file *file, wait_queue_head_t *wqh,
			poll_table *pt)
{
	struct _irqfd *irqfd = container_of(pt, struct _irqfd, pt);
	add_wait_queue(wqh, &irqfd->wait);
}

/* Must be called under irqfds.lock */
static void irqfd_update(struct kvm *kvm, struct _irqfd *irqfd)
{
	struct kvm_kernel_irq_routing_entry *e;
	struct kvm_kernel_irq_routing_entry entries[KVM_NR_IRQCHIPS];
	int i, n_entries;

	n_entries = kvm_irq_map_gsi(kvm, entries, irqfd->gsi);

	write_seqcount_begin(&irqfd->irq_entry_sc);

	irqfd->irq_entry.type = 0;

	e = entries;
	for (i = 0; i < n_entries; ++i, ++e) {
		/* Only fast-path MSI. */
		if (e->type == KVM_IRQ_ROUTING_MSI)
			irqfd->irq_entry = *e;
	}

	write_seqcount_end(&irqfd->irq_entry_sc);
}

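/*
 * The write_seqcount_begin()/write_seqcount_end() pair above publishes
 * irq_entry to the lockless reader in irqfd_wakeup(), which retries via
 * read_seqcount_retry() until it observes a consistent snapshot. A
 * condensed view of the two sides, mirroring the code in this file:
 *
 *	writer (irqfd_update, under irqfds.lock):
 *		write_seqcount_begin(&irqfd->irq_entry_sc);
 *		irqfd->irq_entry = ...;
 *		write_seqcount_end(&irqfd->irq_entry_sc);
 *
 *	reader (irqfd_wakeup, no lock held):
 *		do {
 *			seq = read_seqcount_begin(&irqfd->irq_entry_sc);
 *			irq = irqfd->irq_entry;
 *		} while (read_seqcount_retry(&irqfd->irq_entry_sc, seq));
 */
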
static int
kvm_irqfd_assign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct fd f;
	struct eventfd_ctx *eventfd = NULL, *resamplefd = NULL;
	int ret;
	unsigned int events;
	int idx;

	if (!kvm_arch_intc_initialized(kvm))
		return -EAGAIN;

	irqfd = kzalloc(sizeof(*irqfd), GFP_KERNEL);
	if (!irqfd)
		return -ENOMEM;

	irqfd->kvm = kvm;
	irqfd->gsi = args->gsi;
	INIT_LIST_HEAD(&irqfd->list);
	INIT_WORK(&irqfd->inject, irqfd_inject);
	INIT_WORK(&irqfd->shutdown, irqfd_shutdown);
	seqcount_init(&irqfd->irq_entry_sc);

	f = fdget(args->fd);
	if (!f.file) {
		ret = -EBADF;
		goto out;
	}

	eventfd = eventfd_ctx_fileget(f.file);
	if (IS_ERR(eventfd)) {
		ret = PTR_ERR(eventfd);
		goto fail;
	}

	irqfd->eventfd = eventfd;

	if (args->flags & KVM_IRQFD_FLAG_RESAMPLE) {
		struct _irqfd_resampler *resampler;

		resamplefd = eventfd_ctx_fdget(args->resamplefd);
		if (IS_ERR(resamplefd)) {
			ret = PTR_ERR(resamplefd);
			goto fail;
		}

		irqfd->resamplefd = resamplefd;
		INIT_LIST_HEAD(&irqfd->resampler_link);

		mutex_lock(&kvm->irqfds.resampler_lock);

		list_for_each_entry(resampler,
				    &kvm->irqfds.resampler_list, link) {
			if (resampler->notifier.gsi == irqfd->gsi) {
				irqfd->resampler = resampler;
				break;
			}
		}

		if (!irqfd->resampler) {
			resampler = kzalloc(sizeof(*resampler), GFP_KERNEL);
			if (!resampler) {
				ret = -ENOMEM;
				mutex_unlock(&kvm->irqfds.resampler_lock);
				goto fail;
			}

			resampler->kvm = kvm;
			INIT_LIST_HEAD(&resampler->list);
			resampler->notifier.gsi = irqfd->gsi;
			resampler->notifier.irq_acked = irqfd_resampler_ack;
			INIT_LIST_HEAD(&resampler->link);

			list_add(&resampler->link, &kvm->irqfds.resampler_list);
			kvm_register_irq_ack_notifier(kvm,
						      &resampler->notifier);
			irqfd->resampler = resampler;
		}

		list_add_rcu(&irqfd->resampler_link, &irqfd->resampler->list);
		synchronize_srcu(&kvm->irq_srcu);

		mutex_unlock(&kvm->irqfds.resampler_lock);
	}

	/*
	 * Install our own custom wake-up handling so we are notified via
	 * a callback whenever someone signals the underlying eventfd
	 */
	init_waitqueue_func_entry(&irqfd->wait, irqfd_wakeup);
	init_poll_funcptr(&irqfd->pt, irqfd_ptable_queue_proc);

	spin_lock_irq(&kvm->irqfds.lock);

	ret = 0;
	list_for_each_entry(tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd != tmp->eventfd)
			continue;
		/* This fd is used for another irq already. */
		ret = -EBUSY;
		spin_unlock_irq(&kvm->irqfds.lock);
		goto fail;
	}

	idx = srcu_read_lock(&kvm->irq_srcu);
	irqfd_update(kvm, irqfd);
	srcu_read_unlock(&kvm->irq_srcu, idx);

	list_add_tail(&irqfd->list, &kvm->irqfds.items);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Check if there was an event already pending on the eventfd
	 * before we registered, and trigger it as if we didn't miss it.
	 */
	events = f.file->f_op->poll(f.file, &irqfd->pt);

	if (events & POLLIN)
		schedule_work(&irqfd->inject);

	/*
	 * do not drop the file until the irqfd is fully initialized, otherwise
	 * we might race against the POLLHUP
	 */
	fdput(f);

	return 0;

fail:
	if (irqfd->resampler)
		irqfd_resampler_shutdown(irqfd);

	if (resamplefd && !IS_ERR(resamplefd))
		eventfd_ctx_put(resamplefd);

	if (eventfd && !IS_ERR(eventfd))
		eventfd_ctx_put(eventfd);

	fdput(f);

out:
	kfree(irqfd);
	return ret;
}

bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi) {
				srcu_read_unlock(&kvm->irq_srcu, idx);
				return true;
			}

	srcu_read_unlock(&kvm->irq_srcu, idx);

	return false;
}
EXPORT_SYMBOL_GPL(kvm_irq_has_notifier);

void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin)
{
	struct kvm_irq_ack_notifier *kian;
	int gsi, idx;

	trace_kvm_ack_irq(irqchip, pin);

	idx = srcu_read_lock(&kvm->irq_srcu);
	gsi = kvm_irq_map_chip_pin(kvm, irqchip, pin);
	if (gsi != -1)
		hlist_for_each_entry_rcu(kian, &kvm->irq_ack_notifier_list,
					 link)
			if (kian->gsi == gsi)
				kian->irq_acked(kian);
	srcu_read_unlock(&kvm->irq_srcu, idx);
}

void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_add_head_rcu(&kian->link, &kvm->irq_ack_notifier_list);
	mutex_unlock(&kvm->irq_lock);
	kvm_vcpu_request_scan_ioapic(kvm);
}

void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian)
{
	mutex_lock(&kvm->irq_lock);
	hlist_del_init_rcu(&kian->link);
	mutex_unlock(&kvm->irq_lock);
	synchronize_srcu(&kvm->irq_srcu);
	kvm_vcpu_request_scan_ioapic(kvm);
}
#endif

void
kvm_eventfd_init(struct kvm *kvm)
{
#ifdef CONFIG_HAVE_KVM_IRQFD
	spin_lock_init(&kvm->irqfds.lock);
	INIT_LIST_HEAD(&kvm->irqfds.items);
	INIT_LIST_HEAD(&kvm->irqfds.resampler_list);
	mutex_init(&kvm->irqfds.resampler_lock);
#endif
	INIT_LIST_HEAD(&kvm->ioeventfds);
}

#ifdef CONFIG_HAVE_KVM_IRQFD
/*
 * Shut down any irqfds that match fd+gsi
 */
static int
kvm_irqfd_deassign(struct kvm *kvm, struct kvm_irqfd *args)
{
	struct _irqfd *irqfd, *tmp;
	struct eventfd_ctx *eventfd;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list) {
		if (irqfd->eventfd == eventfd && irqfd->gsi == args->gsi) {
			/*
			 * This clearing of irq_entry.type is needed for when
			 * another thread calls kvm_irq_routing_update before
			 * we flush the workqueue below (we synchronize with
			 * kvm_irq_routing_update using irqfds.lock).
			 */
			write_seqcount_begin(&irqfd->irq_entry_sc);
			irqfd->irq_entry.type = 0;
			write_seqcount_end(&irqfd->irq_entry_sc);
			irqfd_deactivate(irqfd);
		}
	}

	spin_unlock_irq(&kvm->irqfds.lock);
	eventfd_ctx_put(eventfd);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * so that we guarantee there will not be any more interrupts on this
	 * gsi once this deassign function returns.
	 */
	flush_workqueue(irqfd_cleanup_wq);

	return 0;
}

int
kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	if (args->flags & ~(KVM_IRQFD_FLAG_DEASSIGN | KVM_IRQFD_FLAG_RESAMPLE))
		return -EINVAL;

	if (args->flags & KVM_IRQFD_FLAG_DEASSIGN)
		return kvm_irqfd_deassign(kvm, args);

	return kvm_irqfd_assign(kvm, args);
}

/*
 * This function is called as the kvm VM fd is being released. Shut down
 * all irqfds that still remain open.
 */
void
kvm_irqfd_release(struct kvm *kvm)
{
	struct _irqfd *irqfd, *tmp;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry_safe(irqfd, tmp, &kvm->irqfds.items, list)
		irqfd_deactivate(irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);

	/*
	 * Block until we know all outstanding shutdown jobs have completed
	 * since we do not take a kvm* reference.
	 */
	flush_workqueue(irqfd_cleanup_wq);
}

/*
 * Take note of a change in irq routing.
 * Caller must invoke synchronize_srcu(&kvm->irq_srcu) afterwards.
 */
void kvm_irq_routing_update(struct kvm *kvm)
{
	struct _irqfd *irqfd;

	spin_lock_irq(&kvm->irqfds.lock);

	list_for_each_entry(irqfd, &kvm->irqfds.items, list)
		irqfd_update(kvm, irqfd);

	spin_unlock_irq(&kvm->irqfds.lock);
}

/*
 * create a host-wide workqueue for issuing deferred shutdown requests
 * aggregated from all vm* instances. We need our own isolated single-thread
 * queue to prevent deadlock against flushing the normal work-queue.
 */
int kvm_irqfd_init(void)
{
	irqfd_cleanup_wq = create_singlethread_workqueue("kvm-irqfd-cleanup");
	if (!irqfd_cleanup_wq)
		return -ENOMEM;

	return 0;
}

void kvm_irqfd_exit(void)
{
	destroy_workqueue(irqfd_cleanup_wq);
}
#endif

/*
 * --------------------------------------------------------------------
 * ioeventfd: translate a PIO/MMIO memory write to an eventfd signal.
 *
 * userspace can register a PIO/MMIO address with an eventfd for receiving
 * notification when the memory has been touched.
 * --------------------------------------------------------------------
 */
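
/*
 * For reference, a minimal userspace sketch of registering one (not part
 * of this file; "vm_fd" and the guest-physical address are placeholders):
 *
 *	int efd = eventfd(0, EFD_CLOEXEC);
 *	struct kvm_ioeventfd req = {
 *		.addr      = 0xfe000000,	(guest-physical address)
 *		.len       = 4,
 *		.fd        = efd,
 *		.datamatch = 1,
 *		.flags     = KVM_IOEVENTFD_FLAG_DATAMATCH,
 *	};
 *
 *	ioctl(vm_fd, KVM_IOEVENTFD, &req);
 *
 * A 4-byte guest write of the value 1 to that address now signals efd
 * instead of exiting to userspace; without KVM_IOEVENTFD_FLAG_DATAMATCH
 * the registration is a wildcard that fires on any written value.
 */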

struct _ioeventfd {
	struct list_head     list;
	u64                  addr;
	int                  length;
	struct eventfd_ctx  *eventfd;
	u64                  datamatch;
	struct kvm_io_device dev;
	u8                   bus_idx;
	bool                 wildcard;
};

static inline struct _ioeventfd *
to_ioeventfd(struct kvm_io_device *dev)
{
	return container_of(dev, struct _ioeventfd, dev);
}

static void
ioeventfd_release(struct _ioeventfd *p)
{
	eventfd_ctx_put(p->eventfd);
	list_del(&p->list);
	kfree(p);
}

static bool
ioeventfd_in_range(struct _ioeventfd *p, gpa_t addr, int len, const void *val)
{
	u64 _val;

	if (addr != p->addr)
		/* address must be precise for a hit */
		return false;

	if (!p->length)
		/* length = 0 means only look at the address, so always a hit */
		return true;

	if (len != p->length)
		/* address-range must be precise for a hit */
		return false;

	if (p->wildcard)
		/* all else equal, wildcard is always a hit */
		return true;

	/* otherwise, we have to actually compare the data */

	BUG_ON(!IS_ALIGNED((unsigned long)val, len));

	switch (len) {
	case 1:
		_val = *(u8 *)val;
		break;
	case 2:
		_val = *(u16 *)val;
		break;
	case 4:
		_val = *(u32 *)val;
		break;
	case 8:
		_val = *(u64 *)val;
		break;
	default:
		return false;
	}

	return _val == p->datamatch ? true : false;
}

/* MMIO/PIO writes trigger an event if the addr/val match */
static int
ioeventfd_write(struct kvm_vcpu *vcpu, struct kvm_io_device *this, gpa_t addr,
		int len, const void *val)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	if (!ioeventfd_in_range(p, addr, len, val))
		return -EOPNOTSUPP;

	eventfd_signal(p->eventfd, 1);
	return 0;
}

/*
 * This function is called as KVM is completely shutting down. We do not
 * need to worry about locking; just nuke anything we have as quickly as
 * possible.
 */
static void
ioeventfd_destructor(struct kvm_io_device *this)
{
	struct _ioeventfd *p = to_ioeventfd(this);

	ioeventfd_release(p);
}

static const struct kvm_io_device_ops ioeventfd_ops = {
	.write      = ioeventfd_write,
	.destructor = ioeventfd_destructor,
};

/* assumes kvm->slots_lock held */
static bool
ioeventfd_check_collision(struct kvm *kvm, struct _ioeventfd *p)
{
	struct _ioeventfd *_p;

	list_for_each_entry(_p, &kvm->ioeventfds, list)
		if (_p->bus_idx == p->bus_idx &&
		    _p->addr == p->addr &&
		    (!_p->length || !p->length ||
		     (_p->length == p->length &&
		      (_p->wildcard || p->wildcard ||
		       _p->datamatch == p->datamatch))))
			return true;

	return false;
}

static enum kvm_bus ioeventfd_bus_from_flags(__u32 flags)
{
	if (flags & KVM_IOEVENTFD_FLAG_PIO)
		return KVM_PIO_BUS;
	if (flags & KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY)
		return KVM_VIRTIO_CCW_NOTIFY_BUS;
	return KVM_MMIO_BUS;
}

static int kvm_assign_ioeventfd_idx(struct kvm *kvm,
				    enum kvm_bus bus_idx,
				    struct kvm_ioeventfd *args)
{
	struct eventfd_ctx *eventfd;
	struct _ioeventfd *p;
	int ret;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p) {
		ret = -ENOMEM;
		goto fail;
	}

	INIT_LIST_HEAD(&p->list);
	p->addr    = args->addr;
	p->bus_idx = bus_idx;
	p->length  = args->len;
	p->eventfd = eventfd;

	/* The datamatch feature is optional, otherwise this is a wildcard */
	if (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH)
		p->datamatch = args->datamatch;
	else
		p->wildcard = true;

	mutex_lock(&kvm->slots_lock);

	/* Verify that there isn't a match already */
	if (ioeventfd_check_collision(kvm, p)) {
		ret = -EEXIST;
		goto unlock_fail;
	}

	kvm_iodevice_init(&p->dev, &ioeventfd_ops);

	ret = kvm_io_bus_register_dev(kvm, bus_idx, p->addr, p->length,
				      &p->dev);
	if (ret < 0)
		goto unlock_fail;

	kvm->buses[bus_idx]->ioeventfd_count++;
	list_add_tail(&p->list, &kvm->ioeventfds);

	mutex_unlock(&kvm->slots_lock);

	return 0;

unlock_fail:
	mutex_unlock(&kvm->slots_lock);

fail:
	kfree(p);
	eventfd_ctx_put(eventfd);

	return ret;
}

static int
kvm_deassign_ioeventfd_idx(struct kvm *kvm, enum kvm_bus bus_idx,
			   struct kvm_ioeventfd *args)
{
	struct _ioeventfd *p, *tmp;
	struct eventfd_ctx *eventfd;
	int ret = -ENOENT;

	eventfd = eventfd_ctx_fdget(args->fd);
	if (IS_ERR(eventfd))
		return PTR_ERR(eventfd);

	mutex_lock(&kvm->slots_lock);

	list_for_each_entry_safe(p, tmp, &kvm->ioeventfds, list) {
		bool wildcard = !(args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH);

		if (p->bus_idx != bus_idx ||
		    p->eventfd != eventfd ||
		    p->addr != args->addr ||
		    p->length != args->len ||
		    p->wildcard != wildcard)
			continue;

		if (!p->wildcard && p->datamatch != args->datamatch)
			continue;

		kvm_io_bus_unregister_dev(kvm, bus_idx, &p->dev);
		kvm->buses[bus_idx]->ioeventfd_count--;
		ioeventfd_release(p);
		ret = 0;
		break;
	}

	mutex_unlock(&kvm->slots_lock);

	eventfd_ctx_put(eventfd);

	return ret;
}

static int kvm_deassign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx = ioeventfd_bus_from_flags(args->flags);
	int ret = kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);

	if (!args->len && bus_idx == KVM_MMIO_BUS)
		kvm_deassign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);

	return ret;
}

static int
kvm_assign_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	enum kvm_bus bus_idx;
	int ret;

	bus_idx = ioeventfd_bus_from_flags(args->flags);
	/* must be natural-word sized, or 0 to ignore length */
	switch (args->len) {
	case 0:
	case 1:
	case 2:
	case 4:
	case 8:
		break;
	default:
		return -EINVAL;
	}

	/* check for range overflow */
	if (args->addr + args->len < args->addr)
		return -EINVAL;

	/* check for extra flags that we don't understand */
	if (args->flags & ~KVM_IOEVENTFD_VALID_FLAG_MASK)
		return -EINVAL;

	/* ioeventfd with no length can't be combined with DATAMATCH */
	if (!args->len && (args->flags & KVM_IOEVENTFD_FLAG_DATAMATCH))
		return -EINVAL;

	ret = kvm_assign_ioeventfd_idx(kvm, bus_idx, args);
	if (ret)
		goto fail;

	/* When length is ignored, MMIO is also put on a separate bus, for
	 * faster lookups (see the sketch after this function).
	 */
	if (!args->len && bus_idx == KVM_MMIO_BUS) {
		ret = kvm_assign_ioeventfd_idx(kvm, KVM_FAST_MMIO_BUS, args);
		if (ret < 0)
			goto fast_fail;
	}

	return 0;

fast_fail:
	kvm_deassign_ioeventfd_idx(kvm, bus_idx, args);
fail:
	return ret;
}

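/*
 * As noted in kvm_assign_ioeventfd(), a zero-length MMIO ioeventfd is
 * additionally registered on KVM_FAST_MMIO_BUS. A sketch of such an
 * address-only registration (same placeholder vm_fd/efd/address as the
 * sketch near the top of this section):
 *
 *	struct kvm_ioeventfd req = {
 *		.addr = 0xfe000000,
 *		.len  = 0,		(match on the address alone)
 *		.fd   = efd,
 *	};
 *	ioctl(vm_fd, KVM_IOEVENTFD, &req);
 *
 * This suits cases such as virtio notify writes, where the written
 * value does not matter.
 */
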
int
kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	if (args->flags & KVM_IOEVENTFD_FLAG_DEASSIGN)
		return kvm_deassign_ioeventfd(kvm, args);

	return kvm_assign_ioeventfd(kvm, args);
}