#ifndef __KVM_HOST_H
#define __KVM_HOST_H

/*
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 */

#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/bug.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by kvm;
 * the other bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)
#define KVM_MEMSLOT_INCOHERENT	(1UL << 17)

/* Two fragments for cross MMIO pages. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62 ~ 52 to indicate an error pfn,
 * and mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * error pfns indicate that the gfn is in a slot but failed to
 * translate to a pfn on the host.
 */
static inline bool is_error_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not in a slot, or translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* noslot pfns indicate that the gfn is not in any slot. */
static inline bool is_noslot_pfn(pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}
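
/*
 * Illustrative (hedged, not compiled) sketch of how a fault handler might
 * classify a translation result with the helpers above; handle_mmio() and
 * handle_error() are hypothetical stand-ins for arch-specific handling:
 *
 *	pfn_t pfn = gfn_to_pfn(kvm, gfn);
 *
 *	if (is_noslot_pfn(pfn))
 *		return handle_mmio(gfn);	// gfn not backed by any memslot
 *	if (is_error_pfn(pfn))
 *		return handle_error(pfn);	// in a slot, but translation failed
 *	// otherwise: a valid host pfn
 */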

/*
 * architectures with KVM_HVA_ERR_BAD other than PAGE_OFFSET (e.g. s390)
 * provide their own defines and their own kvm_is_error_hva()
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

/*
 * vcpu->requests bit members
 */
#define KVM_REQ_TLB_FLUSH          0
#define KVM_REQ_MIGRATE_TIMER      1
#define KVM_REQ_REPORT_TPR_ACCESS  2
#define KVM_REQ_MMU_RELOAD         3
#define KVM_REQ_TRIPLE_FAULT       4
#define KVM_REQ_PENDING_TIMER      5
#define KVM_REQ_UNHALT             6
#define KVM_REQ_MMU_SYNC           7
#define KVM_REQ_CLOCK_UPDATE       8
#define KVM_REQ_KICK               9
#define KVM_REQ_DEACTIVATE_FPU    10
#define KVM_REQ_EVENT             11
#define KVM_REQ_APF_HALT          12
#define KVM_REQ_STEAL_UPDATE      13
#define KVM_REQ_NMI               14
#define KVM_REQ_PMU               15
#define KVM_REQ_PMI               16
#define KVM_REQ_WATCHDOG          17
#define KVM_REQ_MASTERCLOCK_UPDATE 18
#define KVM_REQ_MCLOCK_INPROGRESS 19
#define KVM_REQ_EPR_EXIT          20
#define KVM_REQ_SCAN_IOAPIC       21
#define KVM_REQ_GLOBAL_CLOCK_UPDATE 22
#define KVM_REQ_ENABLE_IBS        23
#define KVM_REQ_DISABLE_IBS       24
#define KVM_REQ_APIC_PAGE_RELOAD  25
#define KVM_REQ_SMI               26
#define KVM_REQ_HV_CRASH          27
#define KVM_REQ_IOAPIC_EOI_EXIT   28
#define KVM_REQ_HV_RESET          29
#define KVM_REQ_HV_EXIT           30
#define KVM_REQ_HV_STIMER         31

#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct kmem_cache *kvm_vcpu_cache;

extern spinlock_t kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
void kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			       struct kvm_io_device *dev);
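
/*
 * Hedged usage sketch: dispatching a decoded guest MMIO write on the MMIO
 * bus ('gpa' and 'val' are assumed to come from the caller's exit decoding):
 *
 *	if (kvm_io_bus_write(vcpu, KVM_MMIO_BUS, gpa, sizeof(val), &val))
 *		// no in-kernel device claimed the range; exit to userspace
 *
 * A zero return means some registered kvm_io_device handled the access.
 */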

#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gva_t gva;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
int kvm_setup_async_pf(struct kvm_vcpu *vcpu, gva_t gva, unsigned long hva,
		       struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};
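
/*
 * Worked example (illustrative, 4 KiB pages): an 8-byte MMIO write to
 * gpa 0x1ffc crosses a page boundary and is split into two fragments,
 * { .gpa = 0x1ffc, .len = 4 } and { .gpa = 0x2000, .len = 4 }, each
 * serviced by its own KVM_EXIT_MMIO round trip to userspace.
 */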

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id;
	int srcu_idx;
	int mode;
	unsigned long requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	int fpu_active;
	int guest_fpu_loaded, guest_xcr0_loaded;
	unsigned char fpu_counter;
	wait_queue_head_t wq;
	struct pid *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * CPU relax intercept / pause-loop-exit optimization.
	 * in_spin_loop: set while a vcpu is handling a pause loop exit
	 * or an intercepted cpu relax.
	 * dy_eligible: indicates whether the vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	struct kvm_vcpu_arch arch;
};

static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support arbitrarily long bitmaps;
 * this limit must be chosen so that it never exceeds them.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
};

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}
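
/*
 * Worked example (illustrative): on a 64-bit host (BITS_PER_LONG == 64),
 * a slot with npages == 5 gets ALIGN(5, 64) / 8 == 8 bytes of dirty
 * bitmap, i.e. one bit per guest page, rounded up to a whole long.
 */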

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct msi_msg msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi. Each entry contains a list of the irq chips
	 * the gsi is connected to.
	 */
	struct hlist_head map[0];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#ifndef KVM_MEM_SLOTS_NUM
#define KVM_MEM_SLOTS_NUM (KVM_USER_MEM_SLOTS + KVM_PRIVATE_MEM_SLOTS)
#endif

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are not sorted by id anymore, please use id_to_memslot()
 * to get the memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	struct kvm_memory_slot memslots[KVM_MEM_SLOTS_NUM];
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
};

struct kvm {
	spinlock_t mmu_lock;
	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
	atomic_t online_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	atomic_t users_count;
#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
#endif
	long tlbs_dirty;
	struct list_head devices;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu, in case
	 * the caller has read kvm->online_vcpus before (as is the case
	 * for kvm_for_each_vcpu, for example).
	 */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu;
	int i;

	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

#define kvm_for_each_memslot(memslot, slots)	\
	for (memslot = &slots->memslots[0];	\
	     memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
	     memslot++)

int kvm_vcpu_init(struct kvm_vcpu *vcpu, struct kvm *kvm, unsigned id);
void kvm_vcpu_uninit(struct kvm_vcpu *vcpu);

int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	return rcu_dereference_check(kvm->memslots[as_id],
			srcu_read_lock_held(&kvm->srcu)
			|| lockdep_is_held(&kvm->slots_lock));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline struct kvm_memory_slot *
id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};
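
/*
 * Illustrative userspace sketch (hedged): creating slot 0, mapping 64 MiB
 * of guest physical memory at GPA 0 onto an mmap()ed host buffer ('vm_fd'
 * and 'host_buf' are hypothetical names):
 *
 *	struct kvm_userspace_memory_region mem = {
 *		.slot            = 0,
 *		.flags           = 0,
 *		.guest_phys_addr = 0,
 *		.memory_size     = 64 << 20,
 *		.userspace_addr  = (__u64)host_buf,
 *	};
 *	ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
 *
 * Deleting a slot corresponds to memory_size == 0 (KVM_MR_DELETE), a
 * changed guest_phys_addr to KVM_MR_MOVE, and a flags-only update to
 * KVM_MR_FLAGS_ONLY.
 */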

int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont);
int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages);
void kvm_arch_memslots_updated(struct kvm *kvm, struct kvm_memslots *slots);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
bool kvm_largepages_enabled(void);
void kvm_disable_largepages(void);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

pfn_t gfn_to_pfn_atomic(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
		      bool *writable);
pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn, bool atomic,
			   bool *async, bool write_fault, bool *writable);

void kvm_release_pfn_clean(pfn_t pfn);
void kvm_set_pfn_dirty(pfn_t pfn);
void kvm_set_pfn_accessed(pfn_t pfn);
void kvm_get_pfn(pfn_t pfn);

int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
			  unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
int kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm *kvm, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);
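
/*
 * Hedged usage sketch: round-tripping a guest-physical structure with the
 * accessors above ('gpa' and 'struct my_cfg' are hypothetical). Both
 * helpers return 0 on success and non-zero on a bad guest address:
 *
 *	struct my_cfg cfg;
 *
 *	if (kvm_read_guest(kvm, gpa, &cfg, sizeof(cfg)))
 *		return -EFAULT;
 *	cfg.status = 1;
 *	if (kvm_write_guest(kvm, gpa, &cfg, sizeof(cfg)))
 *		return -EFAULT;
 */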

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
void kvm_load_guest_fpu(struct kvm_vcpu *vcpu);
void kvm_put_guest_fpu(struct kvm_vcpu *vcpu);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);
void kvm_make_mclock_inprogress_request(struct kvm *kvm);
void kvm_make_scan_ioapic_request(struct kvm *kvm);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

int kvm_get_dirty_log(struct kvm *kvm,
		      struct kvm_dirty_log *log, int *is_dirty);

int kvm_get_dirty_log_protect(struct kvm *kvm,
			      struct kvm_dirty_log *log, bool *is_dirty);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log);

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void);
void kvm_arch_hardware_unsetup(void);
void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);

void *kvm_kvzalloc(unsigned long size);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline wait_queue_head_t *kvm_arch_vcpu_wq(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.wqp;
#else
	return &vcpu->wq;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs. On some architectures the virtual interrupt
 * controller is dynamically instantiated, so this is not always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);

#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
#else
static inline int kvm_iommu_map_pages(struct kvm *kvm,
				      struct kvm_memory_slot *slot)
{
	return 0;
}

static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
					 struct kvm_memory_slot *slot)
{
}
#endif

/* must be called with irqs disabled */
static inline void __kvm_guest_enter(void)
{
	guest_enter();
	/* KVM does not hold any references to rcu protected data when it
	 * switches the CPU into guest mode. In fact switching to guest mode
	 * is very similar to exiting to userspace from an RCU point of view.
	 * In addition the CPU may stay in guest mode for quite a long time
	 * (up to one time slice). Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_cpu_is_enabled())
		rcu_virt_note_context_switch(smp_processor_id());
}

/* must be called with irqs disabled */
static inline void __kvm_guest_exit(void)
{
	guest_exit();
}

static inline void kvm_guest_enter(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_enter();
	local_irq_restore(flags);
}

static inline void kvm_guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	__kvm_guest_exit();
	local_irq_restore(flags);
}
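
/*
 * Hedged sketch of how an arch run loop might bracket guest execution with
 * the helpers above (enter_guest_hw() is a hypothetical stand-in for the
 * arch-specific world switch, e.g. VMRUN/VMLAUNCH):
 *
 *	kvm_guest_enter();
 *	enter_guest_hw(vcpu);
 *	kvm_guest_exit();
 */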

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}
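
/*
 * Note (explanatory): the binary search above relies on memslots[] being
 * kept sorted by base_gfn in descending order at update time, while
 * lru_slot acts as an atomic one-entry cache for the common case of
 * repeated lookups hitting the same slot.
 */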

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

static inline unsigned long
__gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn)
{
	return slot->userspace_addr + (gfn - slot->base_gfn) * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}
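
/*
 * Worked example (illustrative, 4 KiB pages, PAGE_SHIFT == 12):
 * gpa_to_gfn(0x12345) == 0x12, and gfn_to_gpa(0x12) == 0x12000, i.e. the
 * conversions drop and then restore the 0x345 offset-in-page bits.
 */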

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

static inline void kvm_migrate_timers(struct kvm_vcpu *vcpu)
{
	set_bit(KVM_REQ_MIGRATE_TIMER, &vcpu->requests);
}

enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	struct dentry *dentry;
};
extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq. This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock
	 * rather than under kvm->mmu_lock, for scalability, so
	 * can't rely on kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif
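
/*
 * Hedged sketch of the intended usage pattern (modeled on the x86 page
 * fault path): snapshot mmu_notifier_seq before the sleepable gfn->pfn
 * translation, then recheck under mmu_lock before installing a mapping:
 *
 *	mmu_seq = kvm->mmu_notifier_seq;
 *	smp_rmb();
 *	pfn = gfn_to_pfn(kvm, gfn);
 *
 *	spin_lock(&kvm->mmu_lock);
 *	if (mmu_notifier_retry(kvm, mmu_seq))
 *		goto out_unlock;	// an invalidation raced with us; retry
 *	// ...install the mapping...
 *	spin_unlock(&kvm->mmu_lock);
 */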

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#ifdef CONFIG_S390
#define KVM_MAX_IRQ_ROUTES 4096 //FIXME: we can have more than that...
#else
#define KVM_MAX_IRQ_ROUTES 1024
#endif

int kvm_setup_default_irq_routing(struct kvm *kvm);
int kvm_setup_empty_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif
void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

#ifdef CONFIG_KVM_APIC_ARCHITECTURE
bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_vcpu_compatible(struct kvm_vcpu *vcpu) { return true; }
#endif

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
	set_bit(req, &vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
	if (test_bit(req, &vcpu->requests)) {
		clear_bit(req, &vcpu->requests);
		return true;
	} else {
		return false;
	}
}
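
/*
 * Hedged usage sketch: a producer posts a request and kicks the vcpu; the
 * vcpu's run loop consumes it before re-entering the guest
 * (handle_clock_update() is a hypothetical handler):
 *
 *	// producer, any context:
 *	kvm_make_request(KVM_REQ_CLOCK_UPDATE, vcpu);
 *	kvm_vcpu_kick(vcpu);
 *
 *	// consumer, vcpu run loop:
 *	if (kvm_check_request(KVM_REQ_CLOCK_UPDATE, vcpu))
 *		handle_clock_update(vcpu);
 */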

extern bool kvm_rebooting;

struct kvm_device {
	struct kvm_device_ops *ops;
	struct kvm *kvm;
	void *private;
	struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
	const char *name;
	int (*create)(struct kvm_device *dev, u32 type);

	/*
	 * Destroy is responsible for freeing dev.
	 *
	 * Destroy may be called before or after destructors are called
	 * on emulated I/O regions, depending on whether a reference is
	 * held by a vcpu or other kvm component that gets destroyed
	 * after the emulated I/O.
	 */
	void (*destroy)(struct kvm_device *dev);

	int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
	long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
		      unsigned long arg);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
	vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
				     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
				      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
				  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#endif