]>
Commit | Line | Data |
---|---|---|
/* SPDX-License-Identifier: GPL-2.0-only */

#ifndef __KVM_TYPES_H__
#define __KVM_TYPES_H__

/*
 * Forward declarations of KVM core types.  Only pointers to these are
 * used by consumers of this header, so the full definitions (which live
 * elsewhere in the KVM headers) are not required here.  This keeps the
 * include graph minimal.
 */
struct kvm;
struct kvm_async_pf;
struct kvm_device_ops;
struct kvm_interrupt;
struct kvm_irq_routing_table;
struct kvm_memory_slot;
struct kvm_one_reg;
struct kvm_run;
struct kvm_userspace_memory_region;
struct kvm_vcpu;
struct kvm_vcpu_init;
struct kvm_memslots;

enum kvm_mr_change;

/* Provides u64, bool, gfp_t, etc. used by the typedefs and structs below. */
#include <linux/types.h>

/* Per-architecture type knobs, e.g. KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE. */
#include <asm/kvm_types.h>
/*
 * Address types:
 *
 *  gva - guest virtual address
 *  gpa - guest physical address
 *  gfn - guest frame number
 *  hva - host virtual address
 *  hpa - host physical address
 *  hfn - host frame number
 */

typedef unsigned long  gva_t;
typedef u64            gpa_t;
typedef u64            gfn_t;

/* All-ones gpa_t used as an "invalid guest physical address" sentinel. */
#define GPA_INVALID	(~(gpa_t)0)

typedef unsigned long  hva_t;
typedef u64            hpa_t;
typedef u64            hfn_t;

/* A host pfn as seen by KVM; an alias of hfn_t. */
typedef hfn_t kvm_pfn_t;
35149e21 | 47 | |
/*
 * Cached gpa -> hva translation for a range of guest memory.
 *
 * NOTE(review): @generation presumably snapshots the memslot generation at
 * fill time so stale entries can be detected after memslot changes — confirm
 * against the cache init/refresh helpers.
 */
struct gfn_to_hva_cache {
	u64 generation;				/* validity token, see note above */
	gpa_t gpa;				/* guest physical start of the cached range */
	unsigned long hva;			/* host virtual address mapping @gpa */
	unsigned long len;			/* length in bytes of the cached range */
	struct kvm_memory_slot *memslot;	/* memslot backing the range */
};
55 | ||
/*
 * Cached gfn -> pfn translation for a single guest frame.
 *
 * NOTE(review): as with gfn_to_hva_cache, @generation appears to be a
 * memslot-generation validity token and @dirty appears to mark that the
 * page must be flagged dirty on release — verify against the users.
 */
struct gfn_to_pfn_cache {
	u64 generation;		/* validity token */
	gfn_t gfn;		/* guest frame number being cached */
	kvm_pfn_t pfn;		/* host pfn backing @gfn */
	bool dirty;		/* page was written through this cache */
};
62 | ||
#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
/*
 * Memory caches are used to preallocate memory ahead of various MMU flows,
 * e.g. page fault handlers.  Gracefully handling allocation failures deep in
 * MMU flows is problematic, as is triggering reclaim, I/O, etc... while
 * holding MMU locks.  Note, these caches act more like prefetch buffers than
 * classical caches, i.e. objects are not returned to the cache on being freed.
 */
struct kvm_mmu_memory_cache {
	int nobjs;			/* number of preallocated objects currently held */
	gfp_t gfp_zero;			/* extra GFP flags for allocation (e.g. __GFP_ZERO), 0 if none */
	struct kmem_cache *kmem_cache;	/* dedicated slab to allocate from; NULL presumably
					 * means plain page/kmalloc allocation — confirm at fill site */
	void *objects[KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE];	/* prefetched objects, arch-sized */
};
#endif


#endif /* __KVM_TYPES_H__ */