/* SPDX-License-Identifier: GPL-2.0-only */
#ifndef __KVM_HOST_H
#define __KVM_HOST_H


#include <linux/types.h>
#include <linux/hardirq.h>
#include <linux/list.h>
#include <linux/mutex.h>
#include <linux/spinlock.h>
#include <linux/signal.h>
#include <linux/sched.h>
#include <linux/sched/stat.h>
#include <linux/bug.h>
#include <linux/minmax.h>
#include <linux/mm.h>
#include <linux/mmu_notifier.h>
#include <linux/preempt.h>
#include <linux/msi.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include <linux/rcupdate.h>
#include <linux/ratelimit.h>
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/context_tracking.h>
#include <linux/irqbypass.h>
#include <linux/rcuwait.h>
#include <linux/refcount.h>
#include <linux/nospec.h>
#include <asm/signal.h>

#include <linux/kvm.h>
#include <linux/kvm_para.h>

#include <linux/kvm_types.h>

#include <asm/kvm_host.h>
#include <linux/kvm_dirty_ring.h>

#ifndef KVM_MAX_VCPU_ID
#define KVM_MAX_VCPU_ID KVM_MAX_VCPUS
#endif

/*
 * Bits 16 ~ 31 of kvm_memory_region::flags are used internally by KVM;
 * the remaining bits are visible to userspace and are defined in
 * include/uapi/linux/kvm.h.
 */
#define KVM_MEMSLOT_INVALID	(1UL << 16)

/*
 * Bit 63 of the memslot generation number is an "update in-progress flag",
 * e.g. is temporarily set for the duration of install_new_memslots().
 * This flag effectively creates a unique generation number that is used to
 * mark cached memslot data, e.g. MMIO accesses, as potentially being stale,
 * i.e. may (or may not) have come from the previous memslots generation.
 *
 * This is necessary because the actual memslots update is not atomic with
 * respect to the generation number update.  Updating the generation number
 * first would allow a vCPU to cache a spte from the old memslots using the
 * new generation number, and updating the generation number after switching
 * to the new memslots would allow cache hits using the old generation number
 * to reference the defunct memslots.
 *
 * This mechanism is used to prevent getting hits in KVM's caches while a
 * memslot update is in-progress, and to prevent cache hits *after* updating
 * the actual generation number against accesses that were inserted into the
 * cache *before* the memslots were updated.
 */
#define KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS	BIT_ULL(63)

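/*
 * Illustrative sketch, not part of the upstream header: a cache that tags
 * entries with the memslots generation can reject stale entries by comparing
 * against the current generation.  A generation captured while the
 * in-progress flag was set can never match a completed update (the flag is
 * clear afterwards), so such entries are always treated as stale.  The
 * function name is hypothetical.
 */
#if 0
static bool example_cached_gen_is_current(struct kvm *kvm, u64 cached_gen)
{
	return cached_gen == kvm_memslots(kvm)->generation &&
	       !(cached_gen & KVM_MEMSLOT_GEN_UPDATE_IN_PROGRESS);
}
#endif
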
/* Two fragments for MMIO accesses that cross a page boundary. */
#define KVM_MAX_MMIO_FRAGMENTS	2

#ifndef KVM_ADDRESS_SPACE_NUM
#define KVM_ADDRESS_SPACE_NUM	1
#endif

/*
 * For a normal pfn, the highest 12 bits should be zero,
 * so we can mask bits 62 ~ 52 to indicate an error pfn,
 * and mask bit 63 to indicate a noslot pfn.
 */
#define KVM_PFN_ERR_MASK	(0x7ffULL << 52)
#define KVM_PFN_ERR_NOSLOT_MASK	(0xfffULL << 52)
#define KVM_PFN_NOSLOT		(0x1ULL << 63)

#define KVM_PFN_ERR_FAULT	(KVM_PFN_ERR_MASK)
#define KVM_PFN_ERR_HWPOISON	(KVM_PFN_ERR_MASK + 1)
#define KVM_PFN_ERR_RO_FAULT	(KVM_PFN_ERR_MASK + 2)

/*
 * Error pfns indicate that the gfn is in a slot but could not be
 * translated to a pfn on the host.
 */
static inline bool is_error_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_MASK);
}

/*
 * Error_noslot pfns indicate that the gfn cannot be translated to a pfn:
 * either it is not in any slot, or translating it to a pfn failed.
 */
static inline bool is_error_noslot_pfn(kvm_pfn_t pfn)
{
	return !!(pfn & KVM_PFN_ERR_NOSLOT_MASK);
}

/* A noslot pfn indicates that the gfn is not in any slot. */
static inline bool is_noslot_pfn(kvm_pfn_t pfn)
{
	return pfn == KVM_PFN_NOSLOT;
}

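/*
 * Illustrative sketch of how a caller distinguishes the three outcomes
 * described above; the function name is hypothetical.
 */
#if 0
static int example_classify_pfn(kvm_pfn_t pfn)
{
	if (is_noslot_pfn(pfn))
		return -ENOENT;	/* gfn has no memslot */
	if (is_error_pfn(pfn))
		return -EFAULT;	/* gfn is in a slot but translation failed */
	return 0;		/* usable host pfn */
}
#endif
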
/*
 * Architectures whose KVM_HVA_ERR_BAD is something other than PAGE_OFFSET
 * (e.g. s390) provide their own defines and kvm_is_error_hva().
 */
#ifndef KVM_HVA_ERR_BAD

#define KVM_HVA_ERR_BAD		(PAGE_OFFSET)
#define KVM_HVA_ERR_RO_BAD	(PAGE_OFFSET + PAGE_SIZE)

static inline bool kvm_is_error_hva(unsigned long addr)
{
	return addr >= PAGE_OFFSET;
}

#endif

#define KVM_ERR_PTR_BAD_PAGE	(ERR_PTR(-ENOENT))

static inline bool is_error_page(struct page *page)
{
	return IS_ERR(page);
}

#define KVM_REQUEST_MASK	GENMASK(7, 0)
#define KVM_REQUEST_NO_WAKEUP	BIT(8)
#define KVM_REQUEST_WAIT	BIT(9)
/*
 * Architecture-independent vcpu->requests bit members.
 * Bits 4-7 are reserved for more arch-independent bits.
 */
#define KVM_REQ_TLB_FLUSH	(0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_MMU_RELOAD	(1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#define KVM_REQ_UNBLOCK		2
#define KVM_REQ_UNHALT		3
#define KVM_REQUEST_ARCH_BASE	8

#define KVM_ARCH_REQ_FLAGS(nr, flags) ({ \
	BUILD_BUG_ON((unsigned)(nr) >= (sizeof_field(struct kvm_vcpu, requests) * 8) - KVM_REQUEST_ARCH_BASE); \
	(unsigned)(((nr) + KVM_REQUEST_ARCH_BASE) | (flags)); \
})
#define KVM_ARCH_REQ(nr)	KVM_ARCH_REQ_FLAGS(nr, 0)

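/*
 * Illustrative sketch: an architecture numbers its private requests from
 * zero and KVM_ARCH_REQ_FLAGS() rebases them past the generic bits.
 * KVM_REQ_EXAMPLE is a hypothetical name, not a real request.
 */
#if 0
#define KVM_REQ_EXAMPLE \
	KVM_ARCH_REQ_FLAGS(0, KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
#endif
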
#define KVM_USERSPACE_IRQ_SOURCE_ID		0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID	1

extern struct mutex kvm_lock;
extern struct list_head vm_list;

struct kvm_io_range {
	gpa_t addr;
	int len;
	struct kvm_io_device *dev;
};

#define NR_IOBUS_DEVS 1000

struct kvm_io_bus {
	int dev_count;
	int ioeventfd_count;
	struct kvm_io_range range[];
};

enum kvm_bus {
	KVM_MMIO_BUS,
	KVM_PIO_BUS,
	KVM_VIRTIO_CCW_NOTIFY_BUS,
	KVM_FAST_MMIO_BUS,
	KVM_NR_BUSES
};

int kvm_io_bus_write(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		     int len, const void *val);
int kvm_io_bus_write_cookie(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx,
			    gpa_t addr, int len, const void *val, long cookie);
int kvm_io_bus_read(struct kvm_vcpu *vcpu, enum kvm_bus bus_idx, gpa_t addr,
		    int len, void *val);
int kvm_io_bus_register_dev(struct kvm *kvm, enum kvm_bus bus_idx, gpa_t addr,
			    int len, struct kvm_io_device *dev);
int kvm_io_bus_unregister_dev(struct kvm *kvm, enum kvm_bus bus_idx,
			      struct kvm_io_device *dev);
struct kvm_io_device *kvm_io_bus_get_dev(struct kvm *kvm, enum kvm_bus bus_idx,
					 gpa_t addr);

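/*
 * Illustrative sketch: registering a device on the MMIO bus.  Callers are
 * expected to hold kvm->slots_lock; EXAMPLE_MMIO_BASE, EXAMPLE_MMIO_LEN and
 * dev are hypothetical names.
 */
#if 0
mutex_lock(&kvm->slots_lock);
ret = kvm_io_bus_register_dev(kvm, KVM_MMIO_BUS, EXAMPLE_MMIO_BASE,
			      EXAMPLE_MMIO_LEN, &dev->io_device);
mutex_unlock(&kvm->slots_lock);
#endif
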
#ifdef CONFIG_KVM_ASYNC_PF
struct kvm_async_pf {
	struct work_struct work;
	struct list_head link;
	struct list_head queue;
	struct kvm_vcpu *vcpu;
	struct mm_struct *mm;
	gpa_t cr2_or_gpa;
	unsigned long addr;
	struct kvm_arch_async_pf arch;
	bool wakeup_all;
	bool notpresent_injected;
};

void kvm_clear_async_pf_completion_queue(struct kvm_vcpu *vcpu);
void kvm_check_async_pf_completion(struct kvm_vcpu *vcpu);
bool kvm_setup_async_pf(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
			unsigned long hva, struct kvm_arch_async_pf *arch);
int kvm_async_pf_wakeup_all(struct kvm_vcpu *vcpu);
#endif

#ifdef KVM_ARCH_WANT_MMU_NOTIFIER
struct kvm_gfn_range {
	struct kvm_memory_slot *slot;
	gfn_t start;
	gfn_t end;
	pte_t pte;
	bool may_block;
};
bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
bool kvm_set_spte_gfn(struct kvm *kvm, struct kvm_gfn_range *range);
#endif

enum {
	OUTSIDE_GUEST_MODE,
	IN_GUEST_MODE,
	EXITING_GUEST_MODE,
	READING_SHADOW_PAGE_TABLES,
};

#define KVM_UNMAPPED_PAGE	((void *) 0x500 + POISON_POINTER_DELTA)

struct kvm_host_map {
	/*
	 * Only valid if 'pfn' is managed by the host kernel, i.e. there is
	 * a 'struct page' for it.  (When the mem= kernel parameter is used,
	 * some memory can serve as guest memory without being managed by
	 * the host kernel.)  If 'pfn' is not managed by the host kernel,
	 * this field is initialized to KVM_UNMAPPED_PAGE.
	 */
	struct page *page;
	void *hva;
	kvm_pfn_t pfn;
	kvm_pfn_t gfn;
};

/*
 * Used to check if the mapping is valid or not.  Never use 'kvm_host_map'
 * directly to check for that.
 */
static inline bool kvm_vcpu_mapped(struct kvm_host_map *map)
{
	return !!map->hva;
}

static inline bool kvm_vcpu_can_poll(ktime_t cur, ktime_t stop)
{
	return single_task_running() && !need_resched() && ktime_before(cur, stop);
}

/*
 * Sometimes a large or cross-page mmio needs to be broken up into separate
 * exits for userspace servicing.
 */
struct kvm_mmio_fragment {
	gpa_t gpa;
	void *data;
	unsigned len;
};

struct kvm_vcpu {
	struct kvm *kvm;
#ifdef CONFIG_PREEMPT_NOTIFIERS
	struct preempt_notifier preempt_notifier;
#endif
	int cpu;
	int vcpu_id; /* id given by userspace at creation */
	int vcpu_idx; /* index in kvm->vcpus array */
	int srcu_idx;
	int mode;
	u64 requests;
	unsigned long guest_debug;

	int pre_pcpu;
	struct list_head blocked_vcpu_list;

	struct mutex mutex;
	struct kvm_run *run;

	struct rcuwait wait;
	struct pid __rcu *pid;
	int sigset_active;
	sigset_t sigset;
	struct kvm_vcpu_stat stat;
	unsigned int halt_poll_ns;
	bool valid_wakeup;

#ifdef CONFIG_HAS_IOMEM
	int mmio_needed;
	int mmio_read_completed;
	int mmio_is_write;
	int mmio_cur_fragment;
	int mmio_nr_fragments;
	struct kvm_mmio_fragment mmio_fragments[KVM_MAX_MMIO_FRAGMENTS];
#endif

#ifdef CONFIG_KVM_ASYNC_PF
	struct {
		u32 queued;
		struct list_head queue;
		struct list_head done;
		spinlock_t lock;
	} async_pf;
#endif

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT
	/*
	 * Cpu relax intercept or pause loop exit optimization
	 * in_spin_loop: set when a vcpu does a pause loop exit
	 *  or cpu relax intercepted.
	 * dy_eligible: indicates whether vcpu is eligible for directed yield.
	 */
	struct {
		bool in_spin_loop;
		bool dy_eligible;
	} spin_loop;
#endif
	bool preempted;
	bool ready;
	struct kvm_vcpu_arch arch;
	struct kvm_dirty_ring dirty_ring;
};

/* must be called with irqs disabled */
static __always_inline void guest_enter_irqoff(void)
{
	/*
	 * This is running in ioctl context, so it's safe to assume that it's
	 * the stime pending cputime to flush.
	 */
	instrumentation_begin();
	vtime_account_guest_enter();
	instrumentation_end();

	/*
	 * KVM does not hold any references to RCU-protected data when it
	 * switches the CPU into guest mode.  In fact, switching to guest mode
	 * is very similar to exiting to userspace from RCU's point of view.
	 * In addition, the CPU may stay in guest mode for quite a long time
	 * (up to one time slice).  Let's treat guest mode as a quiescent
	 * state, just like we do with user-mode execution.
	 */
	if (!context_tracking_guest_enter()) {
		instrumentation_begin();
		rcu_virt_note_context_switch(smp_processor_id());
		instrumentation_end();
	}
}

static __always_inline void guest_exit_irqoff(void)
{
	context_tracking_guest_exit();

	instrumentation_begin();
	/* Flush the guest cputime we spent on the guest */
	vtime_account_guest_exit();
	instrumentation_end();
}

static inline void guest_exit(void)
{
	unsigned long flags;

	local_irq_save(flags);
	guest_exit_irqoff();
	local_irq_restore(flags);
}

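/*
 * Illustrative sketch of the intended call sequence in an architecture's
 * vcpu-run path; arch_enter_guest() stands in for the real world switch
 * and is hypothetical.
 */
#if 0
local_irq_disable();
guest_enter_irqoff();
arch_enter_guest(vcpu);		/* world switch into the guest */
guest_exit_irqoff();
local_irq_enable();
#endif
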
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
	/*
	 * The memory barrier ensures a previous write to vcpu->requests cannot
	 * be reordered with the read of vcpu->mode.  It pairs with the general
	 * memory barrier following the write of vcpu->mode in VCPU RUN.
	 */
	smp_mb__before_atomic();
	return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}

/*
 * Some of the bitops functions do not support overly long bitmaps.
 * This number must be chosen so that such limits are not exceeded.
 */
#define KVM_MEM_MAX_NR_PAGES ((1UL << 31) - 1)

struct kvm_memory_slot {
	gfn_t base_gfn;
	unsigned long npages;
	unsigned long *dirty_bitmap;
	struct kvm_arch_memory_slot arch;
	unsigned long userspace_addr;
	u32 flags;
	short id;
	u16 as_id;
};

static inline bool kvm_slot_dirty_track_enabled(struct kvm_memory_slot *slot)
{
	return slot->flags & KVM_MEM_LOG_DIRTY_PAGES;
}

static inline unsigned long kvm_dirty_bitmap_bytes(struct kvm_memory_slot *memslot)
{
	return ALIGN(memslot->npages, BITS_PER_LONG) / 8;
}

static inline unsigned long *kvm_second_dirty_bitmap(struct kvm_memory_slot *memslot)
{
	unsigned long len = kvm_dirty_bitmap_bytes(memslot);

	return memslot->dirty_bitmap + len / sizeof(*memslot->dirty_bitmap);
}

#ifndef KVM_DIRTY_LOG_MANUAL_CAPS
#define KVM_DIRTY_LOG_MANUAL_CAPS KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE
#endif

struct kvm_s390_adapter_int {
	u64 ind_addr;
	u64 summary_addr;
	u64 ind_offset;
	u32 summary_offset;
	u32 adapter_id;
};

struct kvm_hv_sint {
	u32 vcpu;
	u32 sint;
};

struct kvm_kernel_irq_routing_entry {
	u32 gsi;
	u32 type;
	int (*set)(struct kvm_kernel_irq_routing_entry *e,
		   struct kvm *kvm, int irq_source_id, int level,
		   bool line_status);
	union {
		struct {
			unsigned irqchip;
			unsigned pin;
		} irqchip;
		struct {
			u32 address_lo;
			u32 address_hi;
			u32 data;
			u32 flags;
			u32 devid;
		} msi;
		struct kvm_s390_adapter_int adapter;
		struct kvm_hv_sint hv_sint;
	};
	struct hlist_node link;
};

#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING
struct kvm_irq_routing_table {
	int chip[KVM_NR_IRQCHIPS][KVM_IRQCHIP_NUM_PINS];
	u32 nr_rt_entries;
	/*
	 * Array indexed by gsi.  Each entry contains a list of the irq
	 * chips the gsi is connected to.
	 */
	struct hlist_head map[];
};
#endif

#ifndef KVM_PRIVATE_MEM_SLOTS
#define KVM_PRIVATE_MEM_SLOTS 0
#endif

#define KVM_MEM_SLOTS_NUM SHRT_MAX
#define KVM_USER_MEM_SLOTS (KVM_MEM_SLOTS_NUM - KVM_PRIVATE_MEM_SLOTS)

#ifndef __KVM_VCPU_MULTIPLE_ADDRESS_SPACE
static inline int kvm_arch_vcpu_memslots_id(struct kvm_vcpu *vcpu)
{
	return 0;
}
#endif

/*
 * Note:
 * memslots are no longer sorted by id; use id_to_memslot()
 * to look up a memslot by its id.
 */
struct kvm_memslots {
	u64 generation;
	/* The mapping table from slot id to the index in memslots[]. */
	short id_to_index[KVM_MEM_SLOTS_NUM];
	atomic_t lru_slot;
	int used_slots;
	struct kvm_memory_slot memslots[];
};

struct kvm {
#ifdef KVM_HAVE_MMU_RWLOCK
	rwlock_t mmu_lock;
#else
	spinlock_t mmu_lock;
#endif /* KVM_HAVE_MMU_RWLOCK */

	struct mutex slots_lock;
	struct mm_struct *mm; /* userspace tied to this vm */
	struct kvm_memslots __rcu *memslots[KVM_ADDRESS_SPACE_NUM];
	struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];

	/*
	 * created_vcpus is protected by kvm->lock, and is incremented
	 * at the beginning of KVM_CREATE_VCPU.  online_vcpus is only
	 * incremented after storing the kvm_vcpu pointer in vcpus,
	 * and is accessed atomically.
	 */
	atomic_t online_vcpus;
	int created_vcpus;
	int last_boosted_vcpu;
	struct list_head vm_list;
	struct mutex lock;
	struct kvm_io_bus __rcu *buses[KVM_NR_BUSES];
#ifdef CONFIG_HAVE_KVM_EVENTFD
	struct {
		spinlock_t lock;
		struct list_head items;
		struct list_head resampler_list;
		struct mutex resampler_lock;
	} irqfds;
	struct list_head ioeventfds;
#endif
	struct kvm_vm_stat stat;
	struct kvm_arch arch;
	refcount_t users_count;
#ifdef CONFIG_KVM_MMIO
	struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
	spinlock_t ring_lock;
	struct list_head coalesced_zones;
#endif

	struct mutex irq_lock;
#ifdef CONFIG_HAVE_KVM_IRQCHIP
	/*
	 * Update side is protected by irq_lock.
	 */
	struct kvm_irq_routing_table __rcu *irq_routing;
#endif
#ifdef CONFIG_HAVE_KVM_IRQFD
	struct hlist_head irq_ack_notifier_list;
#endif

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
	struct mmu_notifier mmu_notifier;
	unsigned long mmu_notifier_seq;
	long mmu_notifier_count;
	unsigned long mmu_notifier_range_start;
	unsigned long mmu_notifier_range_end;
#endif
	long tlbs_dirty;
	struct list_head devices;
	u64 manual_dirty_log_protect;
	struct dentry *debugfs_dentry;
	struct kvm_stat_data **debugfs_stat_data;
	struct srcu_struct srcu;
	struct srcu_struct irq_srcu;
	pid_t userspace_pid;
	unsigned int max_halt_poll_ns;
	u32 dirty_ring_size;
};

#define kvm_err(fmt, ...) \
	pr_err("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_info(fmt, ...) \
	pr_info("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug(fmt, ...) \
	pr_debug("kvm [%i]: " fmt, task_pid_nr(current), ## __VA_ARGS__)
#define kvm_debug_ratelimited(fmt, ...) \
	pr_debug_ratelimited("kvm [%i]: " fmt, task_pid_nr(current), \
			     ## __VA_ARGS__)
#define kvm_pr_unimpl(fmt, ...) \
	pr_err_ratelimited("kvm [%i]: " fmt, \
			   task_tgid_nr(current), ## __VA_ARGS__)

/* The guest did something we don't support. */
#define vcpu_unimpl(vcpu, fmt, ...)					\
	kvm_pr_unimpl("vcpu%i, guest rIP: 0x%lx " fmt,			\
			(vcpu)->vcpu_id, kvm_rip_read(vcpu), ## __VA_ARGS__)

#define vcpu_debug(vcpu, fmt, ...)					\
	kvm_debug("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)
#define vcpu_debug_ratelimited(vcpu, fmt, ...)				\
	kvm_debug_ratelimited("vcpu%i " fmt, (vcpu)->vcpu_id,		\
			      ## __VA_ARGS__)
#define vcpu_err(vcpu, fmt, ...)					\
	kvm_err("vcpu%i " fmt, (vcpu)->vcpu_id, ## __VA_ARGS__)

static inline bool kvm_dirty_log_manual_protect_and_init_set(struct kvm *kvm)
{
	return !!(kvm->manual_dirty_log_protect & KVM_DIRTY_LOG_INITIALLY_SET);
}

static inline struct kvm_io_bus *kvm_get_bus(struct kvm *kvm, enum kvm_bus idx)
{
	return srcu_dereference_check(kvm->buses[idx], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_vcpu *kvm_get_vcpu(struct kvm *kvm, int i)
{
	int num_vcpus = atomic_read(&kvm->online_vcpus);

	i = array_index_nospec(i, num_vcpus);

	/* Pairs with smp_wmb() in kvm_vm_ioctl_create_vcpu. */
	smp_rmb();
	return kvm->vcpus[i];
}

#define kvm_for_each_vcpu(idx, vcpup, kvm) \
	for (idx = 0; \
	     idx < atomic_read(&kvm->online_vcpus) && \
	     (vcpup = kvm_get_vcpu(kvm, idx)) != NULL; \
	     idx++)

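/*
 * Illustrative sketch: waking every online vCPU with the iterator above.
 */
#if 0
struct kvm_vcpu *vcpu;
int i;

kvm_for_each_vcpu(i, vcpu, kvm)
	kvm_vcpu_kick(vcpu);
#endif
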
static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
{
	struct kvm_vcpu *vcpu = NULL;
	int i;

	if (id < 0)
		return NULL;
	if (id < KVM_MAX_VCPUS)
		vcpu = kvm_get_vcpu(kvm, id);
	if (vcpu && vcpu->vcpu_id == id)
		return vcpu;
	kvm_for_each_vcpu(i, vcpu, kvm)
		if (vcpu->vcpu_id == id)
			return vcpu;
	return NULL;
}

static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
{
	return vcpu->vcpu_idx;
}

#define kvm_for_each_memslot(memslot, slots)				\
	for (memslot = &slots->memslots[0];				\
	     memslot < slots->memslots + slots->used_slots; memslot++)	\
		if (WARN_ON_ONCE(!memslot->npages)) {			\
		} else

void kvm_vcpu_destroy(struct kvm_vcpu *vcpu);

void vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_IOAPIC
void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
{
}
#endif

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd_init(void);
void kvm_irqfd_exit(void);
#else
static inline int kvm_irqfd_init(void)
{
	return 0;
}

static inline void kvm_irqfd_exit(void)
{
}
#endif
int kvm_init(void *opaque, unsigned vcpu_size, unsigned vcpu_align,
	     struct module *module);
void kvm_exit(void);

void kvm_get_kvm(struct kvm *kvm);
void kvm_put_kvm(struct kvm *kvm);
bool file_is_kvm(struct file *file);
void kvm_put_kvm_no_destroy(struct kvm *kvm);

static inline struct kvm_memslots *__kvm_memslots(struct kvm *kvm, int as_id)
{
	as_id = array_index_nospec(as_id, KVM_ADDRESS_SPACE_NUM);
	return srcu_dereference_check(kvm->memslots[as_id], &kvm->srcu,
				      lockdep_is_held(&kvm->slots_lock) ||
				      !refcount_read(&kvm->users_count));
}

static inline struct kvm_memslots *kvm_memslots(struct kvm *kvm)
{
	return __kvm_memslots(kvm, 0);
}

static inline struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu)
{
	int as_id = kvm_arch_vcpu_memslots_id(vcpu);

	return __kvm_memslots(vcpu->kvm, as_id);
}

static inline
struct kvm_memory_slot *id_to_memslot(struct kvm_memslots *slots, int id)
{
	int index = slots->id_to_index[id];
	struct kvm_memory_slot *slot;

	if (index < 0)
		return NULL;

	slot = &slots->memslots[index];

	WARN_ON(slot->id != id);
	return slot;
}

/*
 * The KVM_SET_USER_MEMORY_REGION ioctl allows the following operations:
 * - create a new memory slot
 * - delete an existing memory slot
 * - modify an existing memory slot
 *   -- move it in the guest physical memory space
 *   -- just change its flags
 *
 * Since flags can be changed by some of these operations, the following
 * differentiation is the best we can do for __kvm_set_memory_region():
 */
enum kvm_mr_change {
	KVM_MR_CREATE,
	KVM_MR_DELETE,
	KVM_MR_MOVE,
	KVM_MR_FLAGS_ONLY,
};

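/*
 * Illustrative sketch: the userspace side of KVM_SET_USER_MEMORY_REGION,
 * creating a new slot (KVM_MR_CREATE from the kernel's point of view).
 * vm_fd and backing are hypothetical names.
 */
#if 0
struct kvm_userspace_memory_region region = {
	.slot = 0,
	.flags = KVM_MEM_LOG_DIRTY_PAGES,
	.guest_phys_addr = 0x100000,
	.memory_size = 0x200000,
	.userspace_addr = (__u64)backing,	/* host mmap()ed buffer */
};

ret = ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &region);
#endif
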
int kvm_set_memory_region(struct kvm *kvm,
			  const struct kvm_userspace_memory_region *mem);
int __kvm_set_memory_region(struct kvm *kvm,
			    const struct kvm_userspace_memory_region *mem);
void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *slot);
void kvm_arch_memslots_updated(struct kvm *kvm, u64 gen);
int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change);
void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change);
/* flush all memory translations */
void kvm_arch_flush_shadow_all(struct kvm *kvm);
/* flush memory translations pointing to 'slot' */
void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot);

int gfn_to_page_many_atomic(struct kvm_memory_slot *slot, gfn_t gfn,
			    struct page **pages, int nr_pages);

struct page *gfn_to_page(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva(struct kvm *kvm, gfn_t gfn);
unsigned long gfn_to_hva_prot(struct kvm *kvm, gfn_t gfn, bool *writable);
unsigned long gfn_to_hva_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
unsigned long gfn_to_hva_memslot_prot(struct kvm_memory_slot *slot, gfn_t gfn,
				      bool *writable);
void kvm_release_page_clean(struct page *page);
void kvm_release_page_dirty(struct page *page);
void kvm_set_page_accessed(struct page *page);

kvm_pfn_t gfn_to_pfn(struct kvm *kvm, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_prot(struct kvm *kvm, gfn_t gfn, bool write_fault,
			  bool *writable);
kvm_pfn_t gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t gfn_to_pfn_memslot_atomic(struct kvm_memory_slot *slot, gfn_t gfn);
kvm_pfn_t __gfn_to_pfn_memslot(struct kvm_memory_slot *slot, gfn_t gfn,
			       bool atomic, bool *async, bool write_fault,
			       bool *writable, hva_t *hva);

void kvm_release_pfn_clean(kvm_pfn_t pfn);
void kvm_release_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_dirty(kvm_pfn_t pfn);
void kvm_set_pfn_accessed(kvm_pfn_t pfn);
void kvm_get_pfn(kvm_pfn_t pfn);

void kvm_release_pfn(kvm_pfn_t pfn, bool dirty, struct gfn_to_pfn_cache *cache);
int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
			int len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			  void *data, unsigned long len);
int kvm_read_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				 void *data, unsigned int offset,
				 unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
			 int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
		    unsigned long len);
int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			   void *data, unsigned long len);
int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
				  void *data, unsigned int offset,
				  unsigned long len);
int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
			      gpa_t gpa, unsigned long len);

#define __kvm_get_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = get_user(v, __uaddr);				\
	__ret;								\
})

#define kvm_get_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_get_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

#define __kvm_put_guest(kvm, gfn, offset, v)				\
({									\
	unsigned long __addr = gfn_to_hva(kvm, gfn);			\
	typeof(v) __user *__uaddr = (typeof(__uaddr))(__addr + offset);	\
	int __ret = -EFAULT;						\
									\
	if (!kvm_is_error_hva(__addr))					\
		__ret = put_user(v, __uaddr);				\
	if (!__ret)							\
		mark_page_dirty(kvm, gfn);				\
	__ret;								\
})

#define kvm_put_guest(kvm, gpa, v)					\
({									\
	gpa_t __gpa = gpa;						\
	struct kvm *__kvm = kvm;					\
									\
	__kvm_put_guest(__kvm, __gpa >> PAGE_SHIFT,			\
			offset_in_page(__gpa), v);			\
})

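/*
 * Illustrative sketch: kvm_put_guest() stores a single value at a guest
 * physical address and marks the page dirty on success.  gpa and val are
 * hypothetical names.
 */
#if 0
u64 val = 1;
int ret = kvm_put_guest(kvm, gpa, val);

if (ret)		/* -EFAULT: gpa is not backed by a memslot */
	return ret;
#endif
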
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
bool kvm_is_visible_gfn(struct kvm *kvm, gfn_t gfn);
bool kvm_vcpu_is_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_host_page_size(struct kvm_vcpu *vcpu, gfn_t gfn);
void mark_page_dirty_in_slot(struct kvm *kvm, struct kvm_memory_slot *memslot, gfn_t gfn);
void mark_page_dirty(struct kvm *kvm, gfn_t gfn);

struct kvm_memslots *kvm_vcpu_memslots(struct kvm_vcpu *vcpu);
struct kvm_memory_slot *kvm_vcpu_gfn_to_memslot(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn_atomic(struct kvm_vcpu *vcpu, gfn_t gfn);
kvm_pfn_t kvm_vcpu_gfn_to_pfn(struct kvm_vcpu *vcpu, gfn_t gfn);
int kvm_vcpu_map(struct kvm_vcpu *vcpu, gpa_t gpa, struct kvm_host_map *map);
int kvm_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, struct kvm_host_map *map,
		struct gfn_to_pfn_cache *cache, bool atomic);
struct page *kvm_vcpu_gfn_to_page(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_unmap(struct kvm_vcpu *vcpu, struct kvm_host_map *map, bool dirty);
int kvm_unmap_gfn(struct kvm_vcpu *vcpu, struct kvm_host_map *map,
		  struct gfn_to_pfn_cache *cache, bool dirty, bool atomic);
unsigned long kvm_vcpu_gfn_to_hva(struct kvm_vcpu *vcpu, gfn_t gfn);
unsigned long kvm_vcpu_gfn_to_hva_prot(struct kvm_vcpu *vcpu, gfn_t gfn, bool *writable);
int kvm_vcpu_read_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, void *data, int offset,
			     int len);
int kvm_vcpu_read_guest_atomic(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			       unsigned long len);
int kvm_vcpu_read_guest(struct kvm_vcpu *vcpu, gpa_t gpa, void *data,
			unsigned long len);
int kvm_vcpu_write_guest_page(struct kvm_vcpu *vcpu, gfn_t gfn, const void *data,
			      int offset, int len);
int kvm_vcpu_write_guest(struct kvm_vcpu *vcpu, gpa_t gpa, const void *data,
			 unsigned long len);
void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);

void kvm_sigset_activate(struct kvm_vcpu *vcpu);
void kvm_sigset_deactivate(struct kvm_vcpu *vcpu);

void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu, bool usermode_vcpu_not_eligible);

void kvm_flush_remote_tlbs(struct kvm *kvm);
void kvm_reload_remote_mmus(struct kvm *kvm);

#ifdef KVM_ARCH_NR_OBJS_PER_MEMORY_CACHE
int kvm_mmu_topup_memory_cache(struct kvm_mmu_memory_cache *mc, int min);
int kvm_mmu_memory_cache_nr_free_objects(struct kvm_mmu_memory_cache *mc);
void kvm_mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc);
void *kvm_mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
#endif

bool kvm_make_vcpus_request_mask(struct kvm *kvm, unsigned int req,
				 struct kvm_vcpu *except,
				 unsigned long *vcpu_bitmap, cpumask_var_t tmp);
bool kvm_make_all_cpus_request(struct kvm *kvm, unsigned int req);
bool kvm_make_all_cpus_request_except(struct kvm *kvm, unsigned int req,
				      struct kvm_vcpu *except);
bool kvm_make_cpus_request_mask(struct kvm *kvm, unsigned int req,
				unsigned long *vcpu_bitmap);

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg);
long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg);
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf);

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext);

void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
					     struct kvm_memory_slot *slot,
					     gfn_t gfn_offset,
					     unsigned long mask);
void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot);

#ifdef CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT
void kvm_arch_flush_remote_tlbs_memslot(struct kvm *kvm,
					const struct kvm_memory_slot *memslot);
#else /* !CONFIG_KVM_GENERIC_DIRTYLOG_READ_PROTECT */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log);
int kvm_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log,
		      int *is_dirty, struct kvm_memory_slot **memslot);
#endif

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
			  bool line_status);
int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
			    struct kvm_enable_cap *cap);
long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg);

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);
int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu);

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr);

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs);
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs);
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state);
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg);
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu);

int kvm_arch_init(void *opaque);
void kvm_arch_exit(void);

void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu);

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id);
int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu);

#ifdef __KVM_HAVE_ARCH_VCPU_DEBUGFS
void kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu, struct dentry *debugfs_dentry);
#endif

int kvm_arch_hardware_enable(void);
void kvm_arch_hardware_disable(void);
int kvm_arch_hardware_setup(void *opaque);
void kvm_arch_hardware_unsetup(void);
int kvm_arch_check_processor_compat(void *opaque);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_vcpu_in_kernel(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_runnable(struct kvm_vcpu *vcpu);
bool kvm_arch_dy_has_pending_interrupt(struct kvm_vcpu *vcpu);
int kvm_arch_post_init_vm(struct kvm *kvm);
void kvm_arch_pre_destroy_vm(struct kvm *kvm);

#ifndef __KVM_HAVE_ARCH_VM_ALLOC
/*
 * All architectures that want to use vzalloc currently also
 * need their own kvm_arch_alloc_vm implementation.
 */
static inline struct kvm *kvm_arch_alloc_vm(void)
{
	return kzalloc(sizeof(struct kvm), GFP_KERNEL);
}

static inline void kvm_arch_free_vm(struct kvm *kvm)
{
	kfree(kvm);
}
#endif

#ifndef __KVM_HAVE_ARCH_FLUSH_REMOTE_TLB
static inline int kvm_arch_flush_remote_tlb(struct kvm *kvm)
{
	return -ENOTSUPP;
}
#endif

#ifdef __KVM_HAVE_ARCH_NONCOHERENT_DMA
void kvm_arch_register_noncoherent_dma(struct kvm *kvm);
void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm);
bool kvm_arch_has_noncoherent_dma(struct kvm *kvm);
#else
static inline void kvm_arch_register_noncoherent_dma(struct kvm *kvm)
{
}

static inline void kvm_arch_unregister_noncoherent_dma(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_noncoherent_dma(struct kvm *kvm)
{
	return false;
}
#endif
#ifdef __KVM_HAVE_ARCH_ASSIGNED_DEVICE
void kvm_arch_start_assignment(struct kvm *kvm);
void kvm_arch_end_assignment(struct kvm *kvm);
bool kvm_arch_has_assigned_device(struct kvm *kvm);
#else
static inline void kvm_arch_start_assignment(struct kvm *kvm)
{
}

static inline void kvm_arch_end_assignment(struct kvm *kvm)
{
}

static inline bool kvm_arch_has_assigned_device(struct kvm *kvm)
{
	return false;
}
#endif

static inline struct rcuwait *kvm_arch_vcpu_get_wait(struct kvm_vcpu *vcpu)
{
#ifdef __KVM_HAVE_ARCH_WQP
	return vcpu->arch.waitp;
#else
	return &vcpu->wait;
#endif
}

#ifdef __KVM_HAVE_ARCH_INTC_INITIALIZED
/*
 * Returns true if the virtual interrupt controller is initialized and
 * ready to accept virtual IRQs.  On some architectures the virtual
 * interrupt controller is dynamically instantiated, so this is not
 * always true.
 */
bool kvm_arch_intc_initialized(struct kvm *kvm);
#else
static inline bool kvm_arch_intc_initialized(struct kvm *kvm)
{
	return true;
}
#endif

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type);
void kvm_arch_destroy_vm(struct kvm *kvm);
void kvm_arch_sync_events(struct kvm *kvm);

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu);

bool kvm_is_reserved_pfn(kvm_pfn_t pfn);
bool kvm_is_zone_device_pfn(kvm_pfn_t pfn);
bool kvm_is_transparent_hugepage(kvm_pfn_t pfn);

struct kvm_irq_ack_notifier {
	struct hlist_node link;
	unsigned gsi;
	void (*irq_acked)(struct kvm_irq_ack_notifier *kian);
};

int kvm_irq_map_gsi(struct kvm *kvm,
		    struct kvm_kernel_irq_routing_entry *entries, int gsi);
int kvm_irq_map_chip_pin(struct kvm *kvm, unsigned irqchip, unsigned pin);

int kvm_set_irq(struct kvm *kvm, int irq_source_id, u32 irq, int level,
		bool line_status);
int kvm_set_msi(struct kvm_kernel_irq_routing_entry *irq_entry, struct kvm *kvm,
		int irq_source_id, int level, bool line_status);
int kvm_arch_set_irq_inatomic(struct kvm_kernel_irq_routing_entry *e,
			      struct kvm *kvm, int irq_source_id,
			      int level, bool line_status);
bool kvm_irq_has_notifier(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_notify_acked_gsi(struct kvm *kvm, int gsi);
void kvm_notify_acked_irq(struct kvm *kvm, unsigned irqchip, unsigned pin);
void kvm_register_irq_ack_notifier(struct kvm *kvm,
				   struct kvm_irq_ack_notifier *kian);
void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
				     struct kvm_irq_ack_notifier *kian);
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
bool kvm_arch_irqfd_allowed(struct kvm *kvm, struct kvm_irqfd *args);

/*
 * search_memslots() and __gfn_to_memslot() are here because they are
 * used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
 * gfn_to_memslot() itself isn't here as an inline because that would
 * bloat other code too much.
 *
 * IMPORTANT: Slots are sorted from highest GFN to lowest GFN!
 */
static inline struct kvm_memory_slot *
search_memslots(struct kvm_memslots *slots, gfn_t gfn)
{
	int start = 0, end = slots->used_slots;
	int slot = atomic_read(&slots->lru_slot);
	struct kvm_memory_slot *memslots = slots->memslots;

	if (unlikely(!slots->used_slots))
		return NULL;

	if (gfn >= memslots[slot].base_gfn &&
	    gfn < memslots[slot].base_gfn + memslots[slot].npages)
		return &memslots[slot];

	while (start < end) {
		slot = start + (end - start) / 2;

		if (gfn >= memslots[slot].base_gfn)
			end = slot;
		else
			start = slot + 1;
	}

	if (start < slots->used_slots && gfn >= memslots[start].base_gfn &&
	    gfn < memslots[start].base_gfn + memslots[start].npages) {
		atomic_set(&slots->lru_slot, start);
		return &memslots[start];
	}

	return NULL;
}

static inline struct kvm_memory_slot *
__gfn_to_memslot(struct kvm_memslots *slots, gfn_t gfn)
{
	return search_memslots(slots, gfn);
}

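/*
 * Illustrative worked example of the sort invariant above, with
 * hypothetical values: the slot with the higher base_gfn occupies index 0,
 * so the binary search narrows toward index 0 as the target gfn grows.
 *
 *   memslots[0]: base_gfn = 0x100, npages = 0x10
 *   memslots[1]: base_gfn = 0x000, npages = 0x10
 *
 *   search_memslots(slots, 0x105) returns &memslots[0];
 *   search_memslots(slots, 0x005) returns &memslots[1];
 *   search_memslots(slots, 0x050) returns NULL (gap between the slots).
 */
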
static inline unsigned long
__gfn_to_hva_memslot(const struct kvm_memory_slot *slot, gfn_t gfn)
{
	/*
	 * The index was checked originally in search_memslots.  To avoid
	 * that a malicious guest builds a Spectre gadget out of e.g. page
	 * table walks, do not let the processor speculate loads outside
	 * the guest's registered memslots.
	 */
	unsigned long offset = gfn - slot->base_gfn;
	offset = array_index_nospec(offset, slot->npages);
	return slot->userspace_addr + offset * PAGE_SIZE;
}

static inline int memslot_id(struct kvm *kvm, gfn_t gfn)
{
	return gfn_to_memslot(kvm, gfn)->id;
}

static inline gfn_t
hva_to_gfn_memslot(unsigned long hva, struct kvm_memory_slot *slot)
{
	gfn_t gfn_offset = (hva - slot->userspace_addr) >> PAGE_SHIFT;

	return slot->base_gfn + gfn_offset;
}

static inline gpa_t gfn_to_gpa(gfn_t gfn)
{
	return (gpa_t)gfn << PAGE_SHIFT;
}

static inline gfn_t gpa_to_gfn(gpa_t gpa)
{
	return (gfn_t)(gpa >> PAGE_SHIFT);
}

static inline hpa_t pfn_to_hpa(kvm_pfn_t pfn)
{
	return (hpa_t)pfn << PAGE_SHIFT;
}

static inline struct page *kvm_vcpu_gpa_to_page(struct kvm_vcpu *vcpu,
						gpa_t gpa)
{
	return kvm_vcpu_gfn_to_page(vcpu, gpa_to_gfn(gpa));
}

static inline bool kvm_is_error_gpa(struct kvm *kvm, gpa_t gpa)
{
	unsigned long hva = gfn_to_hva(kvm, gpa_to_gfn(gpa));

	return kvm_is_error_hva(hva);
}

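/*
 * Worked example for the conversions above, assuming 4 KiB pages
 * (PAGE_SHIFT == 12):
 *
 *   gpa_to_gfn(0x12345) == 0x12
 *   gfn_to_gpa(0x12)    == 0x12000
 *   pfn_to_hpa(0x12)    == 0x12000
 */
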
enum kvm_stat_kind {
	KVM_STAT_VM,
	KVM_STAT_VCPU,
};

struct kvm_stat_data {
	struct kvm *kvm;
	struct kvm_stats_debugfs_item *dbgfs_item;
};

struct kvm_stats_debugfs_item {
	const char *name;
	int offset;
	enum kvm_stat_kind kind;
	int mode;
};

#define KVM_DBGFS_GET_MODE(dbgfs_item)					\
	((dbgfs_item)->mode ? (dbgfs_item)->mode : 0644)

#define VM_STAT(n, x, ...)						\
	{ n, offsetof(struct kvm, stat.x), KVM_STAT_VM, ## __VA_ARGS__ }
#define VCPU_STAT(n, x, ...)						\
	{ n, offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU, ## __VA_ARGS__ }

extern struct kvm_stats_debugfs_item debugfs_entries[];
extern struct dentry *kvm_debugfs_dir;

#if defined(CONFIG_MMU_NOTIFIER) && defined(KVM_ARCH_WANT_MMU_NOTIFIER)
static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
{
	if (unlikely(kvm->mmu_notifier_count))
		return 1;
	/*
	 * Ensure the read of mmu_notifier_count happens before the read
	 * of mmu_notifier_seq.  This interacts with the smp_wmb() in
	 * mmu_notifier_invalidate_range_end to make sure that the caller
	 * either sees the old (non-zero) value of mmu_notifier_count or
	 * the new (incremented) value of mmu_notifier_seq.
	 * PowerPC Book3s HV KVM calls this under a per-page lock rather
	 * than under kvm->mmu_lock, for scalability, so can't rely on
	 * kvm->mmu_lock to keep things ordered.
	 */
	smp_rmb();
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}

static inline int mmu_notifier_retry_hva(struct kvm *kvm,
					 unsigned long mmu_seq,
					 unsigned long hva)
{
	lockdep_assert_held(&kvm->mmu_lock);
	/*
	 * If mmu_notifier_count is non-zero, then the range maintained by
	 * kvm_mmu_notifier_invalidate_range_start contains all addresses that
	 * might be being invalidated.  Note that it may include some false
	 * positives, due to shortcuts when handling concurrent invalidations.
	 */
	if (unlikely(kvm->mmu_notifier_count) &&
	    hva >= kvm->mmu_notifier_range_start &&
	    hva < kvm->mmu_notifier_range_end)
		return 1;
	if (kvm->mmu_notifier_seq != mmu_seq)
		return 1;
	return 0;
}
#endif

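/*
 * Illustrative sketch of the retry protocol the helpers above support:
 * sample the sequence count before faulting the page in, then recheck
 * under mmu_lock before installing the translation.  This mirrors the
 * pattern used by arch page-fault handlers; names other than the helpers
 * themselves are hypothetical.
 */
#if 0
	unsigned long mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	pfn = gfn_to_pfn(kvm, gfn);	/* may sleep, may fault pages in */

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq))
		goto out_unlock;	/* invalidation raced; retry the fault */
	/* ... install the translation ... */
out_unlock:
	spin_unlock(&kvm->mmu_lock);
#endif
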
#ifdef CONFIG_HAVE_KVM_IRQ_ROUTING

#define KVM_MAX_IRQ_ROUTES 4096 /* might need extension/rework in the future */

bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
			const struct kvm_irq_routing_entry *entries,
			unsigned nr,
			unsigned flags);
int kvm_set_routing_entry(struct kvm *kvm,
			  struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue);
void kvm_free_irq_routing(struct kvm *kvm);

#else

static inline void kvm_free_irq_routing(struct kvm *kvm) {}

#endif

int kvm_send_userspace_msi(struct kvm *kvm, struct kvm_msi *msi);

#ifdef CONFIG_HAVE_KVM_EVENTFD

void kvm_eventfd_init(struct kvm *kvm);
int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args);

#ifdef CONFIG_HAVE_KVM_IRQFD
int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args);
void kvm_irqfd_release(struct kvm *kvm);
void kvm_irq_routing_update(struct kvm *);
#else
static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}
#endif

#else

static inline void kvm_eventfd_init(struct kvm *kvm) {}

static inline int kvm_irqfd(struct kvm *kvm, struct kvm_irqfd *args)
{
	return -EINVAL;
}

static inline void kvm_irqfd_release(struct kvm *kvm) {}

#ifdef CONFIG_HAVE_KVM_IRQCHIP
static inline void kvm_irq_routing_update(struct kvm *kvm)
{
}
#endif

static inline int kvm_ioeventfd(struct kvm *kvm, struct kvm_ioeventfd *args)
{
	return -ENOSYS;
}

#endif /* CONFIG_HAVE_KVM_EVENTFD */

void kvm_arch_irq_routing_update(struct kvm *kvm);

static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
{
        /*
         * Ensure the rest of the request is published to kvm_check_request's
         * caller.  Paired with the smp_mb__after_atomic in kvm_check_request.
         */
        smp_wmb();
        set_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_request_pending(struct kvm_vcpu *vcpu)
{
        return READ_ONCE(vcpu->requests);
}

static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
{
        return test_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
{
        clear_bit(req & KVM_REQUEST_MASK, (void *)&vcpu->requests);
}

static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
        if (kvm_test_request(req, vcpu)) {
                kvm_clear_request(req, vcpu);

                /*
                 * Ensure the rest of the request is visible to
                 * kvm_check_request's caller.  Paired with the smp_wmb
                 * in kvm_make_request.
                 */
                smp_mb__after_atomic();
                return true;
        } else {
                return false;
        }
}

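/*
 * Illustrative sketch (not part of this header) of the intended pairing:
 * a producer raises a request and kicks the vCPU, and the vCPU run loop
 * consumes it before entering the guest.  flush_guest_tlb() is a
 * hypothetical arch hook.
 *
 *      producer:
 *              kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
 *              kvm_vcpu_kick(vcpu);
 *
 *      vcpu run loop:
 *              if (kvm_request_pending(vcpu)) {
 *                      if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
 *                              flush_guest_tlb(vcpu);
 *              }
 */
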
extern bool kvm_rebooting;

extern unsigned int halt_poll_ns;
extern unsigned int halt_poll_ns_grow;
extern unsigned int halt_poll_ns_grow_start;
extern unsigned int halt_poll_ns_shrink;

struct kvm_device {
        const struct kvm_device_ops *ops;
        struct kvm *kvm;
        void *private;
        struct list_head vm_node;
};

/* create, destroy, and name are mandatory */
struct kvm_device_ops {
        const char *name;

        /*
         * create is called holding kvm->lock; any operations not suitable
         * to do while holding the lock should be deferred to init (see
         * below).
         */
        int (*create)(struct kvm_device *dev, u32 type);

        /*
         * init is called after create if create is successful, and is
         * called outside of holding kvm->lock.
         */
        void (*init)(struct kvm_device *dev);

        /*
         * Destroy is responsible for freeing dev.
         *
         * Destroy may be called before or after destructors are called
         * on emulated I/O regions, depending on whether a reference is
         * held by a vcpu or other kvm component that gets destroyed
         * after the emulated I/O.
         */
        void (*destroy)(struct kvm_device *dev);

        /*
         * Release is an alternative method to free the device.  It is
         * called when the device file descriptor is closed.  Once
         * release is called, the destroy method will not be called
         * anymore as the device is removed from the device list of
         * the VM.  kvm->lock is held.
         */
        void (*release)(struct kvm_device *dev);

        int (*set_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*get_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        int (*has_attr)(struct kvm_device *dev, struct kvm_device_attr *attr);
        long (*ioctl)(struct kvm_device *dev, unsigned int ioctl,
                      unsigned long arg);
        int (*mmap)(struct kvm_device *dev, struct vm_area_struct *vma);
};

void kvm_device_get(struct kvm_device *dev);
void kvm_device_put(struct kvm_device *dev);
struct kvm_device *kvm_device_from_filp(struct file *filp);
int kvm_register_device_ops(const struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);

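/*
 * Illustrative sketch (not part of this header): a minimal backend
 * supplies the mandatory name/create/destroy hooks and registers them
 * under a device type; ex_create(), ex_destroy() and "example" are
 * hypothetical.  Compare the real ops declared below.
 *
 *      static int ex_create(struct kvm_device *dev, u32 type)
 *      {
 *              dev->private = NULL;    // kvm->lock is held: keep this light
 *              return 0;
 *      }
 *
 *      static void ex_destroy(struct kvm_device *dev)
 *      {
 *              kfree(dev);             // destroy must free dev itself
 *      }
 *
 *      static struct kvm_device_ops ex_ops = {
 *              .name = "example",
 *              .create = ex_create,
 *              .destroy = ex_destroy,
 *      };
 *
 *      r = kvm_register_device_ops(&ex_ops, type);
 */
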
extern struct kvm_device_ops kvm_mpic_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;

#ifdef CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.in_spin_loop = val;
}
static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
        vcpu->spin_loop.dy_eligible = val;
}

#else /* !CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline void kvm_vcpu_set_in_spin_loop(struct kvm_vcpu *vcpu, bool val)
{
}

static inline void kvm_vcpu_set_dy_eligible(struct kvm_vcpu *vcpu, bool val)
{
}
#endif /* CONFIG_HAVE_KVM_CPU_RELAX_INTERCEPT */

static inline bool kvm_is_visible_memslot(struct kvm_memory_slot *memslot)
{
        return (memslot && memslot->id < KVM_USER_MEM_SLOTS &&
                !(memslot->flags & KVM_MEMSLOT_INVALID));
}

struct kvm_vcpu *kvm_get_running_vcpu(void);
struct kvm_vcpu * __percpu *kvm_get_running_vcpus(void);

#ifdef CONFIG_HAVE_KVM_IRQ_BYPASS
bool kvm_arch_has_irq_bypass(void);
int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *,
                                     struct irq_bypass_producer *);
void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *,
                                      struct irq_bypass_producer *);
void kvm_arch_irq_bypass_stop(struct irq_bypass_consumer *);
void kvm_arch_irq_bypass_start(struct irq_bypass_consumer *);
int kvm_arch_update_irqfd_routing(struct kvm *kvm, unsigned int host_irq,
                                  uint32_t guest_irq, bool set);
#endif /* CONFIG_HAVE_KVM_IRQ_BYPASS */

#ifdef CONFIG_HAVE_KVM_INVALID_WAKEUPS
/* If we wake up during the poll time, was it a successful poll? */
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return vcpu->valid_wakeup;
}

#else
static inline bool vcpu_valid_wakeup(struct kvm_vcpu *vcpu)
{
        return true;
}
#endif /* CONFIG_HAVE_KVM_INVALID_WAKEUPS */

#ifdef CONFIG_HAVE_KVM_NO_POLL
/* Callback that tells if we must not poll */
bool kvm_arch_no_poll(struct kvm_vcpu *vcpu);
#else
static inline bool kvm_arch_no_poll(struct kvm_vcpu *vcpu)
{
        return false;
}
#endif /* CONFIG_HAVE_KVM_NO_POLL */

#ifdef CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL
long kvm_arch_vcpu_async_ioctl(struct file *filp,
                               unsigned int ioctl, unsigned long arg);
#else
static inline long kvm_arch_vcpu_async_ioctl(struct file *filp,
                                             unsigned int ioctl,
                                             unsigned long arg)
{
        return -ENOIOCTLCMD;
}
#endif /* CONFIG_HAVE_KVM_VCPU_ASYNC_IOCTL */

void kvm_arch_mmu_notifier_invalidate_range(struct kvm *kvm,
                                            unsigned long start,
                                            unsigned long end);

#ifdef CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE
int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu);
#else
static inline int kvm_arch_vcpu_run_pid_change(struct kvm_vcpu *vcpu)
{
        return 0;
}
#endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */

typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);

int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
                                uintptr_t data, const char *name,
                                struct task_struct **thread_ptr);

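/*
 * Illustrative sketch (not part of this header): spawning a VM-scoped
 * worker.  example_fn, do_periodic_work(), the "kvm-example" name and
 * the worker pointer are hypothetical; thread_fn runs in the created
 * kthread until it returns or the thread is stopped.
 *
 *      static int example_fn(struct kvm *kvm, uintptr_t data)
 *      {
 *              while (!kthread_should_stop())
 *                      do_periodic_work(kvm);
 *              return 0;
 *      }
 *
 *      r = kvm_vm_create_worker_thread(kvm, example_fn, 0,
 *                                      "kvm-example", &worker);
 */
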
#ifdef CONFIG_KVM_XFER_TO_GUEST_WORK
static inline void kvm_handle_signal_exit(struct kvm_vcpu *vcpu)
{
        vcpu->run->exit_reason = KVM_EXIT_INTR;
        vcpu->stat.signal_exits++;
}
#endif /* CONFIG_KVM_XFER_TO_GUEST_WORK */

/*
 * This defines how many reserved entries we want to keep before we
 * kick the vCPU out to userspace, to avoid the dirty ring becoming
 * full.  This value can be tuned higher if e.g. PML is enabled on
 * the host.
 */
#define KVM_DIRTY_RING_RSVD_ENTRIES 64

/* Max number of entries allowed for each kvm dirty ring */
#define KVM_DIRTY_RING_MAX_ENTRIES 65536

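/*
 * A minimal sketch of the soft-full check the reservation above implies;
 * an assumption for illustration, not the authoritative helper (which
 * lives in the dirty ring implementation).
 */
static inline bool kvm_dirty_ring_example_soft_full(u32 used, u32 size)
{
        /* Kick the vCPU to userspace once only the reserved slack remains. */
        return used >= size - KVM_DIRTY_RING_RSVD_ENTRIES;
}
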
#endif