/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: MIPS specific KVM APIs
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/bootmem.h>

#include <asm/fpu.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>

#include <linux/kvm_host.h>

#include "interrupt.h"
#include "commpage.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

#ifndef VECTORSPACING
#define VECTORSPACING 0x100	/* for EI/VI mode */
#endif

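/*
 * Per-VCPU exit counters. Each entry maps a debugfs file name to a counter
 * in struct kvm_vcpu (via the VCPU_STAT() offset helper below); the generic
 * KVM code uses this table to publish the statistics under debugfs.
 */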
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x)
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "wait", VCPU_STAT(wait_exits), KVM_STAT_VCPU },
	{ "cache", VCPU_STAT(cache_exits), KVM_STAT_VCPU },
	{ "signal", VCPU_STAT(signal_exits), KVM_STAT_VCPU },
	{ "interrupt", VCPU_STAT(int_exits), KVM_STAT_VCPU },
	{ "cop_unusable", VCPU_STAT(cop_unusable_exits), KVM_STAT_VCPU },
	{ "tlbmod", VCPU_STAT(tlbmod_exits), KVM_STAT_VCPU },
	{ "tlbmiss_ld", VCPU_STAT(tlbmiss_ld_exits), KVM_STAT_VCPU },
	{ "tlbmiss_st", VCPU_STAT(tlbmiss_st_exits), KVM_STAT_VCPU },
	{ "addrerr_st", VCPU_STAT(addrerr_st_exits), KVM_STAT_VCPU },
	{ "addrerr_ld", VCPU_STAT(addrerr_ld_exits), KVM_STAT_VCPU },
	{ "syscall", VCPU_STAT(syscall_exits), KVM_STAT_VCPU },
	{ "resvd_inst", VCPU_STAT(resvd_inst_exits), KVM_STAT_VCPU },
	{ "break_inst", VCPU_STAT(break_inst_exits), KVM_STAT_VCPU },
	{ "trap_inst", VCPU_STAT(trap_inst_exits), KVM_STAT_VCPU },
	{ "msa_fpe", VCPU_STAT(msa_fpe_exits), KVM_STAT_VCPU },
	{ "fpe", VCPU_STAT(fpe_exits), KVM_STAT_VCPU },
	{ "msa_disabled", VCPU_STAT(msa_disabled_exits), KVM_STAT_VCPU },
	{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
#ifdef CONFIG_KVM_MIPS_VZ
	{ "vz_gpsi", VCPU_STAT(vz_gpsi_exits), KVM_STAT_VCPU },
	{ "vz_gsfc", VCPU_STAT(vz_gsfc_exits), KVM_STAT_VCPU },
	{ "vz_hc", VCPU_STAT(vz_hc_exits), KVM_STAT_VCPU },
	{ "vz_grr", VCPU_STAT(vz_grr_exits), KVM_STAT_VCPU },
	{ "vz_gva", VCPU_STAT(vz_gva_exits), KVM_STAT_VCPU },
	{ "vz_ghfc", VCPU_STAT(vz_ghfc_exits), KVM_STAT_VCPU },
	{ "vz_gpa", VCPU_STAT(vz_gpa_exits), KVM_STAT_VCPU },
	{ "vz_resvd", VCPU_STAT(vz_resvd_exits), KVM_STAT_VCPU },
#endif
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
	{NULL}
};

/*
 * XXXKYMA: We are simulating a processor that has the WII bit set in
 * Config7, so we are "runnable" if interrupts are pending
 */
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return !!(vcpu->arch.pending_exceptions);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

int kvm_arch_hardware_enable(void)
{
	return kvm_mips_callbacks->hardware_enable();
}

void kvm_arch_hardware_disable(void)
{
	kvm_mips_callbacks->hardware_disable();
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	switch (type) {
	case KVM_VM_MIPS_TE:
		break;
	default:
		/* Unsupported KVM type */
		return -EINVAL;
	}

	/* Allocate page table to map GPA -> RPA */
	kvm->arch.gpa_mm.pgd = kvm_pgd_alloc();
	if (!kvm->arch.gpa_mm.pgd)
		return -ENOMEM;

	return 0;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_mips_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_arch_vcpu_free(vcpu);
	}

	mutex_lock(&kvm->lock);

	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	mutex_unlock(&kvm->lock);
}

static void kvm_mips_free_gpa_pt(struct kvm *kvm)
{
	/* It should always be safe to remove after flushing the whole range */
	WARN_ON(!kvm_mips_flush_gpa_pt(kvm, 0, ~0));
	pgd_free(NULL, kvm->arch.gpa_mm.pgd);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_mips_free_vcpus(kvm);
	kvm_mips_free_gpa_pt(kvm);
}

long kvm_arch_dev_ioctl(struct file *filp, unsigned int ioctl,
			unsigned long arg)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return 0;
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
	/* Flush whole GPA */
	kvm_mips_flush_gpa_pt(kvm, 0, ~0);

	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_all(kvm);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	/*
	 * The slot has been made invalid (ready for moving or deletion), so we
	 * need to ensure that it can no longer be accessed by any guest VCPUs.
	 */

	spin_lock(&kvm->mmu_lock);
	/* Flush slot from GPA */
	kvm_mips_flush_gpa_pt(kvm, slot->base_gfn,
			      slot->base_gfn + slot->npages - 1);
	/* Let implementation do the rest */
	kvm_mips_callbacks->flush_shadow_memslot(kvm, slot);
	spin_unlock(&kvm->mmu_lock);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	int needs_flush;

	kvm_debug("%s: kvm: %p slot: %d, GPA: %llx, size: %llx, QVA: %llx\n",
		  __func__, kvm, mem->slot, mem->guest_phys_addr,
		  mem->memory_size, mem->userspace_addr);

	/*
	 * If dirty page logging is enabled, write protect all pages in the slot
	 * ready for dirty logging.
	 *
	 * There is no need to do this in any of the following cases:
	 * CREATE:	No dirty mappings will already exist.
	 * MOVE/DELETE:	The old mappings will already have been cleaned up by
	 *		kvm_arch_flush_shadow_memslot()
	 */
	if (change == KVM_MR_FLAGS_ONLY &&
	    (!(old->flags & KVM_MEM_LOG_DIRTY_PAGES) &&
	     new->flags & KVM_MEM_LOG_DIRTY_PAGES)) {
		spin_lock(&kvm->mmu_lock);
		/* Write protect GPA page table entries */
		needs_flush = kvm_mips_mkclean_gpa_pt(kvm, new->base_gfn,
					new->base_gfn + new->npages - 1);
		/* Let implementation do the rest */
		if (needs_flush)
			kvm_mips_callbacks->flush_shadow_memslot(kvm, new);
		spin_unlock(&kvm->mmu_lock);
	}
}

static inline void dump_handler(const char *symbol, void *start, void *end)
{
	u32 *p;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (p = start; p < (u32 *)end; ++p)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

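/*
 * Layout of the per-VCPU guest exception base (gebase) area built below:
 *   gebase + 0x000:  TLB refill handler (at +0x080 on 64-bit VZ where KX=1)
 *   gebase + 0x180:  general exception entry
 *   gebase + 0x200 + n * VECTORSPACING:  vectored interrupt entries
 *   gebase + 0x2000: common exit handler, followed by the guest entry routine
 */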
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	int err, size;
	void *gebase, *p, *handler, *refill_start, *refill_end;
	int i;

	struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);

	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);

	if (err)
		goto out_free_cpu;

	kvm_debug("kvm @ %p: create cpu %d at %p\n", kvm, id, vcpu);

	/*
	 * Allocate space for host mode exception handlers that handle
	 * guest mode exits
	 */
	if (cpu_has_veic || cpu_has_vint)
		size = 0x200 + VECTORSPACING * 64;
	else
		size = 0x4000;

	gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);

	if (!gebase) {
		err = -ENOMEM;
		goto out_uninit_cpu;
	}
	kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
		  ALIGN(size, PAGE_SIZE), gebase);

	/*
	 * Check new ebase actually fits in CP0_EBase. The lack of a write gate
	 * limits us to the low 512MB of physical address space. If the memory
	 * we allocate is out of range, just give up now.
	 */
	if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
		kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
			gebase);
		err = -ENOMEM;
		goto out_free_gebase;
	}

	/* Save new ebase */
	vcpu->arch.guest_ebase = gebase;

	/* Build guest exception vectors dynamically in unmapped memory */
	handler = gebase + 0x2000;

	/* TLB refill (or XTLB refill on 64-bit VZ where KX=1) */
	refill_start = gebase;
	if (IS_ENABLED(CONFIG_KVM_MIPS_VZ) && IS_ENABLED(CONFIG_64BIT))
		refill_start += 0x080;
	refill_end = kvm_mips_build_tlb_refill_exception(refill_start, handler);

	/* General Exception Entry point */
	kvm_mips_build_exception(gebase + 0x180, handler);

	/* For vectored interrupts poke the exception code @ all offsets 0-7 */
	for (i = 0; i < 8; i++) {
		kvm_debug("L1 Vectored handler @ %p\n",
			  gebase + 0x200 + (i * VECTORSPACING));
		kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
					 handler);
	}

	/* General exit handler */
	p = handler;
	p = kvm_mips_build_exit(p);

	/* Guest entry routine */
	vcpu->arch.vcpu_run = p;
	p = kvm_mips_build_vcpu_run(p);

	/* Dump the generated code */
	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");
	dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
	dump_handler("kvm_tlb_refill", refill_start, refill_end);
	dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
	dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);

	/* Invalidate the icache for these ranges */
	flush_icache_range((unsigned long)gebase,
			   (unsigned long)gebase + ALIGN(size, PAGE_SIZE));

	/*
	 * Allocate comm page for guest kernel, a TLB will be reserved for
	 * mapping GVA @ 0xFFFF8000 to this page
	 */
	vcpu->arch.kseg0_commpage = kzalloc(PAGE_SIZE << 1, GFP_KERNEL);

	if (!vcpu->arch.kseg0_commpage) {
		err = -ENOMEM;
		goto out_free_gebase;
	}

	kvm_debug("Allocated COMM page @ %p\n", vcpu->arch.kseg0_commpage);
	kvm_mips_commpage_init(vcpu);

	/* Init */
	vcpu->arch.last_sched_cpu = -1;

	return vcpu;

out_free_gebase:
	kfree(gebase);

out_uninit_cpu:
	kvm_vcpu_uninit(vcpu);

out_free_cpu:
	kfree(vcpu);

out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	kvm_vcpu_uninit(vcpu);

	kvm_mips_dump_stats(vcpu);

	kvm_mmu_free_memory_caches(vcpu);
	kfree(vcpu->arch.guest_ebase);
	kfree(vcpu->arch.kseg0_commpage);
	kfree(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -ENOIOCTLCMD;
}

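/*
 * Main VCPU run loop (KVM_RUN): complete any pending MMIO load, honour
 * run->immediate_exit, then enter the guest through the implementation's
 * vcpu_run() callback with interrupts disabled and the guest sigset applied.
 */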
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r = -EINTR;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvm_mips_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	}

	if (run->immediate_exit)
		goto out;

	lose_fpu(1);

	local_irq_disable();
	guest_enter_irqoff();
	trace_kvm_enter(vcpu);

	/*
	 * Make sure the read of VCPU requests in vcpu_run() callback is not
	 * reordered ahead of the write to vcpu->mode, or we could miss a TLB
	 * flush request while the requester sees the VCPU as outside of guest
	 * mode and not needing an IPI.
	 */
	smp_store_mb(vcpu->mode, IN_GUEST_MODE);

	r = kvm_mips_callbacks->vcpu_run(run, vcpu);

	trace_kvm_out(vcpu);
	guest_exit_irqoff();
	local_irq_enable();

out:
	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

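/*
 * KVM_INTERRUPT: irq->irq selects both the line and the direction. A positive
 * value (2, 3 or 4) queues the corresponding I/O interrupt, the negated value
 * dequeues it. irq->cpu == -1 targets the calling VCPU, otherwise the VCPU
 * with that index; a sleeping target VCPU is woken up.
 */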
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu,
			     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;
	struct kvm_vcpu *dvcpu = NULL;

	if (intr == 3 || intr == -3 || intr == 4 || intr == -4)
		kvm_debug("%s: CPU: %d, INTR: %d\n", __func__, irq->cpu,
			  (int)intr);

	if (irq->cpu == -1)
		dvcpu = vcpu;
	else
		dvcpu = vcpu->kvm->vcpus[irq->cpu];

	if (intr == 2 || intr == 3 || intr == 4) {
		kvm_mips_callbacks->queue_io_int(dvcpu, irq);

	} else if (intr == -2 || intr == -3 || intr == -4) {
		kvm_mips_callbacks->dequeue_io_int(dvcpu, irq);
	} else {
		kvm_err("%s: invalid interrupt ioctl (%d:%d)\n", __func__,
			irq->cpu, irq->irq);
		return -EINVAL;
	}

	dvcpu->arch.wait = 0;

	if (swait_active(&dvcpu->wq))
		swake_up(&dvcpu->wq);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -ENOIOCTLCMD;
}

static u64 kvm_mips_get_one_regs[] = {
	KVM_REG_MIPS_R0,
	KVM_REG_MIPS_R1,
	KVM_REG_MIPS_R2,
	KVM_REG_MIPS_R3,
	KVM_REG_MIPS_R4,
	KVM_REG_MIPS_R5,
	KVM_REG_MIPS_R6,
	KVM_REG_MIPS_R7,
	KVM_REG_MIPS_R8,
	KVM_REG_MIPS_R9,
	KVM_REG_MIPS_R10,
	KVM_REG_MIPS_R11,
	KVM_REG_MIPS_R12,
	KVM_REG_MIPS_R13,
	KVM_REG_MIPS_R14,
	KVM_REG_MIPS_R15,
	KVM_REG_MIPS_R16,
	KVM_REG_MIPS_R17,
	KVM_REG_MIPS_R18,
	KVM_REG_MIPS_R19,
	KVM_REG_MIPS_R20,
	KVM_REG_MIPS_R21,
	KVM_REG_MIPS_R22,
	KVM_REG_MIPS_R23,
	KVM_REG_MIPS_R24,
	KVM_REG_MIPS_R25,
	KVM_REG_MIPS_R26,
	KVM_REG_MIPS_R27,
	KVM_REG_MIPS_R28,
	KVM_REG_MIPS_R29,
	KVM_REG_MIPS_R30,
	KVM_REG_MIPS_R31,

#ifndef CONFIG_CPU_MIPSR6
	KVM_REG_MIPS_HI,
	KVM_REG_MIPS_LO,
#endif
	KVM_REG_MIPS_PC,
};

static u64 kvm_mips_get_one_regs_fpu[] = {
	KVM_REG_MIPS_FCR_IR,
	KVM_REG_MIPS_FCR_CSR,
};

static u64 kvm_mips_get_one_regs_msa[] = {
	KVM_REG_MIPS_MSA_IR,
	KVM_REG_MIPS_MSA_CSR,
};

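/*
 * The KVM_GET_REG_LIST set is assembled dynamically: the base list above,
 * plus FPU registers when the guest may have an FPU (odd doubles only if the
 * host FPU implements 64-bit FP registers), plus MSA vector registers when
 * MSA is available, plus any implementation-specific registers reported by
 * the callbacks.
 */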
static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
{
	unsigned long ret;

	ret = ARRAY_SIZE(kvm_mips_get_one_regs);
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
		/* odd doubles */
		if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
			ret += 16;
	}
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
	ret += kvm_mips_callbacks->num_regs(vcpu);

	return ret;
}

static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
{
	u64 index;
	unsigned int i;

	if (copy_to_user(indices, kvm_mips_get_one_regs,
			 sizeof(kvm_mips_get_one_regs)))
		return -EFAULT;
	indices += ARRAY_SIZE(kvm_mips_get_one_regs);

	if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
				 sizeof(kvm_mips_get_one_regs_fpu)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_FPR_32(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;

			/* skip odd doubles if no F64 */
			if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				continue;

			index = KVM_REG_MIPS_FPR_64(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
		if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
				 sizeof(kvm_mips_get_one_regs_msa)))
			return -EFAULT;
		indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);

		for (i = 0; i < 32; ++i) {
			index = KVM_REG_MIPS_VEC_128(i);
			if (copy_to_user(indices, &index, sizeof(index)))
				return -EFAULT;
			++indices;
		}
	}

	return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
}

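/*
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG accessors. The value is transferred
 * through reg->addr in userspace; the access width (32, 64 or 128 bits) is
 * taken from the KVM_REG_SIZE field of reg->id, with 128-bit transfers used
 * only for the MSA vector registers.
 */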
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	int ret;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
		v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		v = (long)vcpu->arch.hi;
		break;
	case KVM_REG_MIPS_LO:
		v = (long)vcpu->arch.lo;
		break;
#endif
	case KVM_REG_MIPS_PC:
		v = (long)vcpu->arch.pc;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			v = get_fpr32(&fpu->fpr[idx], 0);
		else
			v = get_fpr32(&fpu->fpr[idx & ~1], idx & 1);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		v = get_fpr64(&fpu->fpr[idx], 0);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.fpu_id;
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		v = fpu->fcr31;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Can't access MSA registers in FR=0 mode */
		if (!(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 0);
		vs[1] = get_fpr64(&fpu->fpr[idx], 1);
#else
		/* most significant byte first */
		vs[0] = get_fpr64(&fpu->fpr[idx], 1);
		vs[1] = get_fpr64(&fpu->fpr[idx], 0);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = boot_cpu_data.msa_id;
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		v = fpu->msacsr;
		break;

	/* registers to be handled specially */
	default:
		ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
		if (ret)
			return ret;
		break;
	}
	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		return put_user(v, uaddr64);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		u32 v32 = (u32)v;

		return put_user(v32, uaddr32);
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_to_user(uaddr, vs, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}
}

static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
			    const struct kvm_one_reg *reg)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct mips_fpu_struct *fpu = &vcpu->arch.fpu;
	s64 v;
	s64 vs[2];
	unsigned int idx;

	if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
		u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;

		if (get_user(v, uaddr64) != 0)
			return -EFAULT;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U32) {
		u32 __user *uaddr32 = (u32 __user *)(long)reg->addr;
		s32 v32;

		if (get_user(v32, uaddr32) != 0)
			return -EFAULT;
		v = (s64)v32;
	} else if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U128) {
		void __user *uaddr = (void __user *)(long)reg->addr;

		return copy_from_user(vs, uaddr, 16) ? -EFAULT : 0;
	} else {
		return -EINVAL;
	}

	switch (reg->id) {
	/* General purpose registers */
	case KVM_REG_MIPS_R0:
		/* Silently ignore requests to set $0 */
		break;
	case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
		vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
		break;
#ifndef CONFIG_CPU_MIPSR6
	case KVM_REG_MIPS_HI:
		vcpu->arch.hi = v;
		break;
	case KVM_REG_MIPS_LO:
		vcpu->arch.lo = v;
		break;
#endif
	case KVM_REG_MIPS_PC:
		vcpu->arch.pc = v;
		break;

	/* Floating point registers */
	case KVM_REG_MIPS_FPR_32(0) ... KVM_REG_MIPS_FPR_32(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_32(0);
		/* Odd singles in top of even double when FR=0 */
		if (kvm_read_c0_guest_status(cop0) & ST0_FR)
			set_fpr32(&fpu->fpr[idx], 0, v);
		else
			set_fpr32(&fpu->fpr[idx & ~1], idx & 1, v);
		break;
	case KVM_REG_MIPS_FPR_64(0) ... KVM_REG_MIPS_FPR_64(31):
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_FPR_64(0);
		/* Can't access odd doubles in FR=0 mode */
		if (idx & 1 && !(kvm_read_c0_guest_status(cop0) & ST0_FR))
			return -EINVAL;
		set_fpr64(&fpu->fpr[idx], 0, v);
		break;
	case KVM_REG_MIPS_FCR_IR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_FCR_CSR:
		if (!kvm_mips_guest_has_fpu(&vcpu->arch))
			return -EINVAL;
		fpu->fcr31 = v;
		break;

	/* MIPS SIMD Architecture (MSA) registers */
	case KVM_REG_MIPS_VEC_128(0) ... KVM_REG_MIPS_VEC_128(31):
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		idx = reg->id - KVM_REG_MIPS_VEC_128(0);
#ifdef CONFIG_CPU_LITTLE_ENDIAN
		/* least significant byte first */
		set_fpr64(&fpu->fpr[idx], 0, vs[0]);
		set_fpr64(&fpu->fpr[idx], 1, vs[1]);
#else
		/* most significant byte first */
		set_fpr64(&fpu->fpr[idx], 1, vs[0]);
		set_fpr64(&fpu->fpr[idx], 0, vs[1]);
#endif
		break;
	case KVM_REG_MIPS_MSA_IR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		/* Read-only */
		break;
	case KVM_REG_MIPS_MSA_CSR:
		if (!kvm_mips_guest_has_msa(&vcpu->arch))
			return -EINVAL;
		fpu->msacsr = v;
		break;

	/* registers to be handled specially */
	default:
		return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
	}
	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r = 0;

	if (!kvm_vm_ioctl_check_extension(vcpu->kvm, cap->cap))
		return -EINVAL;
	if (cap->flags)
		return -EINVAL;
	if (cap->args[0])
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_MIPS_FPU:
		vcpu->arch.fpu_enabled = true;
		break;
	case KVM_CAP_MIPS_MSA:
		vcpu->arch.msa_enabled = true;
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
			 unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG: {
		struct kvm_one_reg reg;

		if (copy_from_user(&reg, argp, sizeof(reg)))
			return -EFAULT;
		if (ioctl == KVM_SET_ONE_REG)
			return kvm_mips_set_reg(vcpu, &reg);
		else
			return kvm_mips_get_reg(vcpu, &reg);
	}
	case KVM_GET_REG_LIST: {
		struct kvm_reg_list __user *user_list = argp;
		struct kvm_reg_list reg_list;
		unsigned n;

		if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
			return -EFAULT;
		n = reg_list.n;
		reg_list.n = kvm_mips_num_regs(vcpu);
		if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
			return -EFAULT;
		if (n < reg_list.n)
			return -E2BIG;
		return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
	}
	case KVM_INTERRUPT:
		{
			struct kvm_mips_interrupt irq;

			if (copy_from_user(&irq, argp, sizeof(irq)))
				return -EFAULT;
			kvm_debug("[%d] %s: irq: %d\n", vcpu->vcpu_id, __func__,
				  irq.irq);

			r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
			break;
		}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		if (copy_from_user(&cap, argp, sizeof(cap)))
			return -EFAULT;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}
	default:
		r = -ENOIOCTLCMD;
	}
	return r;
}

/**
 * kvm_vm_ioctl_get_dirty_log - get and clear the log of dirty pages in a slot
 * @kvm: kvm instance
 * @log: slot id and address to which we copy the log
 *
 * Steps 1-4 below provide general overview of dirty page logging. See
 * kvm_get_dirty_log_protect() function description for additional details.
 *
 * We call kvm_get_dirty_log_protect() to handle steps 1-3, upon return we
 * always flush the TLB (step 4) even if previous step failed and the dirty
 * bitmap may be corrupt. Regardless of previous outcome the KVM logging API
 * does not preclude user space subsequent dirty log read. Flushing TLB ensures
 * writes will be marked dirty for next log read.
 *
 *   1. Take a snapshot of the bit and clear it if needed.
 *   2. Write protect the corresponding page.
 *   3. Copy the snapshot to the userspace.
 *   4. Flush TLB's if needed.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	bool is_dirty = false;
	int r;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log_protect(kvm, log, &is_dirty);

	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		/* Let implementation handle TLB/GVA invalidation */
		kvm_mips_callbacks->flush_shadow_memslot(kvm, memslot);
	}

	mutex_unlock(&kvm->slots_lock);
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOIOCTLCMD;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	if (kvm_mips_callbacks) {
		kvm_err("kvm: module already exists\n");
		return -EEXIST;
	}

	return kvm_mips_emulation_init(&kvm_mips_callbacks);
}

void kvm_arch_exit(void)
{
	kvm_mips_callbacks = NULL;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOIOCTLCMD;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOIOCTLCMD;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_READONLY_MEM:
	case KVM_CAP_SYNC_MMU:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	case KVM_CAP_NR_VCPUS:
		r = num_online_cpus();
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_MIPS_FPU:
		/* We don't handle systems with inconsistent cpu_has_fpu */
		r = !!raw_cpu_has_fpu;
		break;
	case KVM_CAP_MIPS_MSA:
		/*
		 * We don't support MSA vector partitioning yet:
		 * 1) It would require explicit support which can't be tested
		 *    yet due to lack of support in current hardware.
		 * 2) It extends the state that would need to be saved/restored
		 *    by e.g. QEMU for migration.
		 *
		 * When vector partitioning hardware becomes available, support
		 * could be added by requiring a flag when enabling
		 * KVM_CAP_MIPS_MSA capability to indicate that userland knows
		 * to save/restore the appropriate extra state.
		 */
		r = cpu_has_msa && !(boot_cpu_data.msa_id & MSA_IR_WRPF);
		break;
	default:
		r = kvm_mips_callbacks->check_extension(kvm, ext);
		break;
	}
	return r;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvm_mips_pending_timer(vcpu);
}

int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu)
{
	int i;
	struct mips_coproc *cop0;

	if (!vcpu)
		return -1;

	kvm_debug("VCPU Register Dump:\n");
	kvm_debug("\tpc = 0x%08lx\n", vcpu->arch.pc);
	kvm_debug("\texceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		kvm_debug("\tgpr%02d: %08lx %08lx %08lx %08lx\n", i,
			  vcpu->arch.gprs[i],
			  vcpu->arch.gprs[i + 1],
			  vcpu->arch.gprs[i + 2], vcpu->arch.gprs[i + 3]);
	}
	kvm_debug("\thi: 0x%08lx\n", vcpu->arch.hi);
	kvm_debug("\tlo: 0x%08lx\n", vcpu->arch.lo);

	cop0 = vcpu->arch.cop0;
	kvm_debug("\tStatus: 0x%08x, Cause: 0x%08x\n",
		  kvm_read_c0_guest_status(cop0),
		  kvm_read_c0_guest_cause(cop0));

	kvm_debug("\tEPC: 0x%08lx\n", kvm_read_c0_guest_epc(cop0));

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 1; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		vcpu->arch.gprs[i] = regs->gpr[i];
	vcpu->arch.gprs[0] = 0; /* zero is special, and cannot be set. */
	vcpu->arch.hi = regs->hi;
	vcpu->arch.lo = regs->lo;
	vcpu->arch.pc = regs->pc;

	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(vcpu->arch.gprs); i++)
		regs->gpr[i] = vcpu->arch.gprs[i];

	regs->hi = vcpu->arch.hi;
	regs->lo = vcpu->arch.lo;
	regs->pc = vcpu->arch.pc;

	return 0;
}

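/*
 * Emulated CP0 Count/Compare timer: when the hrtimer fires, queue a timer
 * interrupt for the guest and wake the VCPU if it is waiting (e.g. halted
 * in a guest WAIT instruction).
 */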
static void kvm_mips_comparecount_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvm_mips_callbacks->queue_timer_int(vcpu);

	vcpu->arch.wait = 0;
	if (swait_active(&vcpu->wq))
		swake_up(&vcpu->wq);
}

/* low level hrtimer wake routine */
static enum hrtimer_restart kvm_mips_comparecount_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.comparecount_timer);
	kvm_mips_comparecount_func((unsigned long) vcpu);
	return kvm_mips_count_timeout(vcpu);
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int err;

	err = kvm_mips_callbacks->vcpu_init(vcpu);
	if (err)
		return err;

	hrtimer_init(&vcpu->arch.comparecount_timer, CLOCK_MONOTONIC,
		     HRTIMER_MODE_REL);
	vcpu->arch.comparecount_timer.function = kvm_mips_comparecount_wakeup;
	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvm_mips_callbacks->vcpu_uninit(vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/* Initial guest state */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return kvm_mips_callbacks->vcpu_setup(vcpu);
}

static void kvm_mips_set_c0_status(void)
{
	u32 status = read_c0_status();

	if (cpu_has_dsp)
		status |= (ST0_MX);

	write_c0_status(status);
	ehb();
}

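/*
 * Common guest exit handler, entered from the generated exit code with
 * interrupts still disabled: check the privilege of the faulting context,
 * dispatch on the CP0 Cause ExcCode to the implementation callbacks, deliver
 * pending guest interrupts, and on re-entry restore FCSR/MSACSR if the guest
 * FPU/MSA context is live.
 */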
/*
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	enum emulation_result er = EMULATE_DONE;
	u32 inst;
	int ret = RESUME_GUEST;

	vcpu->mode = OUTSIDE_GUEST_MODE;

	/* re-enable HTW before enabling interrupts */
	htw_start();

	/* Set a default exit reason */
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/*
	 * Set the appropriate status bits based on host CPU features,
	 * before we hit the scheduler
	 */
	kvm_mips_set_c0_status();

	local_irq_enable();

	kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
		  cause, opc, run, vcpu);
	trace_kvm_exit(vcpu, exccode);

	/*
	 * Do a privilege check, if in UM most of these exit conditions end up
	 * causing an exception to be delivered to the Guest Kernel
	 */
	er = kvm_mips_check_privilege(cause, opc, run, vcpu);
	if (er == EMULATE_PRIV_FAIL) {
		goto skip_emul;
	} else if (er == EMULATE_FAIL) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		goto skip_emul;
	}

	switch (exccode) {
	case EXCCODE_INT:
		kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);

		++vcpu->stat.int_exits;

		if (need_resched())
			cond_resched();

		ret = RESUME_GUEST;
		break;

	case EXCCODE_CPU:
		kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);

		++vcpu->stat.cop_unusable_exits;
		ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
		/* XXXKYMA: Might need to return to user space */
		if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
			ret = RESUME_HOST;
		break;

	case EXCCODE_MOD:
		++vcpu->stat.tlbmod_exits;
		ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
		break;

	case EXCCODE_TLBS:
		kvm_debug("TLB ST fault: cause %#x, status %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, kvm_read_c0_guest_status(vcpu->arch.cop0), opc,
			  badvaddr);

		++vcpu->stat.tlbmiss_st_exits;
		ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
		break;

	case EXCCODE_TLBL:
		kvm_debug("TLB LD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  cause, opc, badvaddr);

		++vcpu->stat.tlbmiss_ld_exits;
		ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
		break;

	case EXCCODE_ADES:
		++vcpu->stat.addrerr_st_exits;
		ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
		break;

	case EXCCODE_ADEL:
		++vcpu->stat.addrerr_ld_exits;
		ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
		break;

	case EXCCODE_SYS:
		++vcpu->stat.syscall_exits;
		ret = kvm_mips_callbacks->handle_syscall(vcpu);
		break;

	case EXCCODE_RI:
		++vcpu->stat.resvd_inst_exits;
		ret = kvm_mips_callbacks->handle_res_inst(vcpu);
		break;

	case EXCCODE_BP:
		++vcpu->stat.break_inst_exits;
		ret = kvm_mips_callbacks->handle_break(vcpu);
		break;

	case EXCCODE_TR:
		++vcpu->stat.trap_inst_exits;
		ret = kvm_mips_callbacks->handle_trap(vcpu);
		break;

	case EXCCODE_MSAFPE:
		++vcpu->stat.msa_fpe_exits;
		ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
		break;

	case EXCCODE_FPE:
		++vcpu->stat.fpe_exits;
		ret = kvm_mips_callbacks->handle_fpe(vcpu);
		break;

	case EXCCODE_MSADIS:
		++vcpu->stat.msa_disabled_exits;
		ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
		break;

	case EXCCODE_GE:
		/* defer exit accounting to handler */
		ret = kvm_mips_callbacks->handle_guest_exit(vcpu);
		break;

	default:
		if (cause & CAUSEF_BD)
			opc += 1;
		inst = 0;
		kvm_get_badinstr(opc, vcpu, &inst);
		kvm_err("Exception Code: %d, not yet handled, @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
			exccode, opc, inst, badvaddr,
			kvm_read_c0_guest_status(vcpu->arch.cop0));
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	}

skip_emul:
	local_irq_disable();

	if (er == EMULATE_DONE && !(ret & RESUME_HOST))
		kvm_mips_deliver_interrupts(vcpu, cause);

	if (!(ret & RESUME_HOST)) {
		/* Only check for signals if not already exiting to userspace */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			ret = (-EINTR << 2) | RESUME_HOST;
			++vcpu->stat.signal_exits;
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
		}
	}

	if (ret == RESUME_GUEST) {
		trace_kvm_reenter(vcpu);

		/*
		 * Make sure the read of VCPU requests in vcpu_reenter()
		 * callback is not reordered ahead of the write to vcpu->mode,
		 * or we could miss a TLB flush request while the requester sees
		 * the VCPU as outside of guest mode and not needing an IPI.
		 */
		smp_store_mb(vcpu->mode, IN_GUEST_MODE);

		kvm_mips_callbacks->vcpu_reenter(run, vcpu);

		/*
		 * If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
		 * is live), restore FCR31 / MSACSR.
		 *
		 * This should be before returning to the guest exception
		 * vector, as it may well cause an [MSA] FP exception if there
		 * are pending exception bits unmasked. (see
		 * kvm_mips_csr_die_notifier() for how that is handled).
		 */
		if (kvm_mips_guest_has_fpu(&vcpu->arch) &&
		    read_c0_status() & ST0_CU1)
			__kvm_restore_fcsr(&vcpu->arch);

		if (kvm_mips_guest_has_msa(&vcpu->arch) &&
		    read_c0_config5() & MIPS_CONF5_MSAEN)
			__kvm_restore_msacsr(&vcpu->arch);
	}

	/* Disable HTW before returning to guest or host */
	htw_stop();

	return ret;
}

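/*
 * Lazy FPU/MSA context management: vcpu->arch.aux_inuse tracks which of the
 * guest's FPU and MSA register state is currently live in the hardware.
 * kvm_own_fpu()/kvm_own_msa() enable the unit and restore state on first
 * guest use, kvm_drop_fpu() discards the live state, and kvm_lose_fpu()
 * saves it back and disables the unit again.
 */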
/* Enable FPU for guest and restore context */
void kvm_own_fpu(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	sr = kvm_read_c0_guest_status(cop0);

	/*
	 * If MSA state is already live, it is undefined how it interacts with
	 * FR=0 FPU state, and we don't want to hit reserved instruction
	 * exceptions trying to save the MSA state later when CU=1 && FR=1, so
	 * play it safe and save it first.
	 *
	 * In theory we shouldn't ever hit this case since kvm_lose_fpu() should
	 * get called when guest CU1 is set, however we can't trust the guest
	 * not to clobber the status register directly via the commpage.
	 */
	if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
		kvm_lose_fpu(vcpu);

	/*
	 * Enable FPU for guest
	 * We set FR and FRE according to guest context
	 */
	change_c0_status(ST0_CU1 | ST0_FR, sr);
	if (cpu_has_fre) {
		cfg5 = kvm_read_c0_guest_config5(cop0);
		change_c0_config5(MIPS_CONF5_FRE, cfg5);
	}
	enable_fpu_hazard();

	/* If guest FPU state not active, restore it now */
	if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
		__kvm_restore_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
	} else {
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
	}

	preempt_enable();
}

#ifdef CONFIG_CPU_HAS_MSA
/* Enable MSA for guest and restore context */
void kvm_own_msa(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned int sr, cfg5;

	preempt_disable();

	/*
	 * Enable FPU if enabled in guest, since we're restoring FPU context
	 * anyway. We set FR and FRE according to guest context.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		sr = kvm_read_c0_guest_status(cop0);

		/*
		 * If FR=0 FPU state is already live, it is undefined how it
		 * interacts with MSA state, so play it safe and save it first.
		 */
		if (!(sr & ST0_FR) &&
		    (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
				KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
			kvm_lose_fpu(vcpu);

		change_c0_status(ST0_CU1 | ST0_FR, sr);
		if (sr & ST0_CU1 && cpu_has_fre) {
			cfg5 = kvm_read_c0_guest_config5(cop0);
			change_c0_config5(MIPS_CONF5_FRE, cfg5);
		}
	}

	/* Enable MSA for guest */
	set_c0_config5(MIPS_CONF5_MSAEN);
	enable_fpu_hazard();

	switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
	case KVM_MIPS_AUX_FPU:
		/*
		 * Guest FPU state already loaded, only restore upper MSA state
		 */
		__kvm_restore_msa_upper(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
		break;
	case 0:
		/* Neither FPU nor MSA already active, restore full MSA state */
		__kvm_restore_msa(&vcpu->arch);
		vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
		if (kvm_mips_guest_has_fpu(&vcpu->arch))
			vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
			      KVM_TRACE_AUX_FPU_MSA);
		break;
	default:
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
		break;
	}

	preempt_enable();
}
#endif

/* Drop FPU & MSA without saving it */
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		disable_msa();
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
	}
	if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		clear_c0_status(ST0_CU1 | ST0_FR);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
	}
	preempt_enable();
}

/* Save and disable FPU & MSA */
void kvm_lose_fpu(struct kvm_vcpu *vcpu)
{
	/*
	 * With T&E, FPU & MSA get disabled in root context (hardware) when it
	 * is disabled in guest context (software), but the register state in
	 * the hardware may still be in use.
	 * This is why we explicitly re-enable the hardware before saving.
	 */

	preempt_disable();
	if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_config5(MIPS_CONF5_MSAEN);
			enable_fpu_hazard();
		}

		__kvm_save_msa(&vcpu->arch);
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);

		/* Disable MSA & FPU */
		disable_msa();
		if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
			clear_c0_status(ST0_CU1 | ST0_FR);
			disable_fpu_hazard();
		}
		vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
	} else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
		if (!IS_ENABLED(CONFIG_KVM_MIPS_VZ)) {
			set_c0_status(ST0_CU1);
			enable_fpu_hazard();
		}

		__kvm_save_fpu(&vcpu->arch);
		vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
		trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);

		/* Disable FPU */
		clear_c0_status(ST0_CU1 | ST0_FR);
		disable_fpu_hazard();
	}
	preempt_enable();
}

/*
 * Step over a specific ctc1 to FCSR and a specific ctcmsa to MSACSR which are
 * used to restore guest FCSR/MSACSR state and may trigger a "harmless" FP/MSAFP
 * exception if cause bits are set in the value being written.
 */
static int kvm_mips_csr_die_notify(struct notifier_block *self,
				   unsigned long cmd, void *ptr)
{
	struct die_args *args = (struct die_args *)ptr;
	struct pt_regs *regs = args->regs;
	unsigned long pc;

	/* Only interested in FPE and MSAFPE */
	if (cmd != DIE_FP && cmd != DIE_MSAFP)
		return NOTIFY_DONE;

	/* Return immediately if guest context isn't active */
	if (!(current->flags & PF_VCPU))
		return NOTIFY_DONE;

	/* Should never get here from user mode */
	BUG_ON(user_mode(regs));

	pc = instruction_pointer(regs);
	switch (cmd) {
	case DIE_FP:
		/* match 2nd instruction in __kvm_restore_fcsr */
		if (pc != (unsigned long)&__kvm_restore_fcsr + 4)
			return NOTIFY_DONE;
		break;
	case DIE_MSAFP:
		/* match 2nd/3rd instruction in __kvm_restore_msacsr */
		if (!cpu_has_msa ||
		    pc < (unsigned long)&__kvm_restore_msacsr + 4 ||
		    pc > (unsigned long)&__kvm_restore_msacsr + 8)
			return NOTIFY_DONE;
		break;
	}

	/* Move PC forward a little and continue executing */
	instruction_pointer(regs) += 4;

	return NOTIFY_STOP;
}

static struct notifier_block kvm_mips_csr_die_notifier = {
	.notifier_call = kvm_mips_csr_die_notify,
};

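/*
 * Module initialisation: perform one-time setup for the dynamically
 * generated entry code, register with the generic KVM core, and install the
 * die notifier used to step over FP/MSA exceptions raised while restoring
 * guest FCSR/MSACSR.
 */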
static int __init kvm_mips_init(void)
{
	int ret;

	ret = kvm_mips_entry_setup();
	if (ret)
		return ret;

	ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (ret)
		return ret;

	register_die_notifier(&kvm_mips_csr_die_notifier);

	return 0;
}

static void __exit kvm_mips_exit(void)
{
	kvm_exit();

	unregister_die_notifier(&kvm_mips_csr_die_notifier);
}

module_init(kvm_mips_init);
module_exit(kvm_mips_exit);

EXPORT_TRACEPOINT_SYMBOL(kvm_exit);