/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/sched/signal.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include <asm/switch_to.h>
#include <asm/xive.h>

#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) || kvm_request_pending(v);
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * enabled: the function hard-disables interrupts itself and warns if
 * they are already off on entry (see the WARN_ON below).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (kvm_request_pending(vcpu)) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

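/*
 * Handler for the KVM paravirtual hypercall interface: the hypercall
 * number arrives in GPR11 and up to four parameters in GPR3..GPR6. A
 * second return value is written back to GPR4 here, while the primary
 * status in 'r' is expected to be placed in GPR3 by the caller.
 */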
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void *)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
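	/*
	 * The magic (shared) page is backed by vcpu->arch.shared on the
	 * host side, so a non-PR guest access that hits it is serviced
	 * from that buffer directly rather than going through guest
	 * memory.
	 */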
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
		/* fallthrough */
	case KVM_CAP_SPAPR_TCE_VFIO:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (kvm) {
			if (kvm->arch.emul_smt_mode > 1)
				r = kvm->arch.emul_smt_mode;
			else
				r = kvm->arch.smt_mode;
		} else if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_SMT_POSSIBLE:
		r = 1;
		if (hv_enabled) {
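			/*
			 * Returned as a bitmask of supported SMT modes:
			 * (threads_per_subcore << 1) - 1 sets one bit for
			 * every power-of-two mode up to
			 * threads_per_subcore.
			 */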
			if (!cpu_has_feature(CPU_FTR_ARCH_300))
				r = ((threads_per_subcore << 1) - 1);
			else
				/* P9 can emulate dbells, so allow any mode */
				r = 8 | 4 | 2 | 1;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && !radix_enabled() &&
		       cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		/* Disable this on POWER9 until code handles new HPTE format */
		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) && hv_enabled;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		if (xive_enabled())
			kvmppc_xive_cleanup_vcpu(vcpu);
		else
			kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

#ifdef CONFIG_VSX
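/*
 * VSX registers are 128 bits wide. The two helpers below map a
 * doubleword (0..1) or word (0..3) element index, given in big-endian
 * order, onto the host's in-memory layout: on a little-endian host
 * the element order within the register image is reversed, hence the
 * mirrored offset.
 */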
static inline int kvmppc_get_vsr_dword_offset(int index)
{
	int offset;

	if ((index != 0) && (index != 1))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 1 - index;
#endif

	return offset;
}

static inline int kvmppc_get_vsr_word_offset(int index)
{
	int offset;

	if ((index > 3) || (index < 0))
		return -1;

#ifdef __BIG_ENDIAN
	offset = index;
#else
	offset = 3 - index;
#endif
	return offset;
}

static inline void kvmppc_set_vsr_dword(struct kvm_vcpu *vcpu,
					u64 gpr)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[offset] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, offset) = gpr;
	}
}

static inline void kvmppc_set_vsr_dword_dump(struct kvm_vcpu *vcpu,
					     u64 gpr)
{
	union kvmppc_one_reg val;
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsxval[0] = gpr;
		val.vsxval[1] = gpr;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		VCPU_VSX_FPR(vcpu, index, 0) = gpr;
		VCPU_VSX_FPR(vcpu, index, 1) = gpr;
	}
}

static inline void kvmppc_set_vsr_word(struct kvm_vcpu *vcpu,
				       u32 gpr32)
{
	union kvmppc_one_reg val;
	int offset = kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);
	int index = vcpu->arch.io_gpr & KVM_MMIO_REG_MASK;
	int dword_offset, word_offset;

	if (offset == -1)
		return;

	if (vcpu->arch.mmio_vsx_tx_sx_enabled) {
		val.vval = VCPU_VSX_VR(vcpu, index);
		val.vsx32val[offset] = gpr32;
		VCPU_VSX_VR(vcpu, index) = val.vval;
	} else {
		dword_offset = offset / 2;
		word_offset = offset % 2;
		val.vsxval[0] = VCPU_VSX_FPR(vcpu, index, dword_offset);
		val.vsx32val[word_offset] = gpr32;
		VCPU_VSX_FPR(vcpu, index, dword_offset) = val.vsxval[0];
	}
}
#endif /* CONFIG_VSX */

#ifdef CONFIG_PPC_FPU
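/*
 * Single <-> double precision conversion is done by the FPU itself:
 * lfs loads a single-precision value and converts it to double format
 * in fr0, and stfd stores it back out as double (dp_to_sp does the
 * reverse with lfd/stfs). fr0 is used as scratch, which is why
 * preemption is disabled and the FPU is enabled around the asm.
 */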
static inline u64 sp_to_dp(u32 fprs)
{
	u64 fprd;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfs%U1%X1 0,%1; stfd%U0%X0 0,%0" : "=m" (fprd) : "m" (fprs)
	     : "fr0");
	preempt_enable();
	return fprd;
}

static inline u32 dp_to_sp(u64 fprd)
{
	u32 fprs;

	preempt_disable();
	enable_kernel_fp();
	asm ("lfd%U1%X1 0,%1; stfs%U0%X0 0,%0" : "=m" (fprs) : "m" (fprd)
	     : "fr0");
	preempt_enable();
	return fprs;
}

#else
#define sp_to_dp(x)	(x)
#define dp_to_sp(x)	(x)
#endif /* CONFIG_PPC_FPU */

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	/* conversion between single and double precision */
	if ((vcpu->arch.mmio_sp64_extend) && (run->mmio.len == 4))
		gpr = sp_to_dp(gpr);

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
#ifdef CONFIG_VSX
	case KVM_MMIO_REG_VSX:
		if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_DWORD)
			kvmppc_set_vsr_dword(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type == KVMPPC_VSX_COPY_WORD)
			kvmppc_set_vsr_word(vcpu, gpr);
		else if (vcpu->arch.mmio_vsx_copy_type ==
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP)
			kvmppc_set_vsr_dword_dump(vcpu, gpr);
		break;
#endif
	default:
		BUG();
	}
}

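/*
 * Common MMIO load path: fill in run->mmio and the vcpu's pending-MMIO
 * state, then try to satisfy the access from an in-kernel device on
 * KVM_MMIO_BUS. If no in-kernel device claims it, EMULATE_DO_MMIO is
 * returned and the access is bounced out to userspace to complete.
 */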
static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
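	/*
	 * host_swabbed ends up set whenever exactly one byte reversal is
	 * in effect: either the guest runs in the opposite endianness to
	 * the host, or the access uses a byte-reversed instruction
	 * (!is_default_endian), but not both.
	 */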
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

#ifdef CONFIG_VSX
int kvmppc_handle_vsx_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int rt, unsigned int bytes,
			   int is_default_endian, int mmio_sign_extend)
{
	enum emulation_result emulated = EMULATE_DONE;

	/* Currently, mmio_vsx_copy_nums is only allowed to be at most 4 */
	if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
	    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
		return EMULATE_FAIL;
	}

	while (vcpu->arch.mmio_vsx_copy_nums) {
		emulated = __kvmppc_handle_load(run, vcpu, rt, bytes,
						is_default_endian, mmio_sign_extend);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}
	return emulated;
}
#endif /* CONFIG_VSX */

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	if ((vcpu->arch.mmio_sp64_extend) && (bytes == 4))
		val = dp_to_sp(val);

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

#ifdef CONFIG_VSX
static inline int kvmppc_get_vsr_data(struct kvm_vcpu *vcpu, int rs, u64 *val)
{
	u32 dword_offset, word_offset;
	union kvmppc_one_reg reg;
	int vsx_offset = 0;
	int copy_type = vcpu->arch.mmio_vsx_copy_type;
	int result = 0;

	switch (copy_type) {
	case KVMPPC_VSX_COPY_DWORD:
		vsx_offset =
			kvmppc_get_vsr_dword_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			*val = VCPU_VSX_FPR(vcpu, rs, vsx_offset);
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsxval[vsx_offset];
		}
		break;

	case KVMPPC_VSX_COPY_WORD:
		vsx_offset =
			kvmppc_get_vsr_word_offset(vcpu->arch.mmio_vsx_offset);

		if (vsx_offset == -1) {
			result = -1;
			break;
		}

		if (!vcpu->arch.mmio_vsx_tx_sx_enabled) {
			dword_offset = vsx_offset / 2;
			word_offset = vsx_offset % 2;
			reg.vsxval[0] = VCPU_VSX_FPR(vcpu, rs, dword_offset);
			*val = reg.vsx32val[word_offset];
		} else {
			reg.vval = VCPU_VSX_VR(vcpu, rs);
			*val = reg.vsx32val[vsx_offset];
		}
		break;

	default:
		result = -1;
		break;
	}

	return result;
}

int kvmppc_handle_vsx_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    int rs, unsigned int bytes, int is_default_endian)
{
	u64 val;
	enum emulation_result emulated = EMULATE_DONE;

	vcpu->arch.io_gpr = rs;

	/* Currently, mmio_vsx_copy_nums is only allowed to be at most 4 */
	if ((vcpu->arch.mmio_vsx_copy_nums > 4) ||
	    (vcpu->arch.mmio_vsx_copy_nums < 0)) {
		return EMULATE_FAIL;
	}

	while (vcpu->arch.mmio_vsx_copy_nums) {
		if (kvmppc_get_vsr_data(vcpu, rs, &val) == -1)
			return EMULATE_FAIL;

		emulated = kvmppc_handle_store(run, vcpu,
					       val, bytes, is_default_endian);

		if (emulated != EMULATE_DONE)
			break;

		vcpu->arch.paddr_accessed += run->mmio.len;

		vcpu->arch.mmio_vsx_copy_nums--;
		vcpu->arch.mmio_vsx_offset++;
	}

	return emulated;
}

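/*
 * Re-entered from kvm_arch_vcpu_ioctl_run() when a multi-element VSX
 * MMIO access still has copies outstanding after a return from
 * userspace: each remaining element goes through the normal load/store
 * path and may itself bounce back out to userspace with KVM_EXIT_MMIO.
 */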
static int kvmppc_emulate_mmio_vsx_loadstore(struct kvm_vcpu *vcpu,
					     struct kvm_run *run)
{
	enum emulation_result emulated = EMULATE_FAIL;
	int r;

	vcpu->arch.paddr_accessed += run->mmio.len;

	if (!vcpu->mmio_is_write) {
		emulated = kvmppc_handle_vsx_load(run, vcpu, vcpu->arch.io_gpr,
			 run->mmio.len, 1, vcpu->arch.mmio_sign_extend);
	} else {
		emulated = kvmppc_handle_vsx_store(run, vcpu,
			 vcpu->arch.io_gpr, run->mmio.len, 1);
	}

	switch (emulated) {
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST;
		break;
	case EMULATE_FAIL:
		pr_info("KVM: MMIO emulation failed (VSX repeat)\n");
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
		r = RESUME_HOST;
		break;
	default:
		r = RESUME_GUEST;
		break;
	}
	return r;
}
#endif /* CONFIG_VSX */

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->mmio_needed) {
		vcpu->mmio_needed = 0;
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
#ifdef CONFIG_VSX
		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			vcpu->arch.mmio_vsx_copy_nums--;
			vcpu->arch.mmio_vsx_offset++;
		}

		if (vcpu->arch.mmio_vsx_copy_nums > 0) {
			r = kvmppc_emulate_mmio_vsx_loadstore(vcpu, run);
			if (r == RESUME_HOST) {
				vcpu->mmio_needed = 1;
				return r;
			}
		}
#endif
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev) {
			if (xive_enabled())
				r = kvmppc_xive_connect_vcpu(dev, vcpu, cap->args[1]);
			else
				r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);
		}

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_FWNMI:
		r = -EINVAL;
		if (!is_kvmppc_hv_enabled(vcpu->kvm))
			break;
		r = 0;
		vcpu->kvm->arch.fwnmi_enabled = true;
		break;
#endif /* CONFIG_KVM_BOOK3S_HV_POSSIBLE */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics || kvm->arch.xive)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

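		/*
		 * PAPR hcall numbers are multiples of 4, so hcall / 4 is
		 * used as the bit index into the enabled_hcalls bitmap;
		 * the (hcall & 3) check below rejects misaligned values.
		 */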
		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
	case KVM_CAP_PPC_SMT: {
		unsigned long mode = cap->args[0];
		unsigned long flags = cap->args[1];

		r = -EINVAL;
		if (kvm->arch.kvm_ops->set_smt_mode)
			r = kvm->arch.kvm_ops->set_smt_mode(kvm, mode, flags);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

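/*
 * Lockless LPID (logical partition ID) allocator:
 * find_first_zero_bit() proposes a candidate and the atomic
 * test_and_set_bit() either claims it or detects a concurrent
 * allocation and retries.
 */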
long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);