/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions) ||
               v->requests;
}

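/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall number
 * arrives in GPR 11 and up to four arguments in GPRs 3-6.  The primary
 * return code is the function's return value; a second return value is
 * passed back to the guest in GPR 4.
 */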
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

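/*
 * Check that the vcpu has been configured in a way this kernel can actually
 * run: a PVR must be set, PAPR mode is only valid on Book3S-64, and HV KVM
 * currently supports PAPR guests only.  The result is cached in
 * vcpu->arch.sane.
 */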
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

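/*
 * Emulate the instruction that faulted on an emulated MMIO region and
 * translate the emulation result into a resume code: MMIO accesses are
 * bounced out to userspace via KVM_EXIT_MMIO, and emulation failures
 * force a return to the host.
 */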
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

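/*
 * Report which KVM capabilities this build supports.  Several of the
 * extensions depend on the configuration (Book E vs. Book3S, e500 SW TLB,
 * Book3S-64 HV).
 */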
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB:
#endif
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

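/*
 * Complete an MMIO load after userspace has filled in run->mmio.data:
 * byte-swap and optionally sign-extend the value, then deliver it to the
 * GPR/FPR/QPR recorded in vcpu->arch.io_gpr.
 */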
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

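/*
 * Set up an MMIO load request in the kvm_run structure and exit to
 * userspace; the result is picked up later by kvmppc_complete_mmio_load().
 */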
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

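/*
 * Set up an MMIO store request: the value to write is placed in
 * run->mmio.data in the byte order the guest access requested, and the
 * exit is handed to userspace to perform the actual store.
 */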
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

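/*
 * Main vcpu run entry point: finish off any MMIO, DCR, OSI or hypercall
 * exit that userspace just completed, then re-enter the guest.
 */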
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

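/*
 * Wake up a vcpu: either wake the thread sleeping on its wait queue or,
 * if the vcpu is currently running on another cpu, send it a resched IPI.
 */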
void kvm_vcpu_kick(struct kvm_vcpu *vcpu)
{
        int me;
        int cpu = vcpu->cpu;

        me = get_cpu();
        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        } else if (cpu != me && cpu != -1) {
                smp_send_reschedule(vcpu->cpu);
        }
        put_cpu();
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);
        kvm_vcpu_kick(vcpu);

        return 0;
}

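/*
 * Enable an optional per-vcpu capability requested by userspace (OSI,
 * PAPR mode, or the e500 software TLB), then re-run the sanity check.
 */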
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
#ifdef CONFIG_KVM_E500
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

#ifdef CONFIG_KVM_E500
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif

        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

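/*
 * Fill in the instruction sequence a guest should use to make KVM
 * paravirtual hypercalls (load KVM_SC_MAGIC_R0 into r0, then 'sc').
 */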
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, r0, KVM_SC_MAGIC_R0@h
         *    ori r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }

        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}