/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !(v->arch.shared->msr & MSR_WE) ||
               !!(v->arch.pending_exceptions);
}

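/*
 * Handle a KVM paravirtual hypercall from the guest.  The hypercall
 * number is taken from guest r11 and up to four arguments from r3-r6
 * (truncated to 32 bits when the guest is not in 64-bit mode).  The
 * second return value is written to guest r4 here; the caller is
 * responsible for placing the primary return code into r3.
 */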
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR;

                r = HC_EV_SUCCESS;
                break;
        }
        case HC_VENDOR_KVM | KVM_HC_FEATURES:
                r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        default:
                r = HC_EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}

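/*
 * Check that the vcpu has been configured consistently: a PVR must be
 * set, PAPR mode is only valid on Book3S-64, and HV KVM currently
 * requires PAPR mode.  The result is cached in vcpu->arch.sane.
 */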
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled)
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}

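/*
 * Emulate the instruction that caused the current exit and translate
 * the emulation result into a resume code: re-enter the guest for
 * EMULATE_DONE, exit to userspace with KVM_EXIT_MMIO for
 * EMULATE_DO_MMIO, and bail out to the host on emulation failure.
 */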
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                BUG();
        }

        return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        if (type)
                return -EINVAL;

        return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
                r = 1;
                break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
                r = 1;
                break;
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CAP_SPAPR_TCE:
                r = 1;
                break;
        case KVM_CAP_PPC_SMT:
                r = threads_per_core;
                break;
        case KVM_CAP_PPC_RMA:
                r = 1;
                /* PPC970 requires an RMA */
                if (cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
        return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
        kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                /* Only touch the vcpu if creation actually succeeded. */
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);
        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

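/*
 * Tasklet half of the decrementer: queue a decrementer exception for
 * the vcpu and wake it up if it is sleeping on its wait queue.  The
 * tasklet is scheduled from hardirq context by the hrtimer callback
 * below.
 */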
static void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_core_queue_dec(vcpu);

        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        }
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif

        return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
        vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
        vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

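/*
 * Complete an MMIO load after userspace has filled in run->mmio.data:
 * pick up the value in the right width and endianness, optionally
 * sign extend it, and write it back into the register that the
 * faulting access targeted (GPR, FPR or QPR).
 */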
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_REG_EXT_MASK) {
        case KVM_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_REG_FPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
        case KVM_REG_FQPR:
                vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

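/*
 * Set up run->mmio for an emulated load and request an exit to
 * userspace.  The register to load into and the access attributes are
 * remembered in the vcpu; the load is completed by
 * kvmppc_complete_mmio_load() on the next KVM_RUN.
 */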
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
        int r;

        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
        vcpu->arch.mmio_sign_extend = 1;

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
        void *data = run->mmio.data;

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        return EMULATE_DO_MMIO;
}

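/*
 * Main vcpu run loop entry.  Before re-entering the guest, finish
 * whatever the previous exit left pending: an MMIO load result, DCR
 * data, OSI gprs or a PAPR hypercall return, all provided by
 * userspace in the kvm_run structure.
 */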
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
        }

        kvmppc_core_deliver_interrupts(vcpu);

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

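/*
 * Raise an external interrupt for the vcpu, or clear a pending one if
 * KVM_INTERRUPT_UNSET is passed.  A vcpu sleeping on its wait queue is
 * woken; a vcpu that is loaded on another cpu is poked with a
 * reschedule IPI so it notices the new interrupt.
 */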
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu, irq);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        if (waitqueue_active(vcpu->arch.wqp)) {
                wake_up_interruptible(vcpu->arch.wqp);
                vcpu->stat.halt_wakeup++;
        } else if (vcpu->cpu != -1) {
                smp_send_reschedule(vcpu->cpu);
        }

        return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP: {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

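/*
 * Build the four-instruction hypercall sequence that guests should use
 * to enter KVM, as reported to userspace by the KVM_PPC_GET_PVINFO
 * ioctl below.
 */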
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_nop = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;

        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_KVM_BOOK3S_64_HV
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;
                struct kvm *kvm = filp->private_data;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }

        case KVM_ALLOCATE_RMA: {
                struct kvm *kvm = filp->private_data;
                struct kvm_allocate_rma rma;

                r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
                if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
                        r = -EFAULT;
                break;
        }
#endif /* CONFIG_KVM_BOOK3S_64_HV */

        default:
                r = -ENOTTY;
        }

out:
        return r;
}

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}