/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.shared->msr & MSR_WE) ||
	       !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

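/*
 * Handle a KVM paravirtual hypercall from the guest: the hypercall number
 * arrives in r11 and up to four parameters in r3-r6 (truncated to 32 bits
 * when the guest is not running in 64-bit mode). The second return value
 * is placed in r4 here; the primary status is handed back to the caller.
 */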
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

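/*
 * Check that the vcpu has been configured in a way this kernel can actually
 * virtualize. The result is cached in vcpu->arch.sane and reported to the
 * caller as 0 or -EINVAL.
 */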
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

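/*
 * Emulate the instruction that caused the current exit and translate the
 * emulation result into a resume code: re-enter the guest, or go out to
 * userspace to complete an MMIO access.
 */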
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

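/*
 * Report which optional KVM capabilities this build supports; the set
 * depends on whether we are an HV or non-HV implementation and on the
 * CPU family (Book3S vs. BookE) we were built for.
 */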
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_SPAPR_TCE:
		r = 1;
		break;
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
	case KVM_CAP_SYNC_MMU:
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

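/*
 * Called once userspace has finished an emulated MMIO read: pull the value
 * out of run->mmio.data, fix up endianness and optional sign extension, and
 * write it into the register the original load instruction targeted.
 */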
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

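/*
 * kvmppc_handle_load() and kvmppc_handle_store() fill in run->mmio and the
 * per-vcpu MMIO state, then return EMULATE_DO_MMIO so the caller exits to
 * userspace, which performs the actual access.
 */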
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

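/*
 * Entry point for KVM_RUN: first complete whatever MMIO, DCR, OSI or
 * hypercall exit userspace has just finished handling, then enter the guest.
 */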
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

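/*
 * Enable an optional per-vcpu capability requested via KVM_ENABLE_CAP and
 * re-run the sanity check afterwards.
 */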
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif

	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_nop = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}

	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

	default:
		r = -ENOTTY;
	}

out:
	return r;
}

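/* Simple bitmap allocator for hardware logical partition IDs (LPIDs). */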
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}