arch/powerpc/kvm/powerpc.c (at commit "KVM: PPC: Consistentify vcpu exit path")

/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

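/*
 * A vcpu counts as runnable if it has a pending exception or any
 * outstanding request bit set; otherwise it may remain halted.
 */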
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * Returns !0 if a signal is pending.
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we
			 * are back at square 1.
			 */
			continue;
		}

		if (vcpu->mode == EXITING_GUEST_MODE) {
			r = 1;
			break;
		}

		/* Going into guest context! Yay! */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

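/*
 * Handle a paravirtual hypercall from the guest: the hcall number is
 * passed in r11 and up to four arguments in r3..r6; the return code
 * ends up in r3 and a second return value in r4.
 */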
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

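/*
 * Check that the vcpu is configured in a way this host can actually
 * virtualize before it is allowed to run.
 */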
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

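/*
 * Emulate the instruction that faulted on MMIO; depending on the result,
 * either resume the guest or exit to userspace to complete the access.
 */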
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they
		 * were actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they
		 * were actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

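/*
 * Report whether (and to what extent) an optional KVM capability is
 * supported by this kernel configuration.
 */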
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * Low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

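/* Deliver the result of a guest DCR read once userspace has emulated it. */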
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

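/*
 * Fold the data userspace placed in run->mmio.data back into the target
 * register, honouring access width, endianness and sign extension.
 */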
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

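/*
 * Describe a guest MMIO read in the kvm_run area and exit to userspace,
 * which supplies the data; the register is filled in on the next KVM_RUN.
 */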
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

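/*
 * Describe a guest MMIO write in the kvm_run area, storing the value in
 * the guest's byte order, and exit to userspace to perform the access.
 */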
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

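/*
 * Entry point for KVM_RUN: first complete anything userspace has just
 * emulated for us (MMIO, DCR, OSI, PAPR hcall results), then enter the
 * guest.
 */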
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

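/* Enable an optional capability that userspace has requested for this vcpu. */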
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

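/* Simple bitmap allocator for hardware logical partition IDs (LPIDs). */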
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}