/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_ops;

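/*
 * A vcpu is considered runnable when it has a pending exception to
 * deliver or any request bit set; otherwise the generic KVM code may
 * keep it halted.
 */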
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/*
			 * Interrupts got enabled in between, so we
			 * are back at square 1.
			 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}
#endif

		kvm_guest_enter();
		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

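/*
 * Handle a KVM/ePAPR paravirtual hypercall from the guest: the hypercall
 * number is taken from r11 and up to four arguments from r3-r6 (truncated
 * to 32 bits when the guest runs without MSR_SF). The second return value
 * is handed back to the guest in r4; the primary status is this function's
 * return value.
 */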
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

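/*
 * Validate that the vcpu configuration can actually run on this host:
 * a PVR must be set, PAPR mode requires a 64-bit book3s CPU, HV KVM
 * only supports PAPR guests, and booke HV needs CPU_FTR_EMB_HV.
 * The result is cached in vcpu->arch.sane.
 */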
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

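/*
 * Emulate the instruction that faulted and map the emulation result onto
 * a resume code: keep running the guest, bounce the MMIO access out to
 * userspace, or (on emulation failure) return to the host after logging
 * the offending instruction.
 */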
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

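/*
 * Report which capabilities this KVM implementation supports: 0 means
 * unsupported, while a positive value indicates support (and for some
 * capabilities carries extra information, e.g. the number of threads
 * per core for KVM_CAP_PPC_SMT).
 */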
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
		break;
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_HTAB_FD:
		r = 1;
		break;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(free, dont);
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	return kvmppc_core_create_memslot(slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * Low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

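/*
 * Complete a pending MMIO load: pull the bytes that userspace (or an
 * in-kernel device) placed in run->mmio.data, fix up endianness and
 * optional sign extension, and write the value into the GPR/FPR/QPR
 * recorded in vcpu->arch.io_gpr when the load was started.
 */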
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

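/*
 * Start an MMIO load of 'bytes' bytes into guest register 'rt'. The
 * access is first offered to in-kernel handlers on KVM_MMIO_BUS; if one
 * claims it we can complete the load immediately, otherwise we exit to
 * userspace with EMULATE_DO_MMIO.
 */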
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	vcpu->arch.mmio_sign_extend = 1;
	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);

	return r;
}

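/*
 * Start an MMIO store: marshal 'val' into run->mmio.data with the
 * endianness the guest expects, then try the in-kernel KVM_MMIO_BUS
 * handlers before falling back to a userspace exit.
 */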
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;
	int idx, ret;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

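/*
 * Main vcpu run loop entry: first finish whatever brought us out to
 * userspace last time (a pending MMIO or DCR load, OSI or PAPR hypercall
 * results, or a userspace-supplied EPR), then hand off to the core
 * kvmppc_vcpu_run() with the caller's signal mask installed.
 */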
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

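/*
 * Raise the guest's external interrupt line and kick the vcpu so it
 * notices, or retract a previously queued interrupt when userspace
 * passes KVM_INTERRUPT_UNSET.
 */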
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

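/*
 * Enable an optional per-vcpu capability that userspace has requested
 * via KVM_ENABLE_CAP (OSI and PAPR modes, EPR delivery, the watchdog,
 * software-managed TLBs, or a connection to an in-kernel MPIC/XICS).
 * Finishes with a sanity check, since some capabilities change what the
 * vcpu is allowed to run.
 */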
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvmppc_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default:
		r = kvmppc_ops->arch_vm_ioctl(filp, ioctl, arg);

#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

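/*
 * Simple bitmap allocator for LPIDs (the hardware partition IDs used to
 * tag guest translations). kvmppc_init_lpid() sets how many LPIDs the
 * platform provides; allocation and free are a test_and_set/clear on the
 * shared bitmap.
 */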
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

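/*
 * Only one backend (e.g. the HV or PR book3s module) may register its
 * kvmppc_ops at a time; a second kvm_arch_init() while one is loaded
 * fails with -EEXIST.
 */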
int kvm_arch_init(void *opaque)
{
	if (kvmppc_ops) {
		printk(KERN_ERR "kvm: already loaded the other module\n");
		return -EEXIST;
	}
	kvmppc_ops = (struct kvmppc_ops *)opaque;
	return 0;
}

void kvm_arch_exit(void)
{
	kvmppc_ops = NULL;
}