/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

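/*
 * CREATE_TRACE_POINTS must be defined in exactly one translation unit
 * before including the trace header, so the tracepoint bodies declared
 * in trace.h are instantiated here.
 */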
#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * enabled; this function hard-disables them itself (and warns if they
 * were already disabled on entry).
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();

                if (vcpu->requests) {
                        /* Make sure we process requests with preemption enabled */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /*
                         * interrupts got enabled in between, so we
                         * are back at square 1
                         */
                        continue;
                }

                kvm_guest_enter();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

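/*
 * ePAPR-style hypercall dispatch: the hypercall number arrives in r11
 * and up to four arguments in r3-r6.  The second return value is
 * written straight into r4 below; the primary status is returned to
 * the caller, which is expected to place it in r3.
 */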
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
                vcpu->arch.magic_page_pa = param1;
                vcpu->arch.magic_page_ea = param2;

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

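/*
 * Note on the *_NV resume codes used below: the NV flag asks the exit
 * path to reload the guest's non-volatile register state before
 * resuming, since emulation may have modified it behind the guest's
 * back.
 */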
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * If we have both HV and PR enabled, default is HV.
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}

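/*
 * The 'type' checked above is the argument userspace passes to
 * KVM_CREATE_VM; illustrative usage:
 *
 *      vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
 *
 * forces the PR implementation even when HV is loaded, while type 0
 * takes whichever implementation is available, preferring HV.
 */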
void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_dev_ioctl_check_extension(long ext)
{
        int r;
        /* FIXME!!
         * Should some of this be a vm ioctl? Is that possible now?
         */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MMIO
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
        case KVM_CAP_PPC_RTAS:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                if (hv_enabled)
                        r = threads_per_core;
                else
                        r = 0;
                break;
        case KVM_CAP_PPC_RMA:
                r = hv_enabled;
                /* PPC970 requires an RMA */
                if (r && cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                if (hv_enabled)
                        r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                else
                        r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

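/*
 * Userspace probes the capabilities above with the KVM_CHECK_EXTENSION
 * ioctl on /dev/kvm; illustrative usage:
 *
 *      if (ioctl(kvm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT) > 0)
 *              ...; // HV: result is the number of threads per core
 *
 * A zero (or negative) result means the capability is absent.
 */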
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * Low-level hrtimer wake routine. Because this runs in hardirq context,
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

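/*
 * MMIO emulation flow: an access the kernel cannot handle exits to
 * userspace with exit_reason == KVM_EXIT_MMIO.  For a load, userspace
 * fills in run->mmio.data and re-enters with KVM_RUN, at which point
 * kvmppc_complete_mmio_load() above commits the value to the register
 * recorded in vcpu->arch.io_gpr.
 */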
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

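/*
 * Endianness note: is_bigendian above is effectively is_default_endian
 * XOR kvmppc_need_byteswap(vcpu), so a byte-reversed access issued by a
 * byte-swapped guest cancels out to a normal access as far as
 * run->mmio.data is concerned.
 */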
/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        void *data = run->mmio.data;
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

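/*
 * KVM_RUN entry point.  Before re-entering the guest, finish whatever
 * piece of userspace emulation the previous exit left pending (MMIO
 * load, DCR read, OSI or PAPR hypercall results, EPR update), then hand
 * off to the subarch run loop.
 */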
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}

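/*
 * Raising KVM_INTERRUPT with irq == KVM_INTERRUPT_UNSET retracts a
 * previously queued external interrupt; any other value queues one and
 * kicks the vcpu so a running guest notices it promptly.
 */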
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

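/*
 * These per-vcpu capabilities are switched on from userspace with the
 * KVM_ENABLE_CAP ioctl; illustrative usage:
 *
 *      struct kvm_enable_cap cap = { .cap = KVM_CAP_PPC_PAPR };
 *      ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
 */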
int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = inst_sc1;
        pvinfo->hcall[1] = inst_nop;
        pvinfo->hcall[2] = inst_nop;
        pvinfo->hcall[3] = inst_nop;
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, KVM_SC_MAGIC_R0@h
         *    ori r0, r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
        pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
        pvinfo->hcall[2] = inst_sc;
        pvinfo->hcall[3] = inst_nop;
#endif

        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }
out:
        return r;
}

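/*
 * Simple bitmap allocator for hardware LPID (logical partition ID)
 * values.  find_first_zero_bit() alone is racy, so the chosen bit is
 * confirmed with the atomic test_and_set_bit() and the scan is retried
 * on collision.
 */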
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}