/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();

                if (vcpu->requests) {
                        /* Make sure we process requests with preemption enabled */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /*
                         * Interrupts got enabled in between, so we
                         * are back at square 1.
                         */
                        continue;
                }

                kvm_guest_enter();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

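/*
 * The shared (magic) page is kept in guest endianness.  If the guest
 * registers the page while running in the other endianness, every
 * field must be byte-swapped in place so that later accesses through
 * vcpu->arch.shared still see correct values; see the MSR_LE check in
 * kvmppc_kvm_pv() below.
 */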
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
        int i;

        shared->sprg0 = swab64(shared->sprg0);
        shared->sprg1 = swab64(shared->sprg1);
        shared->sprg2 = swab64(shared->sprg2);
        shared->sprg3 = swab64(shared->sprg3);
        shared->srr0 = swab64(shared->srr0);
        shared->srr1 = swab64(shared->srr1);
        shared->dar = swab64(shared->dar);
        shared->msr = swab64(shared->msr);
        shared->dsisr = swab32(shared->dsisr);
        shared->int_pending = swab32(shared->int_pending);
        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
                shared->sr[i] = swab32(shared->sr[i]);
}
#endif

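/*
 * Handle a KVM paravirtual hypercall (ePAPR style): the hypercall
 * number arrives in r11 and up to four arguments in r3-r6.  The
 * status word is returned to the caller, which is expected to place
 * it in the guest's r3; a second return value is passed back in r4.
 */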
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
                /* Book3S can be little endian, find it out here */
                int shared_big_endian = true;
                if (vcpu->arch.intr_msr & MSR_LE)
                        shared_big_endian = false;
                if (shared_big_endian != vcpu->arch.shared_big_endian)
                        kvmppc_swab_shared(vcpu);
                vcpu->arch.shared_big_endian = shared_big_endian;
#endif

                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
                        /*
                         * Older versions of the Linux magic page code had
                         * a bug where they would map their trampoline code
                         * NX. If that's the case, remove !PR NX capability.
                         */
                        vcpu->arch.disable_kernel_nx = true;
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }

                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
                /*
                 * Make sure our 4k magic page is in the same window of a 64k
                 * page within the guest and within the host's page.
                 */
                if ((vcpu->arch.magic_page_pa & 0xf000) !=
                    ((ulong)vcpu->arch.shared & 0xf000)) {
                        void *old_shared = vcpu->arch.shared;
                        ulong shared = (ulong)vcpu->arch.shared;
                        void *new_shared;

                        shared &= PAGE_MASK;
                        shared |= vcpu->arch.magic_page_pa & 0xf000;
                        new_shared = (void *)shared;
                        memcpy(new_shared, old_shared, 0x1000);
                        vcpu->arch.shared = new_shared;
                }
#endif

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                /* XXX Missing magic page on 44x */
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

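/*
 * Validate the vcpu configuration once userspace has set it up: we
 * need a PVR, PAPR mode is only available on Book3S 64, and HV KVM
 * currently requires PAPR mode.  The result is cached in
 * vcpu->arch.sane and rechecked after each capability change.
 */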
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

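/*
 * Emulate the instruction that caused an MMIO exit and translate the
 * emulation result into a resume code: keep running the guest, bounce
 * the access to userspace via KVM_EXIT_MMIO, or give up on an
 * instruction we cannot emulate.  The *_NV variants force a reload of
 * non-volatile registers on resume.
 */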
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
                /* XXX Deliver Program interrupt to guest. */
                printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
                       kvmppc_get_last_inst(vcpu));
                r = RESUME_HOST;
                break;
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

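/*
 * Pick the HV or PR backend for a new VM.  "type" is the argument
 * userspace passed to KVM_CREATE_VM: 0 selects a default (HV when
 * available, PR otherwise), while KVM_VM_PPC_HV and KVM_VM_PPC_PR
 * request a specific backend and fail if it is not loaded.
 */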
int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * if we have both HV and PR enabled, default is HV
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

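/*
 * Report which capabilities KVM_CHECK_EXTENSION should advertise.
 * Several of them depend on whether the HV or the PR backend is in
 * use; since this is (still) a device rather than a VM ioctl,
 * hv_enabled here only reflects whether the HV module is loaded.
 */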
int kvm_dev_ioctl_check_extension(long ext)
{
        int r;
        /* FIXME!!
         * Should some of this be vm ioctl ? is it possible now ?
         */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MMIO
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
        case KVM_CAP_PPC_RTAS:
        case KVM_CAP_PPC_FIXUP_HCALL:
        case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                if (hv_enabled)
                        r = threads_per_subcore;
                else
                        r = 0;
                break;
        case KVM_CAP_PPC_RMA:
                r = hv_enabled;
                /* PPC970 requires an RMA */
                if (r && cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                if (hv_enabled)
                        r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                else
                        r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

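/*
 * Completion side of MMIO and DCR loads: once userspace (or the
 * in-kernel io bus) has produced the data for a read, the value in
 * run->mmio.data is converted to the expected byte order, optionally
 * sign-extended, and written to the register encoded in
 * vcpu->arch.io_gpr.  The high bits of io_gpr say whether the target
 * is a GPR, an FPR, or (on Book3S) a QPR.
 */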
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

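/*
 * Start an MMIO load: record what the guest asked for in vcpu->run,
 * then try to satisfy the access on the in-kernel KVM_MMIO_BUS.  If
 * no in-kernel device claims the address, return EMULATE_DO_MMIO so
 * the caller exits to userspace with KVM_EXIT_MMIO; the load is then
 * finished by kvmppc_complete_mmio_load() on the next KVM_RUN.
 */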
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        void *data = run->mmio.data;
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

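/*
 * Main dispatch for KVM_RUN.  Before re-entering the guest, finish
 * whatever the previous exit left pending: deliver completed MMIO or
 * DCR load data, copy back OSI gprs, store PAPR hypercall results, or
 * set the EPR on BookE.
 */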
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}

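/*
 * Handle the vcpu-level KVM_ENABLE_CAP ioctl.  Most cases simply flip
 * a flag in vcpu->arch; KVM_CAP_SW_TLB additionally copies a TLB
 * configuration from userspace, and the MPIC/XICS cases connect the
 * vcpu to an in-kernel interrupt controller device passed in as a
 * file descriptor.  kvmppc_sanity_check() is rerun afterwards because
 * a capability (e.g. PAPR mode) can change what counts as a valid
 * configuration.
 */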
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

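/*
 * Fill in the hypercall sequence that KVM_PPC_GET_PVINFO reports to
 * userspace: sc1 on BookE-HV, otherwise "load KVM_SC_MAGIC_R0 into r0
 * and issue sc".  The four instructions are stored in big-endian byte
 * order (cpu_to_be32) regardless of host endianness.
 */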
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
        u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
        u32 inst_sc1 = 0x44000022;
        pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
        pvinfo->hcall[1] = cpu_to_be32(inst_nop);
        pvinfo->hcall[2] = cpu_to_be32(inst_nop);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
        u32 inst_lis = 0x3c000000;
        u32 inst_ori = 0x60000000;
        u32 inst_sc = 0x44000002;
        u32 inst_imm_mask = 0xffff;

        /*
         * The hypercall to get into KVM from within guest context is as
         * follows:
         *
         *    lis r0, r0, KVM_SC_MAGIC_R0@h
         *    ori r0, KVM_SC_MAGIC_R0@l
         *    sc
         *    nop
         */
        pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
        pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
        pvinfo->hcall[2] = cpu_to_be32(inst_sc);
        pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

        pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

        return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
                          bool line_status)
{
        if (!irqchip_in_kernel(kvm))
                return -ENXIO;

        irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
                                        irq_event->irq, irq_event->level,
                                        line_status);
        return 0;
}

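/*
 * Handle the VM-level KVM_ENABLE_CAP ioctl.  KVM_CAP_PPC_ENABLE_HCALL
 * lets userspace enable or disable individual sPAPR hypercalls in the
 * kvm->arch.enabled_hcalls bitmap; since hcall numbers are multiples
 * of 4, the opcode is divided by 4 to index the bitmap.
 */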
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                                   struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        case KVM_CAP_PPC_ENABLE_HCALL: {
                unsigned long hcall = cap->args[0];

                r = -EINVAL;
                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
                    cap->args[1] > 1)
                        break;
                if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
                        break;
                if (cap->args[1])
                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
                else
                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
                r = 0;
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }
out:
        return r;
}

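/*
 * Simple global allocator for hardware logical partition IDs (LPIDs):
 * a bitmap tracks which of the KVMPPC_NR_LPIDS ids are in use, and
 * kvmppc_init_lpid() caps the usable range at whatever the platform
 * actually supports.  kvmppc_claim_lpid() reserves an id chosen
 * outside the allocator (e.g. one the host already uses).
 */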
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{
}