/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
        return !!(v->arch.pending_exceptions) ||
               v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
        return 1;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r;

        WARN_ON(irqs_disabled());
        hard_irq_disable();

        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        hard_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                        vcpu->run->exit_reason = KVM_EXIT_INTR;
                        r = -EINTR;
                        break;
                }

                vcpu->mode = IN_GUEST_MODE;

                /*
                 * Reading vcpu->requests must happen after setting vcpu->mode,
                 * so we don't miss a request because the requester sees
                 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
                 * before next entering the guest (and thus doesn't IPI).
                 */
                smp_mb();

                if (vcpu->requests) {
                        /* Make sure we process requests preemptibly */
                        local_irq_enable();
                        trace_kvm_check_requests(vcpu);
                        r = kvmppc_core_check_requests(vcpu);
                        hard_irq_disable();
                        if (r > 0)
                                continue;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

                kvm_guest_enter();
                return 1;
        }

        /* return to host */
        local_irq_enable();
        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

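/*
 * Byte-swap the fields of the vcpu's shared (magic) page in place.  Used by
 * the KVM_HC_PPC_MAP_MAGIC_PAGE handler below when the guest's endianness
 * changes, so the shared page layout matches what the guest now expects.
 */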
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
        struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
        int i;

        shared->sprg0 = swab64(shared->sprg0);
        shared->sprg1 = swab64(shared->sprg1);
        shared->sprg2 = swab64(shared->sprg2);
        shared->sprg3 = swab64(shared->sprg3);
        shared->srr0 = swab64(shared->srr0);
        shared->srr1 = swab64(shared->srr1);
        shared->dar = swab64(shared->dar);
        shared->msr = swab64(shared->msr);
        shared->dsisr = swab32(shared->dsisr);
        shared->int_pending = swab32(shared->int_pending);
        for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
                shared->sr[i] = swab32(shared->sr[i]);
}
#endif

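/*
 * Handle a KVM paravirtual hypercall from the guest.  The hypercall number
 * arrives in r11 and up to four arguments in r3-r6; a second return value
 * is written to r4 here and the hypercall status (EV_SUCCESS etc.) is
 * returned to the caller.
 */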
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
        int nr = kvmppc_get_gpr(vcpu, 11);
        int r;
        unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
        unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
        unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
        unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
        unsigned long r2 = 0;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
                /* 32 bit mode */
                param1 &= 0xffffffff;
                param2 &= 0xffffffff;
                param3 &= 0xffffffff;
                param4 &= 0xffffffff;
        }

        switch (nr) {
        case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
        {
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
                /* Book3S can be little endian, find it out here */
                int shared_big_endian = true;
                if (vcpu->arch.intr_msr & MSR_LE)
                        shared_big_endian = false;
                if (shared_big_endian != vcpu->arch.shared_big_endian)
                        kvmppc_swab_shared(vcpu);
                vcpu->arch.shared_big_endian = shared_big_endian;
#endif

                if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
                        /*
                         * Older versions of the Linux magic page code had
                         * a bug where they would map their trampoline code
                         * NX. If that's the case, remove !PR NX capability.
                         */
                        vcpu->arch.disable_kernel_nx = true;
                        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
                }

                vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
                vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
                /*
                 * Make sure our 4k magic page is in the same window of a 64k
                 * page within the guest and within the host's page.
                 */
                if ((vcpu->arch.magic_page_pa & 0xf000) !=
                    ((ulong)vcpu->arch.shared & 0xf000)) {
                        void *old_shared = vcpu->arch.shared;
                        ulong shared = (ulong)vcpu->arch.shared;
                        void *new_shared;

                        shared &= PAGE_MASK;
                        shared |= vcpu->arch.magic_page_pa & 0xf000;
                        new_shared = (void*)shared;
                        memcpy(new_shared, old_shared, 0x1000);
                        vcpu->arch.shared = new_shared;
                }
#endif

                r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

                r = EV_SUCCESS;
                break;
        }
        case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
                r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
                r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

                /* Second return value is in r4 */
                break;
        case EV_HCALL_TOKEN(EV_IDLE):
                r = EV_SUCCESS;
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                break;
        default:
                r = EV_UNIMPLEMENTED;
                break;
        }

        kvmppc_set_gpr(vcpu, 4, r2);

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

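/*
 * Check that the vcpu has been configured in a way this host can actually
 * run: a PVR must be set, PAPR mode requires a Book3S-64 CPU type, HV KVM
 * requires PAPR mode, and booke HV requires the embedded-hypervisor CPU
 * feature.  The result is recorded in vcpu->arch.sane.
 */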
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
        int r = false;

        /* We have to know what CPU to virtualize */
        if (!vcpu->arch.pvr)
                goto out;

        /* PAPR only works with book3s_64 */
        if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
                goto out;

        /* HV KVM can only do PAPR mode for now */
        if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
                goto out;

#ifdef CONFIG_KVM_BOOKE_HV
        if (!cpu_has_feature(CPU_FTR_EMB_HV))
                goto out;
#endif

        r = true;

out:
        vcpu->arch.sane = r;
        return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

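/*
 * Run the instruction emulator on the faulting instruction and translate
 * the emulation result into a resume code: keep running the guest, bounce
 * the access out as MMIO, or bail out to the host on a failed emulation.
 */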
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;
        int r;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                /* We must reload nonvolatiles because "update" load/store
                 * instructions modify register state. */
                /* Future optimization: only reload non-volatiles if they were
                 * actually modified. */
                r = RESUME_HOST_NV;
                break;
        case EMULATE_FAIL:
        {
                u32 last_inst;

                kvmppc_get_last_inst(vcpu, false, &last_inst);
                /* XXX Deliver Program interrupt to guest. */
                pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
                r = RESUME_HOST;
                break;
        }
        default:
                WARN_ON(1);
                r = RESUME_GUEST;
        }

        return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvm_arch_hardware_enable(void *garbage)
{
        return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
        return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
        *(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
        struct kvmppc_ops *kvm_ops = NULL;
        /*
         * if we have both HV and PR enabled, default is HV
         */
        if (type == 0) {
                if (kvmppc_hv_ops)
                        kvm_ops = kvmppc_hv_ops;
                else
                        kvm_ops = kvmppc_pr_ops;
                if (!kvm_ops)
                        goto err_out;
        } else if (type == KVM_VM_PPC_HV) {
                if (!kvmppc_hv_ops)
                        goto err_out;
                kvm_ops = kvmppc_hv_ops;
        } else if (type == KVM_VM_PPC_PR) {
                if (!kvmppc_pr_ops)
                        goto err_out;
                kvm_ops = kvmppc_pr_ops;
        } else
                goto err_out;

        if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
                return -ENOENT;

        kvm->arch.kvm_ops = kvm_ops;
        return kvmppc_core_init_vm(kvm);
err_out:
        return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
        unsigned int i;
        struct kvm_vcpu *vcpu;

        kvm_for_each_vcpu(i, vcpu, kvm)
                kvm_arch_vcpu_free(vcpu);

        mutex_lock(&kvm->lock);
        for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
                kvm->vcpus[i] = NULL;

        atomic_set(&kvm->online_vcpus, 0);

        kvmppc_core_destroy_vm(kvm);

        mutex_unlock(&kvm->lock);

        /* drop the module reference */
        module_put(kvm->arch.kvm_ops->owner);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
        int r;
        /* FIXME!!
         * Should some of this be vm ioctl ? is it possible now ?
         */
        int hv_enabled = kvmppc_hv_ops ? 1 : 0;

        switch (ext) {
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_SREGS:
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
        case KVM_CAP_PPC_EPR:
#else
        case KVM_CAP_PPC_SEGSTATE:
        case KVM_CAP_PPC_HIOR:
        case KVM_CAP_PPC_PAPR:
#endif
        case KVM_CAP_PPC_UNSET_IRQ:
        case KVM_CAP_PPC_IRQ_LEVEL:
        case KVM_CAP_ENABLE_CAP:
        case KVM_CAP_ENABLE_CAP_VM:
        case KVM_CAP_ONE_REG:
        case KVM_CAP_IOEVENTFD:
        case KVM_CAP_DEVICE_CTRL:
                r = 1;
                break;
        case KVM_CAP_PPC_PAIRED_SINGLES:
        case KVM_CAP_PPC_OSI:
        case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB:
#endif
                /* We support this only for PR */
                r = !hv_enabled;
                break;
#ifdef CONFIG_KVM_MMIO
        case KVM_CAP_COALESCED_MMIO:
                r = KVM_COALESCED_MMIO_PAGE_OFFSET;
                break;
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC:
                r = 1;
                break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_SPAPR_TCE:
        case KVM_CAP_PPC_ALLOC_HTAB:
        case KVM_CAP_PPC_RTAS:
        case KVM_CAP_PPC_FIXUP_HCALL:
        case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS:
#endif
                r = 1;
                break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_SMT:
                if (hv_enabled)
                        r = threads_per_subcore;
                else
                        r = 0;
                break;
        case KVM_CAP_PPC_RMA:
                r = hv_enabled;
                /* PPC970 requires an RMA */
                if (r && cpu_has_feature(CPU_FTR_ARCH_201))
                        r = 2;
                break;
#endif
        case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
                if (hv_enabled)
                        r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
                else
                        r = 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
                r = 1;
#else
                r = 0;
#endif
                break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
        case KVM_CAP_PPC_HTAB_FD:
                r = hv_enabled;
                break;
#endif
        case KVM_CAP_NR_VCPUS:
                /*
                 * Recommending a number of CPUs is somewhat arbitrary; we
                 * return the number of present CPUs for -HV (since a host
                 * will have secondary threads "offline"), and for other KVM
                 * implementations just count online CPUs.
                 */
                if (hv_enabled)
                        r = num_present_cpus();
                else
                        r = num_online_cpus();
                break;
        case KVM_CAP_MAX_VCPUS:
                r = KVM_MAX_VCPUS;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CAP_PPC_GET_SMMU_INFO:
                r = 1;
                break;
#endif
        default:
                r = 0;
                break;
        }
        return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
        return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
        kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                            unsigned long npages)
{
        return kvmppc_core_create_memslot(kvm, slot, npages);
}

void kvm_arch_memslots_updated(struct kvm *kvm)
{
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_userspace_memory_region *mem,
                                   enum kvm_mr_change change)
{
        return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   const struct kvm_memory_slot *old,
                                   enum kvm_mr_change change)
{
        kvmppc_core_commit_memory_region(kvm, mem, old);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
        kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvm_vcpu *vcpu;
        vcpu = kvmppc_core_vcpu_create(kvm, id);
        if (!IS_ERR(vcpu)) {
                vcpu->arch.wqp = &vcpu->wq;
                kvmppc_create_vcpu_debugfs(vcpu, id);
        }
        return vcpu;
}

int kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
        return 0;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
        /* Make sure we're not using the vcpu anymore */
        hrtimer_cancel(&vcpu->arch.dec_timer);
        tasklet_kill(&vcpu->arch.tasklet);

        kvmppc_remove_vcpu_debugfs(vcpu);

        switch (vcpu->arch.irq_type) {
        case KVMPPC_IRQ_MPIC:
                kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
                break;
        case KVMPPC_IRQ_XICS:
                kvmppc_xics_free_icp(vcpu);
                break;
        }

        kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
        kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
        return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
        struct kvm_vcpu *vcpu;

        vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
        tasklet_schedule(&vcpu->arch.tasklet);

        return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
        int ret;

        hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
        tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
        vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
        vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
        mutex_init(&vcpu->arch.exit_timing_lock);
#endif
        ret = kvmppc_subarch_vcpu_init(vcpu);
        return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        kvmppc_mmu_destroy(vcpu);
        kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
        /*
         * vrsave (formerly usprg0) isn't used by Linux, but may
         * be used by the guest.
         *
         * On non-booke this is associated with Altivec and
         * is handled by code in book3s.c.
         */
        mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
        kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
        kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
        vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

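/*
 * Write back the result of an MMIO load once the data is available in
 * run->mmio.data: fix up endianness, apply sign extension if requested,
 * and route the value to a GPR, FPR or QPR according to the register
 * encoding saved in vcpu->arch.io_gpr.
 */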
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
        u64 uninitialized_var(gpr);

        if (run->mmio.len > sizeof(gpr)) {
                printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
                return;
        }

        if (vcpu->arch.mmio_is_bigendian) {
                switch (run->mmio.len) {
                case 8: gpr = *(u64 *)run->mmio.data; break;
                case 4: gpr = *(u32 *)run->mmio.data; break;
                case 2: gpr = *(u16 *)run->mmio.data; break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        } else {
                /* Convert BE data from userland back to LE. */
                switch (run->mmio.len) {
                case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
                case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
                case 1: gpr = *(u8 *)run->mmio.data; break;
                }
        }

        if (vcpu->arch.mmio_sign_extend) {
                switch (run->mmio.len) {
#ifdef CONFIG_PPC64
                case 4:
                        gpr = (s64)(s32)gpr;
                        break;
#endif
                case 2:
                        gpr = (s64)(s16)gpr;
                        break;
                case 1:
                        gpr = (s64)(s8)gpr;
                        break;
                }
        }

        kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

        switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
        case KVM_MMIO_REG_GPR:
                kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
                break;
        case KVM_MMIO_REG_FPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                break;
#ifdef CONFIG_PPC_BOOK3S
        case KVM_MMIO_REG_QPR:
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
        case KVM_MMIO_REG_FQPR:
                VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
                vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
                break;
#endif
        default:
                BUG();
        }
}

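/*
 * Emulate an MMIO load.  The access is first offered to any in-kernel
 * devices on the MMIO bus; if nobody claims it, the state needed to
 * complete the load is recorded in the vcpu and EMULATE_DO_MMIO is
 * returned so the caller can bounce the access out to userspace.
 */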
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes,
                       int is_default_endian)
{
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 0;

        vcpu->arch.io_gpr = rt;
        vcpu->arch.mmio_is_bigendian = is_bigendian;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 0;
        vcpu->arch.mmio_sign_extend = 0;

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_read(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                              bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes,
                        int is_default_endian)
{
        int r;

        vcpu->arch.mmio_sign_extend = 1;
        r = kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian);

        return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_default_endian)
{
        void *data = run->mmio.data;
        int idx, ret;
        int is_bigendian;

        if (kvmppc_need_byteswap(vcpu)) {
                /* Default endianness is "little endian". */
                is_bigendian = !is_default_endian;
        } else {
                /* Default endianness is "big endian". */
                is_bigendian = is_default_endian;
        }

        if (bytes > sizeof(run->mmio.data)) {
                printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
                       run->mmio.len);
        }

        run->mmio.phys_addr = vcpu->arch.paddr_accessed;
        run->mmio.len = bytes;
        run->mmio.is_write = 1;
        vcpu->mmio_needed = 1;
        vcpu->mmio_is_write = 1;

        /* Store the value at the lowest bytes in 'data'. */
        if (is_bigendian) {
                switch (bytes) {
                case 8: *(u64 *)data = val; break;
                case 4: *(u32 *)data = val; break;
                case 2: *(u16 *)data = val; break;
                case 1: *(u8 *)data = val; break;
                }
        } else {
                /* Store LE value into 'data'. */
                switch (bytes) {
                case 4: st_le32(data, val); break;
                case 2: st_le16(data, val); break;
                case 1: *(u8 *)data = val; break;
                }
        }

        idx = srcu_read_lock(&vcpu->kvm->srcu);

        ret = kvm_io_bus_write(vcpu->kvm, KVM_MMIO_BUS, run->mmio.phys_addr,
                               bytes, &run->mmio.data);

        srcu_read_unlock(&vcpu->kvm->srcu, idx);

        if (!ret) {
                vcpu->mmio_needed = 0;
                return EMULATE_DONE;
        }

        return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

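/*
 * The top-level vcpu run ioctl.  Before re-entering the guest, finish off
 * whatever the previous exit left pending: write back MMIO/DCR load data,
 * OSI and PAPR hypercall return values, or an EPR, then hand over to the
 * subarch run loop.
 */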
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
        int r;
        sigset_t sigsaved;

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

        if (vcpu->mmio_needed) {
                if (!vcpu->mmio_is_write)
                        kvmppc_complete_mmio_load(vcpu, run);
                vcpu->mmio_needed = 0;
        } else if (vcpu->arch.dcr_needed) {
                if (!vcpu->arch.dcr_is_write)
                        kvmppc_complete_dcr_load(vcpu, run);
                vcpu->arch.dcr_needed = 0;
        } else if (vcpu->arch.osi_needed) {
                u64 *gprs = run->osi.gprs;
                int i;

                for (i = 0; i < 32; i++)
                        kvmppc_set_gpr(vcpu, i, gprs[i]);
                vcpu->arch.osi_needed = 0;
        } else if (vcpu->arch.hcall_needed) {
                int i;

                kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
                for (i = 0; i < 9; ++i)
                        kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
                vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
        } else if (vcpu->arch.epr_needed) {
                kvmppc_set_epr(vcpu, run->epr.epr);
                vcpu->arch.epr_needed = 0;
#endif
        }

        r = kvmppc_vcpu_run(run, vcpu);

        if (vcpu->sigset_active)
                sigprocmask(SIG_SETMASK, &sigsaved, NULL);

        return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
        if (irq->irq == KVM_INTERRUPT_UNSET) {
                kvmppc_core_dequeue_external(vcpu);
                return 0;
        }

        kvmppc_core_queue_external(vcpu, irq);

        kvm_vcpu_kick(vcpu);

        return 0;
}

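/*
 * Enable an optional vcpu-scoped capability requested via the
 * KVM_ENABLE_CAP ioctl, then re-run the sanity check, since some
 * capabilities (e.g. PAPR mode) change which configurations are valid.
 */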
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
        case KVM_CAP_PPC_OSI:
                r = 0;
                vcpu->arch.osi_enabled = true;
                break;
        case KVM_CAP_PPC_PAPR:
                r = 0;
                vcpu->arch.papr_enabled = true;
                break;
        case KVM_CAP_PPC_EPR:
                r = 0;
                if (cap->args[0])
                        vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
                else
                        vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
                break;
#ifdef CONFIG_BOOKE
        case KVM_CAP_PPC_BOOKE_WATCHDOG:
                r = 0;
                vcpu->arch.watchdog_enabled = true;
                break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_CAP_SW_TLB: {
                struct kvm_config_tlb cfg;
                void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

                r = -EFAULT;
                if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
                        break;

                r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
                break;
        }
#endif
#ifdef CONFIG_KVM_MPIC
        case KVM_CAP_IRQ_MPIC: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif
#ifdef CONFIG_KVM_XICS
        case KVM_CAP_IRQ_XICS: {
                struct fd f;
                struct kvm_device *dev;

                r = -EBADF;
                f = fdget(cap->args[0]);
                if (!f.file)
                        break;

                r = -EPERM;
                dev = kvm_device_from_filp(f.file);
                if (dev)
                        r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

                fdput(f);
                break;
        }
#endif /* CONFIG_KVM_XICS */
        default:
                r = -EINVAL;
                break;
        }

        if (!r)
                r = kvmppc_sanity_check(vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
        return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
        struct kvm_vcpu *vcpu = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_INTERRUPT: {
                struct kvm_interrupt irq;
                r = -EFAULT;
                if (copy_from_user(&irq, argp, sizeof(irq)))
                        goto out;
                r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
                goto out;
        }

        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
                break;
        }

        case KVM_SET_ONE_REG:
        case KVM_GET_ONE_REG:
        {
                struct kvm_one_reg reg;
                r = -EFAULT;
                if (copy_from_user(&reg, argp, sizeof(reg)))
                        goto out;
                if (ioctl == KVM_SET_ONE_REG)
                        r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
                else
                        r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
                break;
        }

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        case KVM_DIRTY_TLB: {
                struct kvm_dirty_tlb dirty;
                r = -EFAULT;
                if (copy_from_user(&dirty, argp, sizeof(dirty)))
                        goto out;
                r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
                break;
        }
#endif
        default:
                r = -EINVAL;
        }

out:
        return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
        return VM_FAULT_SIGBUS;
}

1081
15711e9c
AG
1082static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
1083{
784bafac
SY
1084 u32 inst_nop = 0x60000000;
1085#ifdef CONFIG_KVM_BOOKE_HV
1086 u32 inst_sc1 = 0x44000022;
2743103f
AG
1087 pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
1088 pvinfo->hcall[1] = cpu_to_be32(inst_nop);
1089 pvinfo->hcall[2] = cpu_to_be32(inst_nop);
1090 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
784bafac 1091#else
15711e9c
AG
1092 u32 inst_lis = 0x3c000000;
1093 u32 inst_ori = 0x60000000;
15711e9c
AG
1094 u32 inst_sc = 0x44000002;
1095 u32 inst_imm_mask = 0xffff;
1096
1097 /*
1098 * The hypercall to get into KVM from within guest context is as
1099 * follows:
1100 *
1101 * lis r0, r0, KVM_SC_MAGIC_R0@h
1102 * ori r0, KVM_SC_MAGIC_R0@l
1103 * sc
1104 * nop
1105 */
2743103f
AG
1106 pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
1107 pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
1108 pvinfo->hcall[2] = cpu_to_be32(inst_sc);
1109 pvinfo->hcall[3] = cpu_to_be32(inst_nop);
784bafac 1110#endif
15711e9c 1111
9202e076
LYB
1112 pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;
1113
15711e9c
AG
1114 return 0;
1115}
1116
5efdb4be
AG
1117int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
1118 bool line_status)
1119{
1120 if (!irqchip_in_kernel(kvm))
1121 return -ENXIO;
1122
1123 irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
1124 irq_event->irq, irq_event->level,
1125 line_status);
1126 return 0;
1127}
1128
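/*
 * VM-scoped counterpart of KVM_ENABLE_CAP.  The only capability handled
 * here is KVM_CAP_PPC_ENABLE_HCALL, which turns individual sPAPR
 * hypercalls on or off in the per-VM enabled_hcalls bitmap.
 */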
static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
                                   struct kvm_enable_cap *cap)
{
        int r;

        if (cap->flags)
                return -EINVAL;

        switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
        case KVM_CAP_PPC_ENABLE_HCALL: {
                unsigned long hcall = cap->args[0];

                r = -EINVAL;
                if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
                    cap->args[1] > 1)
                        break;
                if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
                        break;
                if (cap->args[1])
                        set_bit(hcall / 4, kvm->arch.enabled_hcalls);
                else
                        clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
                r = 0;
                break;
        }
#endif
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
        struct kvm *kvm __maybe_unused = filp->private_data;
        void __user *argp = (void __user *)arg;
        long r;

        switch (ioctl) {
        case KVM_PPC_GET_PVINFO: {
                struct kvm_ppc_pvinfo pvinfo;
                memset(&pvinfo, 0, sizeof(pvinfo));
                r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
                if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
                        r = -EFAULT;
                        goto out;
                }

                break;
        }
        case KVM_ENABLE_CAP:
        {
                struct kvm_enable_cap cap;
                r = -EFAULT;
                if (copy_from_user(&cap, argp, sizeof(cap)))
                        goto out;
                r = kvm_vm_ioctl_enable_cap(kvm, &cap);
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case KVM_CREATE_SPAPR_TCE: {
                struct kvm_create_spapr_tce create_tce;

                r = -EFAULT;
                if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
                        goto out;
                r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
                goto out;
        }
        case KVM_PPC_GET_SMMU_INFO: {
                struct kvm_ppc_smmu_info info;
                struct kvm *kvm = filp->private_data;

                memset(&info, 0, sizeof(info));
                r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
                if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
                        r = -EFAULT;
                break;
        }
        case KVM_PPC_RTAS_DEFINE_TOKEN: {
                struct kvm *kvm = filp->private_data;

                r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
                break;
        }
        default: {
                struct kvm *kvm = filp->private_data;
                r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
        }
#else /* CONFIG_PPC_BOOK3S_64 */
        default:
                r = -ENOTTY;
#endif
        }
out:
        return r;
}

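/*
 * Simple bitmap allocator for logical partition IDs (LPIDs).  The number
 * of usable LPIDs is capped at KVMPPC_NR_LPIDS and set by
 * kvmppc_init_lpid(); kvmppc_claim_lpid() marks an LPID as in use so the
 * allocator never hands it out.
 */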
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
        long lpid;

        do {
                lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
                if (lpid >= nr_lpids) {
                        pr_err("%s: No LPIDs free\n", __func__);
                        return -ENOMEM;
                }
        } while (test_and_set_bit(lpid, lpid_inuse));

        return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
        set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
        clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
        nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
        memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
        return 0;
}

void kvm_arch_exit(void)
{

}