/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

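/*
 * Handle a paravirtual hypercall made by the guest: the hypercall number
 * is taken from r11 and up to four arguments from r3-r6.  The status code
 * is handed back to the caller and a second return value is placed in r4.
 */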
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

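/*
 * Report which optional KVM capabilities are available.  Several of the
 * answers depend on whether the VM (or, before any VM exists, the loaded
 * module) provides the HV or the PR flavour of KVM.
 */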
int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_ALLOC_HTAB:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		if (hv_enabled)
			r = threads_per_subcore;
		else
			r = 0;
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

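/*
 * Complete an emulated MMIO load: copy the data that user space (or an
 * in-kernel device) returned in the run struct into the register that the
 * original instruction targeted, byteswapping and sign-extending as
 * recorded when the load was first emulated.
 */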
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

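/*
 * Simple bitmap allocator for logical partition IDs (LPIDs).  The number
 * of usable LPIDs is capped at init time by what the platform reports.
 */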
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);