/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <linux/file.h>
#include <linux/module.h>
#include <linux/irqbypass.h>
#include <linux/kvm_irqfd.h>
#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include <asm/iommu.h>
#include "timing.h"
#include "irq.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

struct kvmppc_ops *kvmppc_hv_ops;
EXPORT_SYMBOL_GPL(kvmppc_hv_ops);
struct kvmppc_ops *kvmppc_pr_ops;
EXPORT_SYMBOL_GPL(kvmppc_pr_ops);


int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

/*
 * Common checks before entering the guest world. Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r;

	WARN_ON(irqs_disabled());
	hard_irq_disable();

	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			hard_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		vcpu->mode = IN_GUEST_MODE;

		/*
		 * Reading vcpu->requests must happen after setting vcpu->mode,
		 * so we don't miss a request because the requester sees
		 * OUTSIDE_GUEST_MODE and assumes we'll be checking requests
		 * before next entering the guest (and thus doesn't IPI).
		 * This also orders the write to mode from any reads
		 * to the page tables done while the VCPU is running.
		 * Please see the comment in kvm_flush_remote_tlbs.
		 */
		smp_mb();

		if (vcpu->requests) {
			/* Make sure we process requests preemptable */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			r = kvmppc_core_check_requests(vcpu);
			hard_irq_disable();
			if (r > 0)
				continue;
			break;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

		guest_enter_irqoff();
		return 1;
	}

	/* return to host */
	local_irq_enable();
	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_prepare_to_enter);

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
static void kvmppc_swab_shared(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu_arch_shared *shared = vcpu->arch.shared;
	int i;

	shared->sprg0 = swab64(shared->sprg0);
	shared->sprg1 = swab64(shared->sprg1);
	shared->sprg2 = swab64(shared->sprg2);
	shared->sprg3 = swab64(shared->sprg3);
	shared->srr0 = swab64(shared->srr0);
	shared->srr1 = swab64(shared->srr1);
	shared->dar = swab64(shared->dar);
	shared->msr = swab64(shared->msr);
	shared->dsisr = swab32(shared->dsisr);
	shared->int_pending = swab32(shared->int_pending);
	for (i = 0; i < ARRAY_SIZE(shared->sr); i++)
		shared->sr[i] = swab32(shared->sr[i]);
}
#endif

int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_kvm_pv);

int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled && is_kvmppc_hv_enabled(vcpu->kvm))
		goto out;

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}
EXPORT_SYMBOL_GPL(kvmppc_sanity_check);

int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_loadstore(vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
	{
		u32 last_inst;

		kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		/* XXX Deliver Program interrupt to guest. */
		pr_emerg("%s: emulation failed (%08x)\n", __func__, last_inst);
		r = RESUME_HOST;
		break;
	}
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
EXPORT_SYMBOL_GPL(kvmppc_emulate_mmio);

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int r;

	vcpu->stat.st++;

	r = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			 XLATE_WRITE, &pte);
	if (r < 0)
		return r;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(magic, ptr, size);
		return EMULATE_DONE;
	}

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_st);

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM & PAGE_MASK;
	struct kvmppc_pte pte;
	int rc;

	vcpu->stat.ld++;

	rc = kvmppc_xlate(vcpu, *eaddr, data ? XLATE_DATA : XLATE_INST,
			  XLATE_READ, &pte);
	if (rc)
		return rc;

	*eaddr = pte.raddr;

	if (!pte.may_read)
		return -EPERM;

	if (!data && !pte.may_execute)
		return -ENOEXEC;

	/* Magic page override */
	if (kvmppc_supports_magic_page(vcpu) && mp_pa &&
	    ((pte.raddr & KVM_PAM & PAGE_MASK) == mp_pa) &&
	    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
		void *magic = vcpu->arch.shared;
		magic += pte.eaddr & 0xfff;
		memcpy(ptr, magic, size);
		return EMULATE_DONE;
	}

	if (kvm_read_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}
EXPORT_SYMBOL_GPL(kvmppc_ld);

int kvm_arch_hardware_enable(void)
{
	return 0;
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	struct kvmppc_ops *kvm_ops = NULL;
	/*
	 * if we have both HV and PR enabled, default is HV
	 */
	if (type == 0) {
		if (kvmppc_hv_ops)
			kvm_ops = kvmppc_hv_ops;
		else
			kvm_ops = kvmppc_pr_ops;
		if (!kvm_ops)
			goto err_out;
	} else if (type == KVM_VM_PPC_HV) {
		if (!kvmppc_hv_ops)
			goto err_out;
		kvm_ops = kvmppc_hv_ops;
	} else if (type == KVM_VM_PPC_PR) {
		if (!kvmppc_pr_ops)
			goto err_out;
		kvm_ops = kvmppc_pr_ops;
	} else
		goto err_out;

	if (kvm_ops->owner && !try_module_get(kvm_ops->owner))
		return -ENOENT;

	kvm->arch.kvm_ops = kvm_ops;
	return kvmppc_core_init_vm(kvm);
err_out:
	return -EINVAL;
}

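/*
 * Illustrative sketch (not part of the original file): kvm_arch_init_vm()
 * receives the machine type that userspace passed to the KVM_CREATE_VM ioctl,
 * so a tool that wants to force HV or PR KVM rather than take the default can
 * do roughly the following from userspace. The descriptor names are
 * placeholders and error handling is omitted.
 *
 *	#include <fcntl.h>
 *	#include <sys/ioctl.h>
 *	#include <linux/kvm.h>
 *
 *	int kvm_fd = open("/dev/kvm", O_RDWR);
 *	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_HV);
 *	if (vm_fd < 0)
 *		vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, KVM_VM_PPC_PR);
 *
 * Passing 0 as the type keeps the default described above: HV if the HV
 * module is loaded, otherwise PR.
 */
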
bool kvm_arch_has_vcpu_debugfs(void)
{
	return false;
}

int kvm_arch_create_vcpu_debugfs(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

#ifdef CONFIG_KVM_XICS
	/*
	 * We call kick_all_cpus_sync() to ensure that all
	 * CPUs have executed any pending IPIs before we
	 * continue and free VCPUs structures below.
	 */
	if (is_kvmppc_hv_enabled(kvm))
		kick_all_cpus_sync();
#endif

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);

	/* drop the module reference */
	module_put(kvm->arch.kvm_ops->owner);
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;
	/* Assume we're using HV mode when the HV module is loaded */
	int hv_enabled = kvmppc_hv_ops ? 1 : 0;

	if (kvm) {
		/*
		 * Hooray - we know which VM type we're running on. Depend on
		 * that rather than the guess above.
		 */
		hv_enabled = is_kvmppc_hv_enabled(kvm);
	}

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
	case KVM_CAP_PPC_EPR:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_IMMEDIATE_EXIT:
		r = 1;
		break;
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		/* We support this only for PR */
		r = !hv_enabled;
		break;
#ifdef CONFIG_KVM_MMIO
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC:
		r = 1;
		break;
#endif

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_SPAPR_TCE_64:
	case KVM_CAP_PPC_RTAS:
	case KVM_CAP_PPC_FIXUP_HCALL:
	case KVM_CAP_PPC_ENABLE_HCALL:
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS:
#endif
		r = 1;
		break;

	case KVM_CAP_PPC_ALLOC_HTAB:
		r = hv_enabled;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_SMT:
		r = 0;
		if (hv_enabled) {
			if (cpu_has_feature(CPU_FTR_ARCH_300))
				r = 1;
			else
				r = threads_per_subcore;
		}
		break;
	case KVM_CAP_PPC_RMA:
		r = 0;
		break;
	case KVM_CAP_PPC_HWRNG:
		r = kvmppc_hwrng_present();
		break;
	case KVM_CAP_PPC_MMU_RADIX:
		r = !!(hv_enabled && radix_enabled());
		break;
	case KVM_CAP_PPC_MMU_HASH_V3:
		r = !!(hv_enabled && !radix_enabled() &&
		       cpu_has_feature(CPU_FTR_ARCH_300));
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
		r = hv_enabled;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	case KVM_CAP_PPC_HTAB_FD:
		r = hv_enabled;
		break;
#endif
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
		if (hv_enabled)
			r = num_present_cpus();
		else
			r = num_online_cpus();
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
	case KVM_CAP_SPAPR_MULTITCE:
		r = 1;
		break;
	case KVM_CAP_SPAPR_RESIZE_HPT:
		/* Disable this on POWER9 until code handles new HPTE format */
		r = !!hv_enabled && !cpu_has_feature(CPU_FTR_ARCH_300);
		break;
#endif
	case KVM_CAP_PPC_HTM:
		r = cpu_has_feature(CPU_FTR_TM_COMP) &&
		    is_kvmppc_hv_enabled(kvm);
		break;
	default:
		r = 0;
		break;
	}
	return r;

}

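/*
 * Illustrative sketch (not part of the original file): the capabilities
 * computed above are reported to userspace through the KVM_CHECK_EXTENSION
 * ioctl. A minimal query could look like this; the variable names are
 * placeholders and error handling is omitted.
 *
 *	int threads = ioctl(vm_fd, KVM_CHECK_EXTENSION, KVM_CAP_PPC_SMT);
 *	int has_pvinfo = ioctl(vm_fd, KVM_CHECK_EXTENSION,
 *			       KVM_CAP_PPC_GET_PVINFO);
 *
 * Issuing the ioctl on a VM descriptor lets the code above use the real VM
 * type (hv_enabled taken from is_kvmppc_hv_enabled()); issuing it on the
 * /dev/kvm descriptor only gets the module-load-time guess.
 */
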
long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	kvmppc_core_free_memslot(kvm, free, dont);
}

int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			    unsigned long npages)
{
	return kvmppc_core_create_memslot(kvm, slot, npages);
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   const struct kvm_userspace_memory_region *mem,
				   enum kvm_mr_change change)
{
	return kvmppc_core_prepare_memory_region(kvm, memslot, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   const struct kvm_userspace_memory_region *mem,
				   const struct kvm_memory_slot *old,
				   const struct kvm_memory_slot *new,
				   enum kvm_mr_change change)
{
	kvmppc_core_commit_memory_region(kvm, mem, old, new);
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
	kvmppc_core_flush_memslot(kvm, slot);
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);

	kvmppc_remove_vcpu_debugfs(vcpu);

	switch (vcpu->arch.irq_type) {
	case KVMPPC_IRQ_MPIC:
		kvmppc_mpic_disconnect_vcpu(vcpu->arch.mpic, vcpu);
		break;
	case KVMPPC_IRQ_XICS:
		kvmppc_xics_free_icp(vcpu);
		break;
	}

	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

static enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	kvmppc_decrementer_func(vcpu);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	int ret;

	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif
	ret = kvmppc_subarch_vcpu_init(vcpu);
	return ret;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
	kvmppc_subarch_vcpu_uninit(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
}

/*
 * irq_bypass_add_producer and irq_bypass_del_producer are only
 * useful if the architecture supports PCI passthrough.
 * irq_bypass_stop and irq_bypass_start are not needed and so
 * kvm_ops are not defined for them.
 */
bool kvm_arch_has_irq_bypass(void)
{
	return ((kvmppc_hv_ops && kvmppc_hv_ops->irq_bypass_add_producer) ||
		(kvmppc_pr_ops && kvmppc_pr_ops->irq_bypass_add_producer));
}

int kvm_arch_irq_bypass_add_producer(struct irq_bypass_consumer *cons,
				     struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_add_producer)
		return kvm->arch.kvm_ops->irq_bypass_add_producer(cons, prod);

	return 0;
}

void kvm_arch_irq_bypass_del_producer(struct irq_bypass_consumer *cons,
				      struct irq_bypass_producer *prod)
{
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);
	struct kvm *kvm = irqfd->kvm;

	if (kvm->arch.kvm_ops->irq_bypass_del_producer)
		kvm->arch.kvm_ops->irq_bypass_del_producer(cons, prod);
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (!vcpu->arch.mmio_host_swabbed) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		switch (run->mmio.len) {
		case 8: gpr = swab64(*(u64 *)run->mmio.data); break;
		case 4: gpr = swab32(*(u32 *)run->mmio.data); break;
		case 2: gpr = swab16(*(u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

static int __kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned int rt, unsigned int bytes,
				int is_default_endian, int sign_extend)
{
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_host_swabbed = host_swabbed;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = sign_extend;

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			      bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}

int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes,
		       int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 0);
}
EXPORT_SYMBOL_GPL(kvmppc_handle_load);

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			unsigned int rt, unsigned int bytes,
			int is_default_endian)
{
	return __kvmppc_handle_load(run, vcpu, rt, bytes, is_default_endian, 1);
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u64 val, unsigned int bytes, int is_default_endian)
{
	void *data = run->mmio.data;
	int idx, ret;
	bool host_swabbed;

	/* Pity C doesn't have a logical XOR operator */
	if (kvmppc_need_byteswap(vcpu)) {
		host_swabbed = is_default_endian;
	} else {
		host_swabbed = !is_default_endian;
	}

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (!host_swabbed) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		switch (bytes) {
		case 8: *(u64 *)data = swab64(val); break;
		case 4: *(u32 *)data = swab32(val); break;
		case 2: *(u16 *)data = swab16(val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, run->mmio.phys_addr,
			       bytes, &run->mmio.data);

	srcu_read_unlock(&vcpu->kvm->srcu, idx);

	if (!ret) {
		vcpu->mmio_needed = 0;
		return EMULATE_DONE;
	}

	return EMULATE_DO_MMIO;
}
EXPORT_SYMBOL_GPL(kvmppc_handle_store);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	r = kvmppc_get_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val.vval = vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0];
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			val = get_reg_val(reg->id, vcpu->arch.vr.vscr.u[3]);
			break;
		case KVM_REG_PPC_VRSAVE:
			val = get_reg_val(reg->id, vcpu->arch.vrsave);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	r = kvmppc_set_one_reg(vcpu, reg->id, &val);
	if (r == -EINVAL) {
		r = 0;
		switch (reg->id) {
#ifdef CONFIG_ALTIVEC
		case KVM_REG_PPC_VR0 ... KVM_REG_PPC_VR31:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vr[reg->id - KVM_REG_PPC_VR0] = val.vval;
			break;
		case KVM_REG_PPC_VSCR:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vr.vscr.u[3] = set_reg_val(reg->id, val);
			break;
		case KVM_REG_PPC_VRSAVE:
			if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
				r = -ENXIO;
				break;
			}
			vcpu->arch.vrsave = set_reg_val(reg->id, val);
			break;
#endif /* CONFIG_ALTIVEC */
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

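/*
 * Illustrative sketch (not part of the original file): the two helpers above
 * back the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls, which take a
 * struct kvm_one_reg holding a register id and a userspace address. For
 * example, reading VRSAVE (placeholder names, no error handling):
 *
 *	__u64 vrsave = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_VRSAVE,
 *		.addr = (__u64)(unsigned long)&vrsave,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */
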
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
#ifdef CONFIG_BOOKE
	} else if (vcpu->arch.epr_needed) {
		kvmppc_set_epr(vcpu, run->epr.epr);
		vcpu->arch.epr_needed = 0;
#endif
	}

	if (run->immediate_exit)
		r = -EINTR;
	else
		r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

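/*
 * Illustrative sketch (not part of the original file): the MMIO completion
 * above pairs with a userspace run loop that services KVM_EXIT_MMIO and then
 * re-enters the guest, at which point kvmppc_complete_mmio_load() moves the
 * data into the target register. Roughly, with a hypothetical emulate_mmio()
 * helper and no error handling:
 *
 *	struct kvm_run *run = mmap(NULL, mmap_size, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		if (run->exit_reason == KVM_EXIT_MMIO)
 *			emulate_mmio(run->mmio.phys_addr, run->mmio.data,
 *				     run->mmio.len, run->mmio.is_write);
 *	}
 */
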
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
	case KVM_CAP_PPC_EPR:
		r = 0;
		if (cap->args[0])
			vcpu->arch.epr_flags |= KVMPPC_EPR_USER;
		else
			vcpu->arch.epr_flags &= ~KVMPPC_EPR_USER;
		break;
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_WATCHDOG:
		r = 0;
		vcpu->arch.watchdog_enabled = true;
		break;
#endif
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
#ifdef CONFIG_KVM_MPIC
	case KVM_CAP_IRQ_MPIC: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_mpic_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif
#ifdef CONFIG_KVM_XICS
	case KVM_CAP_IRQ_XICS: {
		struct fd f;
		struct kvm_device *dev;

		r = -EBADF;
		f = fdget(cap->args[0]);
		if (!f.file)
			break;

		r = -EPERM;
		dev = kvm_device_from_filp(f.file);
		if (dev)
			r = kvmppc_xics_connect_vcpu(dev, vcpu, cap->args[1]);

		fdput(f);
		break;
	}
#endif /* CONFIG_KVM_XICS */
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

bool kvm_arch_intc_initialized(struct kvm *kvm)
{
#ifdef CONFIG_KVM_MPIC
	if (kvm->arch.mpic)
		return true;
#endif
#ifdef CONFIG_KVM_XICS
	if (kvm->arch.xics)
		return true;
#endif
	return false;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = cpu_to_be32(inst_sc1);
	pvinfo->hcall[1] = cpu_to_be32(inst_nop);
	pvinfo->hcall[2] = cpu_to_be32(inst_nop);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = cpu_to_be32(inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask));
	pvinfo->hcall[1] = cpu_to_be32(inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask));
	pvinfo->hcall[2] = cpu_to_be32(inst_sc);
	pvinfo->hcall[3] = cpu_to_be32(inst_nop);
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

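/*
 * Illustrative sketch (not part of the original file): userspace fetches the
 * hypercall instruction sequence built above with the KVM_PPC_GET_PVINFO vm
 * ioctl and typically copies it into the guest device tree. Placeholder
 * descriptor name, no error handling:
 *
 *	struct kvm_ppc_pvinfo pvinfo;
 *	ioctl(vm_fd, KVM_PPC_GET_PVINFO, &pvinfo);
 *
 * Afterwards pvinfo.hcall[0..3] hold the big-endian lis/ori/sc/nop (or "sc 1")
 * sequence and pvinfo.flags reports KVM_PPC_PVINFO_FLAGS_EV_IDLE.
 */
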
int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_event,
			  bool line_status)
{
	if (!irqchip_in_kernel(kvm))
		return -ENXIO;

	irq_event->status = kvm_set_irq(kvm, KVM_USERSPACE_IRQ_SOURCE_ID,
					irq_event->irq, irq_event->level,
					line_status);
	return 0;
}


static int kvm_vm_ioctl_enable_cap(struct kvm *kvm,
				   struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
	case KVM_CAP_PPC_ENABLE_HCALL: {
		unsigned long hcall = cap->args[0];

		r = -EINVAL;
		if (hcall > MAX_HCALL_OPCODE || (hcall & 3) ||
		    cap->args[1] > 1)
			break;
		if (!kvmppc_book3s_hcall_implemented(kvm, hcall))
			break;
		if (cap->args[1])
			set_bit(hcall / 4, kvm->arch.enabled_hcalls);
		else
			clear_bit(hcall / 4, kvm->arch.enabled_hcalls);
		r = 0;
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE_64: {
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce_64, argp, sizeof(create_tce_64)))
			goto out;
		if (create_tce_64.flags) {
			r = -EINVAL;
			goto out;
		}
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm_create_spapr_tce_64 create_tce_64;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;

		create_tce_64.liobn = create_tce.liobn;
		create_tce_64.page_shift = IOMMU_PAGE_SHIFT_4K;
		create_tce_64.offset = 0;
		create_tce_64.size = create_tce.window_size >>
				IOMMU_PAGE_SHIFT_4K;
		create_tce_64.flags = 0;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce_64);
		goto out;
	}
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm_ppc_smmu_info info;
		struct kvm *kvm = filp->private_data;

		memset(&info, 0, sizeof(info));
		r = kvm->arch.kvm_ops->get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	case KVM_PPC_RTAS_DEFINE_TOKEN: {
		struct kvm *kvm = filp->private_data;

		r = kvm_vm_ioctl_rtas_define_token(kvm, argp);
		break;
	}
	case KVM_PPC_CONFIGURE_V3_MMU: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_mmuv3_cfg cfg;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->configure_mmu)
			goto out;
		r = -EFAULT;
		if (copy_from_user(&cfg, argp, sizeof(cfg)))
			goto out;
		r = kvm->arch.kvm_ops->configure_mmu(kvm, &cfg);
		break;
	}
	case KVM_PPC_GET_RMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_rmmu_info info;

		r = -EINVAL;
		if (!kvm->arch.kvm_ops->get_rmmu_info)
			goto out;
		r = kvm->arch.kvm_ops->get_rmmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
	default: {
		struct kvm *kvm = filp->private_data;
		r = kvm->arch.kvm_ops->arch_vm_ioctl(filp, ioctl, arg);
	}
#else /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
#endif
	}
out:
	return r;
}

static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}
EXPORT_SYMBOL_GPL(kvmppc_alloc_lpid);

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_claim_lpid);

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}
EXPORT_SYMBOL_GPL(kvmppc_free_lpid);

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}
EXPORT_SYMBOL_GPL(kvmppc_init_lpid);

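/*
 * Illustrative sketch (not part of the original file): a typical in-kernel
 * caller pairs the LPID allocator with the free routine around guest creation
 * and teardown. The kvm->arch.lpid field name is used here as an assumption
 * about the backend's bookkeeping and error handling is elided:
 *
 *	long lpid = kvmppc_alloc_lpid();
 *	if (lpid < 0)
 *		return lpid;
 *	kvm->arch.lpid = lpid;
 *	...
 *	kvmppc_free_lpid(kvm->arch.lpid);
 *
 * kvmppc_init_lpid() must have run first so that nr_lpids reflects what the
 * hardware or hypervisor supports.
 */
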
int kvm_arch_init(void *opaque)
{
	return 0;
}

EXPORT_TRACEPOINT_SYMBOL_GPL(kvm_ppc_instr);