/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * Returns !0 if a signal is pending.
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			r = 1;
			break;
		}

		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests preemptibly */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}

		trace_hardirqs_on();
#endif

		kvm_guest_enter();

		/* Going into guest context! Yay! */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

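/*
 * Handle a hypercall from the guest: the hypercall number arrives in
 * GPR11, up to four arguments in GPR3-GPR6 (truncated to 32 bits when the
 * guest is not in 64-bit mode), the status code is handed back to the
 * caller and a second return value is placed in GPR4.
 */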
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

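/*
 * Check that the vcpu configuration requested by userspace is something
 * this host can actually virtualize; the result is cached in
 * vcpu->arch.sane.
 */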
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

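/*
 * Emulate the instruction that caused the last exit and translate the
 * emulation result into a resume code: re-enter the guest, bounce the
 * MMIO access out to userspace, or fail back to the host.
 */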
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

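/*
 * Report which optional KVM capabilities this host supports; most entries
 * simply answer 0 or 1, while a few (e.g. KVM_CAP_PPC_SMT and
 * KVM_CAP_NR_VCPUS) return a count instead.
 */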
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
                           struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
                                   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

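/*
 * Complete an MMIO load: userspace has filled in run->mmio.data, so
 * convert it according to the access size, the guest's endianness and any
 * pending sign extension, then write the result into the register the
 * faulting instruction targeted (GPR, FPR or QPR).
 */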
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

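/*
 * Set up a KVM_EXIT_MMIO exit so userspace can perform the load; the
 * target register and access parameters are remembered in the vcpu so
 * kvmppc_complete_mmio_load() can finish the access on the next KVM_RUN.
 */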
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

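/*
 * Main KVM_RUN entry point: first finish any work that was handed off to
 * userspace on the previous exit (MMIO or DCR data, OSI or PAPR hypercall
 * results), then drop back into the guest.
 */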
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

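/*
 * Enable an optional per-vcpu capability (KVM_ENABLE_CAP); on success the
 * new configuration is re-validated with kvmppc_sanity_check().
 */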
static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
                                     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

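/*
 * Fill in the instruction sequence a guest should use to make KVM
 * hypercalls: a single "sc 1" on booke-hv, and the magic-r0 lis/ori/sc
 * sequence everywhere else.
 */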
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

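/*
 * Simple bitmap allocator for logical partition IDs (LPIDs):
 * kvmppc_init_lpid() sets the number of usable IDs, kvmppc_alloc_lpid()
 * hands out a free one, and kvmppc_claim_lpid() reserves an ID chosen
 * elsewhere.
 */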
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}