/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <linux/slab.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include <asm/cputhreads.h>
#include <asm/irqflags.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions) ||
	       v->requests;
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	return 1;
}

#ifndef CONFIG_KVM_BOOK3S_64_HV
/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns:
 *
 * == 1 if we're ready to go into guest state
 * <= 0 if we need to go back to the host with return value
 */
int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 1;

	WARN_ON_ONCE(!irqs_disabled());
	while (true) {
		if (need_resched()) {
			local_irq_enable();
			cond_resched();
			local_irq_disable();
			continue;
		}

		if (signal_pending(current)) {
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
			vcpu->run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
			break;
		}

		smp_mb();
		if (vcpu->requests) {
			/* Make sure we process requests with preemption enabled */
			local_irq_enable();
			trace_kvm_check_requests(vcpu);
			kvmppc_core_check_requests(vcpu);
			local_irq_disable();
			continue;
		}

		if (kvmppc_core_prepare_to_enter(vcpu)) {
			/* interrupts got enabled in between, so we
			   are back at square 1 */
			continue;
		}

#ifdef CONFIG_PPC64
		/* lazy EE magic */
		hard_irq_disable();
		if (lazy_irq_pending()) {
			/* Got an interrupt in between, try again */
			local_irq_enable();
			local_irq_disable();
			kvm_guest_exit();
			continue;
		}

		trace_hardirqs_on();
#endif

		kvm_guest_enter();

		/* Going into guest context! Yay! */
		vcpu->mode = IN_GUEST_MODE;
		smp_wmb();

		break;
	}

	return r;
}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

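/*
 * Handle a paravirtual hypercall from the guest.  The hypercall number
 * arrives in r11 and up to four arguments in r3-r6 (truncated to 32 bits
 * when the guest is not running in 64-bit mode); the primary status is
 * returned to the caller and a secondary return value is placed in r4.
 */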
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}

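/*
 * Check that userspace has configured this vcpu coherently before it is
 * allowed to run: a PVR must be set, PAPR mode requires a 64-bit Book3S
 * CPU, HV KVM only supports PAPR mode, and Book E HV needs the embedded
 * hypervisor feature.  The verdict is cached in vcpu->arch.sane.
 */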
int kvmppc_sanity_check(struct kvm_vcpu *vcpu)
{
	int r = false;

	/* We have to know what CPU to virtualize */
	if (!vcpu->arch.pvr)
		goto out;

	/* PAPR only works with book3s_64 */
	if ((vcpu->arch.cpu_type != KVM_CPU_3S_64) && vcpu->arch.papr_enabled)
		goto out;

#ifdef CONFIG_KVM_BOOK3S_64_HV
	/* HV KVM can only do PAPR mode for now */
	if (!vcpu->arch.papr_enabled)
		goto out;
#endif

#ifdef CONFIG_KVM_BOOKE_HV
	if (!cpu_has_feature(CPU_FTR_EMB_HV))
		goto out;
#endif

	r = true;

out:
	vcpu->arch.sane = r;
	return r ? 0 : -EINVAL;
}

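/*
 * Emulate the instruction that triggered the current exit and map the
 * emulation result onto a RESUME_* action: keep running the guest,
 * bounce the access out to userspace as KVM_EXIT_MMIO, or return to
 * the host when emulation fails.
 */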
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	if (type)
		return -EINVAL;

	return kvmppc_core_init_vm(kvm);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);

	kvmppc_core_destroy_vm(kvm);

	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

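/*
 * Report which optional KVM capabilities this build supports.  Most
 * capabilities are simple yes/no answers; a few (e.g. KVM_CAP_PPC_SMT,
 * KVM_CAP_NR_VCPUS, KVM_CAP_MAX_VCPUS) return a count instead.
 */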
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
#ifdef CONFIG_BOOKE
	case KVM_CAP_PPC_BOOKE_SREGS:
#else
	case KVM_CAP_PPC_SEGSTATE:
	case KVM_CAP_PPC_HIOR:
	case KVM_CAP_PPC_PAPR:
#endif
	case KVM_CAP_PPC_UNSET_IRQ:
	case KVM_CAP_PPC_IRQ_LEVEL:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_ONE_REG:
		r = 1;
		break;
#ifndef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_PAIRED_SINGLES:
	case KVM_CAP_PPC_OSI:
	case KVM_CAP_PPC_GET_PVINFO:
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB:
#endif
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
#endif
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_SPAPR_TCE:
	case KVM_CAP_PPC_ALLOC_HTAB:
		r = 1;
		break;
#endif /* CONFIG_PPC_BOOK3S_64 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_CAP_PPC_SMT:
		r = threads_per_core;
		break;
	case KVM_CAP_PPC_RMA:
		r = 1;
		/* PPC970 requires an RMA */
		if (cpu_has_feature(CPU_FTR_ARCH_201))
			r = 2;
		break;
#endif
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = cpu_has_feature(CPU_FTR_ARCH_206) ? 1 : 0;
#elif defined(KVM_ARCH_WANT_MMU_NOTIFIER)
		r = 1;
#else
		r = 0;
#endif
		break;
	case KVM_CAP_NR_VCPUS:
		/*
		 * Recommending a number of CPUs is somewhat arbitrary; we
		 * return the number of present CPUs for -HV (since a host
		 * will have secondary threads "offline"), and for other KVM
		 * implementations just count online CPUs.
		 */
#ifdef CONFIG_KVM_BOOK3S_64_HV
		r = num_present_cpus();
#else
		r = num_online_cpus();
#endif
		break;
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CAP_PPC_GET_SMMU_INFO:
		r = 1;
		break;
#endif
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

void kvm_arch_free_memslot(struct kvm_memory_slot *free,
			   struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

int kvm_arch_create_memslot(struct kvm_memory_slot *slot, unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
                                   struct kvm_memory_slot *memslot,
                                   struct kvm_memory_slot old,
                                   struct kvm_userspace_memory_region *mem,
                                   int user_alloc)
{
	return kvmppc_core_prepare_memory_region(kvm, mem);
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
                                   struct kvm_userspace_memory_region *mem,
                                   struct kvm_memory_slot old,
                                   int user_alloc)
{
	kvmppc_core_commit_memory_region(kvm, mem);
}

void kvm_arch_flush_shadow_all(struct kvm *kvm)
{
}

void kvm_arch_flush_shadow_memslot(struct kvm *kvm,
				   struct kvm_memory_slot *slot)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	if (!IS_ERR(vcpu)) {
		vcpu->arch.wqp = &vcpu->wq;
		kvmppc_create_vcpu_debugfs(vcpu, id);
	}
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	/* Make sure we're not using the vcpu anymore */
	hrtimer_cancel(&vcpu->arch.dec_timer);
	tasklet_kill(&vcpu->arch.tasklet);

	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

/*
 * low level hrtimer wake routine. Because this runs in hardirq context
 * we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;
	vcpu->arch.dec_expires = ~(u64)0;

#ifdef CONFIG_KVM_EXIT_TIMING
	mutex_init(&vcpu->arch.exit_timing_lock);
#endif

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_BOOKE
	/*
	 * vrsave (formerly usprg0) isn't used by Linux, but may
	 * be used by the guest.
	 *
	 * On non-booke this is associated with Altivec and
	 * is handled by code in book3s.c.
	 */
	mtspr(SPRN_VRSAVE, vcpu->arch.vrsave);
#endif
	kvmppc_core_vcpu_load(vcpu, cpu);
	vcpu->cpu = smp_processor_id();
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
#ifdef CONFIG_BOOKE
	vcpu->arch.vrsave = mfspr(SPRN_VRSAVE);
#endif
	vcpu->cpu = -1;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}

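/*
 * Fold the result of an MMIO read, delivered by userspace in
 * run->mmio.data, back into the register targeted by the guest
 * instruction, applying the recorded endianness and optional sign
 * extension and steering the value to a GPR, FPR or QPR as requested.
 */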
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u64 uninitialized_var(gpr);

	if (run->mmio.len > sizeof(gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 8: gpr = *(u64 *)run->mmio.data; break;
		case 4: gpr = *(u32 *)run->mmio.data; break;
		case 2: gpr = *(u16 *)run->mmio.data; break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: gpr = *(u8 *)run->mmio.data; break;
		}
	}

	if (vcpu->arch.mmio_sign_extend) {
		switch (run->mmio.len) {
#ifdef CONFIG_PPC64
		case 4:
			gpr = (s64)(s32)gpr;
			break;
#endif
		case 2:
			gpr = (s64)(s16)gpr;
			break;
		case 1:
			gpr = (s64)(s8)gpr;
			break;
		}
	}

	kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);

	switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) {
	case KVM_MMIO_REG_GPR:
		kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr);
		break;
	case KVM_MMIO_REG_FPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#ifdef CONFIG_PPC_BOOK3S
	case KVM_MMIO_REG_QPR:
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
	case KVM_MMIO_REG_FQPR:
		vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr;
		break;
#endif
	default:
		BUG();
	}
}

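/*
 * Set up a guest load from emulated MMIO space: describe the access in
 * the kvm_run MMIO block, remember which register should receive the
 * data, and return EMULATE_DO_MMIO so the caller exits to userspace.
 */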
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;
	vcpu->arch.mmio_sign_extend = 0;

	return EMULATE_DO_MMIO;
}

/* Same as above, but sign extends */
int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int rt, unsigned int bytes, int is_bigendian)
{
	int r;

	r = kvmppc_handle_load(run, vcpu, rt, bytes, is_bigendian);
	vcpu->arch.mmio_sign_extend = 1;

	return r;
}

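/*
 * Set up a guest store to emulated MMIO space: place the value to be
 * written into run->mmio.data in the right byte order and return
 * EMULATE_DO_MMIO so the caller exits to userspace to complete it.
 */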
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u64 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       run->mmio.len);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 8: *(u64 *)data = val; break;
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

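/*
 * The KVM_RUN ioctl: first complete whatever the previous exit left
 * pending (an MMIO or DCR load result, OSI GPRs, or a PAPR hypercall
 * return value), then enter the core run loop with the caller's signal
 * mask installed.
 */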
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	} else if (vcpu->arch.osi_needed) {
		u64 *gprs = run->osi.gprs;
		int i;

		for (i = 0; i < 32; i++)
			kvmppc_set_gpr(vcpu, i, gprs[i]);
		vcpu->arch.osi_needed = 0;
	} else if (vcpu->arch.hcall_needed) {
		int i;

		kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret);
		for (i = 0; i < 9; ++i)
			kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]);
		vcpu->arch.hcall_needed = 0;
	}

	r = kvmppc_vcpu_run(run, vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	return r;
}

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	if (irq->irq == KVM_INTERRUPT_UNSET) {
		kvmppc_core_dequeue_external(vcpu, irq);
		return 0;
	}

	kvmppc_core_queue_external(vcpu, irq);

	kvm_vcpu_kick(vcpu);

	return 0;
}

static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
				     struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_PPC_OSI:
		r = 0;
		vcpu->arch.osi_enabled = true;
		break;
	case KVM_CAP_PPC_PAPR:
		r = 0;
		vcpu->arch.papr_enabled = true;
		break;
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_CAP_SW_TLB: {
		struct kvm_config_tlb cfg;
		void __user *user_ptr = (void __user *)(uintptr_t)cap->args[0];

		r = -EFAULT;
		if (copy_from_user(&cfg, user_ptr, sizeof(cfg)))
			break;

		r = kvm_vcpu_ioctl_config_tlb(vcpu, &cfg);
		break;
	}
#endif
	default:
		r = -EINVAL;
		break;
	}

	if (!r)
		r = kvmppc_sanity_check(vcpu);

	return r;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		goto out;
	}

	case KVM_ENABLE_CAP:
	{
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			goto out;
		r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
		break;
	}

	case KVM_SET_ONE_REG:
	case KVM_GET_ONE_REG:
	{
		struct kvm_one_reg reg;
		r = -EFAULT;
		if (copy_from_user(&reg, argp, sizeof(reg)))
			goto out;
		if (ioctl == KVM_SET_ONE_REG)
			r = kvm_vcpu_ioctl_set_one_reg(vcpu, &reg);
		else
			r = kvm_vcpu_ioctl_get_one_reg(vcpu, &reg);
		break;
	}

#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	case KVM_DIRTY_TLB: {
		struct kvm_dirty_tlb dirty;
		r = -EFAULT;
		if (copy_from_user(&dirty, argp, sizeof(dirty)))
			goto out;
		r = kvm_vcpu_ioctl_dirty_tlb(vcpu, &dirty);
		break;
	}
#endif
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
	return VM_FAULT_SIGBUS;
}

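/*
 * Fill in the instruction sequence a guest should use to make a KVM
 * paravirtual hypercall, as reported by KVM_PPC_GET_PVINFO: a single
 * "sc 1" on Book E HV, otherwise the magic-r0 lis/ori/sc/nop sequence
 * described below.
 */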
static int kvm_vm_ioctl_get_pvinfo(struct kvm_ppc_pvinfo *pvinfo)
{
	u32 inst_nop = 0x60000000;
#ifdef CONFIG_KVM_BOOKE_HV
	u32 inst_sc1 = 0x44000022;
	pvinfo->hcall[0] = inst_sc1;
	pvinfo->hcall[1] = inst_nop;
	pvinfo->hcall[2] = inst_nop;
	pvinfo->hcall[3] = inst_nop;
#else
	u32 inst_lis = 0x3c000000;
	u32 inst_ori = 0x60000000;
	u32 inst_sc = 0x44000002;
	u32 inst_imm_mask = 0xffff;

	/*
	 * The hypercall to get into KVM from within guest context is as
	 * follows:
	 *
	 *    lis r0, KVM_SC_MAGIC_R0@h
	 *    ori r0, r0, KVM_SC_MAGIC_R0@l
	 *    sc
	 *    nop
	 */
	pvinfo->hcall[0] = inst_lis | ((KVM_SC_MAGIC_R0 >> 16) & inst_imm_mask);
	pvinfo->hcall[1] = inst_ori | (KVM_SC_MAGIC_R0 & inst_imm_mask);
	pvinfo->hcall[2] = inst_sc;
	pvinfo->hcall[3] = inst_nop;
#endif

	pvinfo->flags = KVM_PPC_PVINFO_FLAGS_EV_IDLE;

	return 0;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_PPC_GET_PVINFO: {
		struct kvm_ppc_pvinfo pvinfo;
		memset(&pvinfo, 0, sizeof(pvinfo));
		r = kvm_vm_ioctl_get_pvinfo(&pvinfo);
		if (copy_to_user(argp, &pvinfo, sizeof(pvinfo))) {
			r = -EFAULT;
			goto out;
		}

		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_CREATE_SPAPR_TCE: {
		struct kvm_create_spapr_tce create_tce;
		struct kvm *kvm = filp->private_data;

		r = -EFAULT;
		if (copy_from_user(&create_tce, argp, sizeof(create_tce)))
			goto out;
		r = kvm_vm_ioctl_create_spapr_tce(kvm, &create_tce);
		goto out;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */

#ifdef CONFIG_KVM_BOOK3S_64_HV
	case KVM_ALLOCATE_RMA: {
		struct kvm *kvm = filp->private_data;
		struct kvm_allocate_rma rma;

		r = kvm_vm_ioctl_allocate_rma(kvm, &rma);
		if (r >= 0 && copy_to_user(argp, &rma, sizeof(rma)))
			r = -EFAULT;
		break;
	}

	case KVM_PPC_ALLOCATE_HTAB: {
		struct kvm *kvm = filp->private_data;
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
		if (r)
			break;
		r = -EFAULT;
		if (put_user(htab_order, (u32 __user *)argp))
			break;
		r = 0;
		break;
	}
#endif /* CONFIG_KVM_BOOK3S_64_HV */

#ifdef CONFIG_PPC_BOOK3S_64
	case KVM_PPC_GET_SMMU_INFO: {
		struct kvm *kvm = filp->private_data;
		struct kvm_ppc_smmu_info info;

		memset(&info, 0, sizeof(info));
		r = kvm_vm_ioctl_get_smmu_info(kvm, &info);
		if (r >= 0 && copy_to_user(argp, &info, sizeof(info)))
			r = -EFAULT;
		break;
	}
#endif /* CONFIG_PPC_BOOK3S_64 */
	default:
		r = -ENOTTY;
	}

out:
	return r;
}

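/*
 * Simple bitmap allocator for logical partition IDs (LPIDs).  The usable
 * range is capped at KVMPPC_NR_LPIDS and set by kvmppc_init_lpid();
 * allocation finds the first free bit and claims it atomically.
 */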
static unsigned long lpid_inuse[BITS_TO_LONGS(KVMPPC_NR_LPIDS)];
static unsigned long nr_lpids;

long kvmppc_alloc_lpid(void)
{
	long lpid;

	do {
		lpid = find_first_zero_bit(lpid_inuse, KVMPPC_NR_LPIDS);
		if (lpid >= nr_lpids) {
			pr_err("%s: No LPIDs free\n", __func__);
			return -ENOMEM;
		}
	} while (test_and_set_bit(lpid, lpid_inuse));

	return lpid;
}

void kvmppc_claim_lpid(long lpid)
{
	set_bit(lpid, lpid_inuse);
}

void kvmppc_free_lpid(long lpid)
{
	clear_bit(lpid, lpid_inuse);
}

void kvmppc_init_lpid(unsigned long nr_lpids_param)
{
	nr_lpids = min_t(unsigned long, KVMPPC_NR_LPIDS, nr_lpids_param);
	memset(lpid_inuse, 0, sizeof(lpid_inuse));
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}