/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "../mm/mmu_decl.h"

gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *v)
{
	return !!(v->arch.pending_exceptions);
}

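/*
 * MSR[WE] is the guest's wait-state (idle) bit, so a vcpu is runnable
 * only while that bit is clear.
 */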
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE);
}

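/*
 * Emulate the instruction that faulted on an emulated MMIO access and map
 * the emulation result onto a RESUME_* code telling the caller whether to
 * re-enter the guest or exit to userspace (e.g. to complete the MMIO).
 */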
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

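/*
 * There is no per-CPU hardware virtualization mode to switch on for this
 * port, so the generic hardware setup/enable hooks have nothing to do.
 */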
void kvm_arch_hardware_enable(void *garbage)
{
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;

	for (i = 0; i < KVM_MAX_VCPUS; ++i) {
		if (kvm->vcpus[i]) {
			kvm_arch_vcpu_free(kvm->vcpus[i]);
			kvm->vcpus[i] = NULL;
		}
	}
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

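/*
 * Report which generic KVM capabilities this architecture implements;
 * anything not listed here is reported to userspace as unsupported.
 */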
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_USER_MEMORY:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
                        unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_set_memory_region(struct kvm *kvm,
                               struct kvm_userspace_memory_region *mem,
                               struct kvm_memory_slot old,
                               int user_alloc)
{
	return 0;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

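/*
 * Allocate a vcpu from the generic kvm_vcpu_cache and run the common
 * initialization, unwinding the allocation if that fails.
 */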
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

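/*
 * Timer callback backing the guest decrementer: queue a decrementer
 * exception for the vcpu and wake it up if it is sleeping in halt.
 */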
static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	setup_timer(&vcpu->arch.dec_timer, kvmppc_decrementer_func,
	            (unsigned long)vcpu);

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_core_destroy_mmu(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	if (vcpu->guest_debug.enabled)
		kvmppc_core_load_guest_debugstate(vcpu);

	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug.enabled)
		kvmppc_core_load_host_debugstate(vcpu);

	/* Don't leave guest TLB entries resident when being de-scheduled. */
	/* XXX It would be nice to differentiate between heavyweight exit and
	 * sched_out here, since we could avoid the TLB flush for heavyweight
	 * exits. */
	_tlbil_all();
	kvmppc_core_vcpu_put(vcpu);
}

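/*
 * Copy userspace breakpoint settings into the vcpu's guest_debug state;
 * they take effect when the debug state is loaded on the next vcpu load.
 */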
int kvm_arch_vcpu_ioctl_debug_guest(struct kvm_vcpu *vcpu,
                                    struct kvm_debug_guest *dbg)
{
	int i;

	vcpu->guest_debug.enabled = dbg->enabled;
	if (vcpu->guest_debug.enabled) {
		for (i = 0; i < ARRAY_SIZE(vcpu->guest_debug.bp); i++) {
			if (dbg->breakpoints[i].enabled)
				vcpu->guest_debug.bp[i] = dbg->breakpoints[i].address;
			else
				vcpu->guest_debug.bp[i] = 0;
		}
	}

	return 0;
}

static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
                                     struct kvm_run *run)
{
	u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
	*gpr = run->dcr.data;
}

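/*
 * Finish a guest load that had to go out to userspace: copy the data
 * userspace placed in run->mmio.data into the GPR recorded by
 * kvmppc_handle_load(), honouring the byte order of the original access.
 */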
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
                                      struct kvm_run *run)
{
	u32 *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}

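/*
 * Describe a guest load in run->mmio and remember which GPR should receive
 * the result. Roughly, the expected flow is: KVM_RUN returns to userspace
 * with exit_reason == KVM_EXIT_MMIO, userspace performs the access and
 * writes the result into run->mmio.data, and the next KVM_RUN completes
 * the load via kvmppc_complete_mmio_load().
 */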
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;

	return EMULATE_DO_MMIO;
}

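/*
 * Describe a guest store in run->mmio, placing the value to be written in
 * run->mmio.data in the byte order of the original access, and hand it to
 * userspace to perform.
 */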
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        u32 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__, bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

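/*
 * Main vcpu run entry point: complete any MMIO or DCR load that userspace
 * just finished, deliver pending guest interrupts, then drop into the
 * low-level guest entry path (__kvmppc_vcpu_run).
 */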
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

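/*
 * Inject an external interrupt requested by userspace (KVM_INTERRUPT) and
 * wake the vcpu if it is currently halted.
 */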
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
                                    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
                         unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

long kvm_arch_vm_ioctl(struct file *filp,
                       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -EINVAL;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}