arch/powerpc/kvm/powerpc.c
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/hrtimer.h>
#include <linux/fs.h>
#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/tlbflush.h>
#include "timing.h"
#include "../mm/mmu_decl.h"

#define CREATE_TRACE_POINTS
#include "trace.h"

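/*
 * PowerPC has no concept of guest memory slot aliasing, so every gfn
 * simply maps to itself.
 */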
gfn_t unalias_gfn(struct kvm *kvm, gfn_t gfn)
{
	return gfn;
}

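/*
 * A vcpu is runnable unless the guest has stopped itself with MSR[WE]
 * (wait state enable) and has no exceptions pending to wake it up.
 */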
int kvm_arch_vcpu_runnable(struct kvm_vcpu *v)
{
	return !(v->arch.msr & MSR_WE) || !!(v->arch.pending_exceptions);
}

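/*
 * Emulate one instruction and map the emulation result onto a resume
 * code: re-enter the guest on success, exit to userspace to complete
 * an MMIO access, or bail out if the instruction cannot be emulated.
 */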
int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       vcpu->arch.last_inst);
		r = RESUME_HOST;
		break;
	default:
		BUG();
	}

	return r;
}

int kvm_arch_hardware_enable(void *garbage)
{
	return 0;
}

void kvm_arch_hardware_disable(void *garbage)
{
}

int kvm_arch_hardware_setup(void)
{
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
}

void kvm_arch_check_processor_compat(void *rtn)
{
	*(int *)rtn = kvmppc_core_check_processor_compat();
}

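/*
 * Allocate the arch-independent struct kvm; generic KVM code performs
 * the rest of the VM initialization.
 */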
struct kvm *kvm_arch_create_vm(void)
{
	struct kvm *kvm;

	kvm = kzalloc(sizeof(struct kvm), GFP_KERNEL);
	if (!kvm)
		return ERR_PTR(-ENOMEM);

	return kvm;
}

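/*
 * Free every vcpu, then clear the vcpu table under kvm->lock so the
 * VM reports no online vcpus.
 */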
static void kvmppc_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_free(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_sync_events(struct kvm *kvm)
{
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvmppc_free_vcpus(kvm);
	kvm_free_physmem(kvm);
	kfree(kvm);
}

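/*
 * Report which optional KVM capabilities this architecture supports;
 * anything not handled explicitly is reported as unsupported.
 */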
int kvm_dev_ioctl_check_extension(long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_PPC_SEGSTATE:
		r = 1;
		break;
	case KVM_CAP_COALESCED_MMIO:
		r = KVM_COALESCED_MMIO_PAGE_OFFSET;
		break;
	default:
		r = 0;
		break;
	}
	return r;
}

long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	return -EINVAL;
}

int kvm_arch_prepare_memory_region(struct kvm *kvm,
				   struct kvm_memory_slot *memslot,
				   struct kvm_memory_slot old,
				   struct kvm_userspace_memory_region *mem,
				   int user_alloc)
{
	return 0;
}

void kvm_arch_commit_memory_region(struct kvm *kvm,
				   struct kvm_userspace_memory_region *mem,
				   struct kvm_memory_slot old,
				   int user_alloc)
{
	return;
}

void kvm_arch_flush_shadow(struct kvm *kvm)
{
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	vcpu = kvmppc_core_vcpu_create(kvm, id);
	kvmppc_create_vcpu_debugfs(vcpu, id);
	return vcpu;
}

void kvm_arch_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvmppc_remove_vcpu_debugfs(vcpu);
	kvmppc_core_vcpu_free(vcpu);
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	kvm_arch_vcpu_free(vcpu);
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return kvmppc_core_pending_dec(vcpu);
}

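/*
 * Tasklet body for decrementer expiry: queue a decrementer exception
 * for the guest and wake the vcpu if it is sleeping in halt.
 */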
static void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	kvmppc_core_queue_dec(vcpu);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

/*
 * Low-level hrtimer wake routine. Because this runs in hardirq
 * context, we schedule a tasklet to do the real work.
 */
enum hrtimer_restart kvmppc_decrementer_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.dec_timer);
	tasklet_schedule(&vcpu->arch.tasklet);

	return HRTIMER_NORESTART;
}

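/*
 * Set up decrementer emulation for this vcpu: an hrtimer fires at the
 * programmed expiry and schedules the tasklet that delivers the
 * interrupt.
 */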
int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	hrtimer_init(&vcpu->arch.dec_timer, CLOCK_REALTIME, HRTIMER_MODE_ABS);
	tasklet_init(&vcpu->arch.tasklet, kvmppc_decrementer_func, (ulong)vcpu);
	vcpu->arch.dec_timer.function = kvmppc_decrementer_wakeup;

	return 0;
}

void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_destroy(vcpu);
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	kvmppc_core_vcpu_load(vcpu, cpu);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	kvmppc_core_vcpu_put(vcpu);
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

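/*
 * Completion helpers: once userspace has finished a DCR or MMIO read,
 * copy the returned data into the guest register that initiated the
 * access, byte-swapping MMIO data where required.
 */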
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu,
				     struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];
	*gpr = run->dcr.data;
}

static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu,
				      struct kvm_run *run)
{
	ulong *gpr = &vcpu->arch.gpr[vcpu->arch.io_gpr];

	if (run->mmio.len > sizeof(*gpr)) {
		printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len);
		return;
	}

	if (vcpu->arch.mmio_is_bigendian) {
		switch (run->mmio.len) {
		case 4: *gpr = *(u32 *)run->mmio.data; break;
		case 2: *gpr = *(u16 *)run->mmio.data; break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	} else {
		/* Convert BE data from userland back to LE. */
		switch (run->mmio.len) {
		case 4: *gpr = ld_le32((u32 *)run->mmio.data); break;
		case 2: *gpr = ld_le16((u16 *)run->mmio.data); break;
		case 1: *gpr = *(u8 *)run->mmio.data; break;
		}
	}
}

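/*
 * Stage a guest load for userspace emulation: describe the access in
 * the run struct and record which GPR receives the data once
 * userspace completes the read.
 */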
int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int rt, unsigned int bytes, int is_bigendian)
{
	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 0;

	vcpu->arch.io_gpr = rt;
	vcpu->arch.mmio_is_bigendian = is_bigendian;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 0;

	return EMULATE_DO_MMIO;
}

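/*
 * Stage a guest store for userspace emulation: place the value in the
 * run struct's data buffer in the byte order of the original access.
 */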
int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			u32 val, unsigned int bytes, int is_bigendian)
{
	void *data = run->mmio.data;

	if (bytes > sizeof(run->mmio.data)) {
		printk(KERN_ERR "%s: bad MMIO length: %d\n", __func__,
		       bytes);
	}

	run->mmio.phys_addr = vcpu->arch.paddr_accessed;
	run->mmio.len = bytes;
	run->mmio.is_write = 1;
	vcpu->mmio_needed = 1;
	vcpu->mmio_is_write = 1;

	/* Store the value at the lowest bytes in 'data'. */
	if (is_bigendian) {
		switch (bytes) {
		case 4: *(u32 *)data = val; break;
		case 2: *(u16 *)data = val; break;
		case 1: *(u8 *)data = val; break;
		}
	} else {
		/* Store LE value into 'data'. */
		switch (bytes) {
		case 4: st_le32(data, val); break;
		case 2: st_le16(data, val); break;
		case 1: *(u8 *)data = val; break;
		}
	}

	return EMULATE_DO_MMIO;
}

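/*
 * Main vcpu run entry point: complete any MMIO or DCR access that
 * userspace just finished, deliver pending interrupts, then enter the
 * guest with host interrupts disabled.
 */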
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	int r;
	sigset_t sigsaved;

	vcpu_load(vcpu);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (vcpu->mmio_needed) {
		if (!vcpu->mmio_is_write)
			kvmppc_complete_mmio_load(vcpu, run);
		vcpu->mmio_needed = 0;
	} else if (vcpu->arch.dcr_needed) {
		if (!vcpu->arch.dcr_is_write)
			kvmppc_complete_dcr_load(vcpu, run);
		vcpu->arch.dcr_needed = 0;
	}

	kvmppc_core_deliver_interrupts(vcpu);

	local_irq_disable();
	kvm_guest_enter();
	r = __kvmppc_vcpu_run(run, vcpu);
	kvm_guest_exit();
	local_irq_enable();

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu_put(vcpu);

	return r;
}

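/*
 * Inject an external interrupt from userspace and wake the vcpu if it
 * is halted.
 */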
int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq)
{
	kvmppc_core_queue_external(vcpu, irq);

	if (waitqueue_active(&vcpu->wq)) {
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	return -EINVAL;
}

long kvm_arch_vcpu_ioctl(struct file *filp,
			 unsigned int ioctl, unsigned long arg)
{
	struct kvm_vcpu *vcpu = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {
	case KVM_INTERRUPT: {
		struct kvm_interrupt irq;
		r = -EFAULT;
		if (copy_from_user(&irq, argp, sizeof(irq)))
			goto out;
		r = kvm_vcpu_ioctl_interrupt(vcpu, &irq);
		break;
	}
	default:
		r = -EINVAL;
	}

out:
	return r;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	long r;

	switch (ioctl) {
	default:
		r = -ENOTTY;
	}

	return r;
}

int kvm_arch_init(void *opaque)
{
	return 0;
}

void kvm_arch_exit(void)
{
}