/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};

void kvmppc_core_load_host_debugstate(struct kvm_vcpu *vcpu)
{
}

void kvmppc_core_load_guest_debugstate(struct kvm_vcpu *vcpu)
{
}

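/*
 * On 64-bit hosts the shadow vcpu state used by the real-mode entry code
 * lives in the PACA, so it is copied in on vcpu_load and back out on
 * vcpu_put; 32-bit hosts instead keep a pointer to it in the thread struct.
 */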
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_svcpu(vcpu)->slb, to_book3s(vcpu)->slb_shadow,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_svcpu(vcpu)->slb_max = to_book3s(vcpu)->slb_shadow_max;
#endif

#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	memcpy(to_book3s(vcpu)->slb_shadow, to_svcpu(vcpu)->slb,
	       sizeof(to_svcpu(vcpu)->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = to_svcpu(vcpu)->slb_max;
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);
}

#if defined(EXIT_DEBUG)
static u32 kvmppc_get_dec(struct kvm_vcpu *vcpu)
{
	u64 jd = mftb() - vcpu->arch.dec_jiffies;
	return vcpu->arch.dec - jd;
}
#endif

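/*
 * Recompute the MSR the CPU actually runs with while the guest is active:
 * a few guest-controlled bits (FE0/FE1/SF/SE/BE/DE, plus any facility bits
 * the guest currently owns) pass straight through, while the bits the host
 * relies on (ME/RI/IR/DR/PR/EE) are always forced on.
 */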
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shadow_msr = vcpu->arch.msr;
	/* Guest MSR values */
	vcpu->arch.shadow_msr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE |
				 MSR_BE | MSR_DE;
	/* Process MSR values */
	vcpu->arch.shadow_msr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR |
				 MSR_EE;
	/* External providers the guest reserved */
	vcpu->arch.shadow_msr |= (vcpu->arch.msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_msr |= MSR_ISF | MSR_HV;
#endif
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & (MSR_WE|MSR_POW)) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			vcpu->stat.halt_wakeup++;
		}
	}

	if (((vcpu->arch.msr & (MSR_IR|MSR_DR)) != (old_msr & (MSR_IR|MSR_DR))) ||
	    (vcpu->arch.msr & MSR_PR) != (old_msr & MSR_PR)) {
		bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
		bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;

		/* Flush split mode PTEs */
		if (dr != ir)
			kvmppc_mmu_pte_vflush(vcpu, VSID_SPLIT_MASK,
					      VSID_SPLIT_MASK);

		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	vcpu->arch.srr0 = kvmppc_get_pc(vcpu);
	vcpu->arch.srr1 = vcpu->arch.msr | flags;
	kvmppc_set_pc(vcpu, to_book3s(vcpu)->hior + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}

static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;  break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK; break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;  break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;  break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;  break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;  break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;      break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;     break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;       break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;    break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;   break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;       break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;         break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;       break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;           break;
	default:    prio = BOOK3S_IRQPRIO_MAX;           break;
	}

	return prio;
}

static void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
					  unsigned int vec)
{
	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	to_book3s(vcpu)->prog_flags = flags;
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

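/*
 * Must test the same pending_exceptions bit that
 * kvmppc_book3s_queue_irqprio() sets for the decrementer vector.
 */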
int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(kvmppc_book3s_vec2irqprio(BOOK3S_INTERRUPT_DECREMENTER),
			&vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
}

int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu, unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	ulong flags = 0ULL;

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
		deliver = vcpu->arch.msr & MSR_EE;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		flags = to_book3s(vcpu)->prog_flags;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, flags);

	return deliver;
}

void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n",
		       vcpu->arch.pending_exceptions);
#endif
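	/* Deliver the highest-priority (lowest-numbered) pending interrupt
	 * first, then walk the remaining bits in priority order. */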
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    (priority != BOOK3S_IRQPRIO_DECREMENTER)) {
			/* DEC interrupts get cleared by mtdec */
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
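	/* PVRs in this range identify 64-bit Book3S CPUs; everything else
	 * falls back to the 32-bit MMU model. */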
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
	}

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32 byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif
}

/* Book3s_32 CPUs always have a 32 byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32 byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage, KM_USER0);

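	/*
	 * The 0xff0007ff mask ignores the register operands, so any dcbz
	 * encoding matches; clearing one opcode bit turns it into an
	 * instruction that traps, which the 0x700 handler below recognizes
	 * and emulates as a 32 byte dcbz.
	 */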
	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page, KM_USER0);
	put_page(hpage);
}

static int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, bool data,
			struct kvmppc_pte *pte)
{
	int relocated = (vcpu->arch.msr & (data ? MSR_DR : MSR_IR));
	int r;

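	/* With translation off, the effective address is the real address
	 * (truncated to 32 bits) and everything is accessible. */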
	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & 0xffffffff;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;
	}

	return r;
}

static hva_t kvmppc_bad_hva(void)
{
	return PAGE_OFFSET;
}

static hva_t kvmppc_pte_to_hva(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte,
			       bool read)
{
	hva_t hpage;

	if (read && !pte->may_read)
		goto err;

	if (!read && !pte->may_write)
		goto err;

	hpage = gfn_to_hva(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (kvm_is_error_hva(hpage))
		goto err;

	return hpage | (pte->raddr & ~PAGE_MASK);
err:
	return kvmppc_bad_hva();
}

int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;

	vcpu->stat.st++;

	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
		return -ENOENT;

	*eaddr = pte.raddr;

	if (!pte.may_write)
		return -EPERM;

	if (kvm_write_guest(vcpu->kvm, pte.raddr, ptr, size))
		return EMULATE_DO_MMIO;

	return EMULATE_DONE;
}

int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
	      bool data)
{
	struct kvmppc_pte pte;
	hva_t hva = *eaddr;

	vcpu->stat.ld++;

	if (kvmppc_xlate(vcpu, *eaddr, data, &pte))
		goto nopte;

	*eaddr = pte.raddr;

	hva = kvmppc_pte_to_hva(vcpu, &pte, true);
	if (kvm_is_error_hva(hva))
		goto mmio;

	if (copy_from_user(ptr, (void __user *)hva, size)) {
		printk(KERN_INFO "kvmppc_ld at 0x%lx failed\n", hva);
		goto mmio;
	}

	return EMULATE_DONE;

nopte:
	return -ENOENT;
mmio:
	return EMULATE_DO_MMIO;
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.msr & MSR_IR) ? true : false;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & 0xffffffff;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

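	/* Tag the virtual page with the current IR/DR state so that real
	 * mode and split mode (IR != DR) get distinct shadow translations. */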
	switch (vcpu->arch.msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= VSID_REAL;
		break;
	case MSR_DR:
		pte.vpage |= VSID_REAL_DR;
		break;
	case MSR_IR:
		pte.vpage |= VSID_REAL_IR;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	    (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
		vcpu->arch.msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		to_book3s(vcpu)->dsisr =
			to_svcpu(vcpu)->fault_dsisr & ~DSISR_NOHPTE;
		to_book3s(vcpu)->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.msr |=
			(to_svcpu(vcpu)->shadow_srr1 & 0x00000000f8000000ULL);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

static inline int get_fpr_index(int i)
{
#ifdef CONFIG_VSX
	i *= 2;
#endif
	return i;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	if (!(vcpu->arch.guest_owned_ext & msr))
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	switch (msr) {
	case MSR_FP:
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		__giveup_vsx(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext &= ~msr;
	current->thread.regs->msr &= ~msr;
	kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
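	/* Fetch failed: raise an instruction storage interrupt in the guest,
	 * with the corresponding status bits set in its MSR/SRR1 image. */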
	if (ret == -ENOENT) {
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 33, 33, 1);
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 34, 36, 0);
		vcpu->arch.msr = kvmppc_set_field(vcpu->arch.msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{

	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

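/*
 * Lazy facility switching: FPU, Altivec and VSX state is only loaded for
 * the guest on its first use, tracked via guest_owned_ext, and handed back
 * through kvmppc_giveup_ext() on vcpu_put and after each guest run.
 */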
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	/* We already own the ext */
	if (vcpu->arch.guest_owned_ext & msr) {
		return RESUME_GUEST;
	}

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	switch (msr) {
	case MSR_FP:
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];

		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
		break;
	case MSR_VEC:
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
		break;
	case MSR_VSX:
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr); i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
		kvmppc_load_up_vsx();
#endif
		break;
	default:
		BUG();
	}

	vcpu->arch.guest_owned_ext |= msr;

	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
#ifdef EXIT_DEBUG
	printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | dec=0x%x | msr=0x%lx\n",
		exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
		kvmppc_get_dec(vcpu), to_svcpu(vcpu)->shadow_srr1);
#elif defined (EXIT_DEBUG_SIMPLE)
	if ((exit_nr != 0x900) && (exit_nr != 0x500))
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | dar=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), kvmppc_get_fault_dar(vcpu),
			vcpu->arch.msr);
#endif
	kvm_resched(vcpu);
	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		if (to_svcpu(vcpu)->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT]
		    == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			break;
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (to_svcpu(vcpu)->shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu,
						    kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to
			 *     flush&patch the page, so we can't use the NX
			 *     bit inside the guest. Let's cross our fingers
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.msr |= to_svcpu(vcpu)->shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		if ((to_svcpu(vcpu)->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			break;
		}
#endif

		/* The only case we need to handle is missing shadow PTEs */
		if (to_svcpu(vcpu)->fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.dear = dar;
			to_book3s(vcpu)->dsisr = to_svcpu(vcpu)->fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			kvmppc_mmu_pte_flush(vcpu, vcpu->arch.dear, ~0xFFFULL);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.dear = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = to_svcpu(vcpu)->shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n",
			       kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu),
			       kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		// XXX make user settable
		if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;

		} else {
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			to_book3s(vcpu)->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.dear = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), to_svcpu(vcpu)->shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
#ifdef EXIT_DEBUG
			printk(KERN_EMERG "KVM: Going back to host\n");
#endif
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			/* In case an interrupt came in that was triggered
			 * from userspace (like DEC), we need to check what
			 * to inject now! */
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

#ifdef EXIT_DEBUG
	printk(KERN_EMERG "KVM exit: vcpu=0x%p pc=0x%lx r=0x%x\n",
	       vcpu, kvmppc_get_pc(vcpu), r);
#endif

	return r;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.srr0 = regs->srr0;
	vcpu->arch.srr1 = regs->srr1;
	vcpu->arch.sprg0 = regs->sprg0;
	vcpu->arch.sprg1 = regs->sprg1;
	vcpu->arch.sprg2 = regs->sprg2;
	vcpu->arch.sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	vcpu_put(vcpu);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu3s->slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu3s->slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++) {
			sregs->u.s.ppc32.sr[i] = vcpu3s->sr[i].raw;
		}
		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = &kvm->memslots->memslots[log->slot];

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;

	vcpu_book3s = vmalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	memset(vcpu_book3s, 0, sizeof(struct kvmppc_vcpu_book3s));

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	vcpu->arch.host_retip = kvm_return_point;
	vcpu->arch.host_msr = mfmsr();
#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu_book3s->slb_nr = 64;

	/* remember where some real-mode handlers are */
	vcpu->arch.trampoline_lowmem = kvmppc_trampoline_lowmem;
	vcpu->arch.trampoline_enter = kvmppc_trampoline_enter;
	vcpu->arch.highmem_handler = (ulong)kvmppc_handler_highmem;
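	/* On 64-bit the symbol refers to a function descriptor, so it has to
	 * be dereferenced to get the real entry address; 32-bit can take the
	 * symbol's address directly. */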
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.rmcall = *(ulong*)kvmppc_rmcall;
#else
	vcpu->arch.rmcall = (ulong)kvmppc_rmcall;
#endif

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto free_shadow_vcpu;

	return vcpu;

free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

extern int __kvmppc_vcpu_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	struct thread_struct ext_bkp;
#ifdef CONFIG_ALTIVEC
	bool save_vec = current->thread.used_vr;
#endif
#ifdef CONFIG_VSX
	bool save_vsx = current->thread.used_vsr;
#endif
	ulong ext_msr;

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(ext_bkp.fpr, current->thread.fpr, sizeof(current->thread.fpr));
	ext_bkp.fpscr = current->thread.fpscr;
	ext_bkp.fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	if (save_vec) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(ext_bkp.vr, current->thread.vr, sizeof(ext_bkp.vr));
		ext_bkp.vscr = current->thread.vscr;
		ext_bkp.vrsave = current->thread.vrsave;
	}
	ext_bkp.used_vr = current->thread.used_vr;
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	if (save_vsx && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
	ext_bkp.used_vsr = current->thread.used_vsr;
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* XXX we get called with irq disabled - change that! */
	local_irq_enable();

	/* Preload FPU if it's enabled */
	if (vcpu->arch.msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	ret = __kvmppc_vcpu_entry(kvm_run, vcpu);

	local_irq_disable();

	current->thread.regs->msr = ext_msr;

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP);
	kvmppc_giveup_ext(vcpu, MSR_VEC);
	kvmppc_giveup_ext(vcpu, MSR_VSX);

	/* Restore FPU state from stack */
	memcpy(current->thread.fpr, ext_bkp.fpr, sizeof(ext_bkp.fpr));
	current->thread.fpscr = ext_bkp.fpscr;
	current->thread.fpexc_mode = ext_bkp.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (save_vec && current->thread.used_vr) {
		memcpy(current->thread.vr, ext_bkp.vr, sizeof(ext_bkp.vr));
		current->thread.vscr = ext_bkp.vscr;
		current->thread.vrsave = ext_bkp.vrsave;
	}
	current->thread.used_vr = ext_bkp.used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = ext_bkp.used_vsr;
#endif

	return ret;
}

static int kvmppc_book3s_init(void)
{
	return kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), THIS_MODULE);
}

static void kvmppc_book3s_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);