/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr = vcpu->arch.lr;
	svcpu->pc = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr = svcpu->lr;
	vcpu->arch.pc = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst = svcpu->last_inst;
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

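/*
 * Recompute the MSR the guest actually runs under: keep the few
 * guest-controlled bits (FE0/FE1, SF, SE, BE), force on what the host
 * needs (translation, MSR_PR so the guest stays in problem state, EE,
 * ME, RI), and pass through any facility bits the guest currently owns
 * (FP/VEC/VSX).
 */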
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, and we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
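/*
 * Informational note on the patching below: masking an instruction
 * word with 0xff0007ff keeps the primary and extended opcode fields
 * while ignoring the register operands, so it matches any dcbz
 * encoding.  Clearing bit 0x8 then turns the extended opcode into a
 * reserved one, so executing the patched word raises a program
 * interrupt, which the INS_DCBZ check in kvmppc_handle_exit_pr()
 * recognizes and emulates as a 32-byte dcbz.
 */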
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

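/*
 * Resolve a guest page fault: translate the effective address through
 * the guest MMU (or treat it as 1:1 in real mode), reflect genuine
 * guest faults back as interrupts, map the page on the host if it is
 * backed by a visible gfn, or bounce the access out as MMIO.
 */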
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shared->msr |=
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

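/*
 * With CONFIG_VSX, TS_FPRWIDTH is 2 and thread_struct.fpr interleaves
 * each FP register with the second doubleword of the corresponding
 * VSX register; get_fpr_index() maps an FP register number to its
 * slot in that flattened u64 array.  Without VSX the width is 1 and
 * this is an identity mapping.
 */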
static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fpr[].
		 */
		if (current->thread.regs->msr & MSR_FP)
			giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
		if (cpu_has_feature(CPU_FTR_VSX))
			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

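/*
 * Fetch the instruction at the guest PC.  If the translation is gone
 * (kvmppc_ld() returns -ENOENT), synthesize an instruction storage
 * interrupt: the kvmppc_set_field() calls below set the "translation
 * not found" status bit and clear the other ISI status bits in the
 * guest MSR image, which becomes SRR1 when the interrupt is delivered.
 */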
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64*)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX?  Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
	}

	current->thread.regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP)
		kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC)
		kvmppc_load_up_altivec();
#endif
	current->thread.regs->msr |= lost_ext;
}

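/*
 * Main exit dispatcher: called after every guest exit with the exit
 * number from the shadow interrupt handler.  Returns a RESUME_* code;
 * RESUME_GUEST re-enters the guest, and anything with the RESUME_HOST
 * flag set returns to userspace with run->exit_reason filled in.
 */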
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to
			 *     flush&patch the page, so we can't use the NX bit
			 *     inside the guest. Let's cross our fingers that
			 *     no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
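	/*
	 * A system call exit lands here.  "sc 1" (instruction word
	 * 0x44000022, i.e. sc with LEV=1) is the hypercall form and is
	 * only honoured from guest kernel mode; everything else is
	 * either a KVM/MOL magic hypercall or a plain guest syscall
	 * that gets reflected back into the guest.
	 */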
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = s;
		} else {
			kvmppc_fix_ee_before_entry();
		}
		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

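/*
 * ONE_REG accessors.  HIOR is the only PR-specific register handled
 * here; the VSR cases expose each VSX register as a pair of
 * doublewords, with vsxval[0] backed by the FP register file and
 * vsxval[1] by the VSX extension half (see the fpr/vsr layout note
 * above get_fpr_index()).
 */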
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		val->vsxval[0] = vcpu->arch.fpr[i];
		val->vsxval[1] = vcpu->arch.vsr[i];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.fpr[i] = val->vsxval[0];
		vcpu->arch.vsr[i] = val->vsxval[1];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

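/*
 * Run loop entry: host FP/Altivec/VSX state is parked on the stack
 * while the guest runs and restored afterwards, since the guest may
 * take ownership of the register files via kvmppc_handle_ext().
 */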
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	local_irq_disable();
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0) {
		local_irq_enable();
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	current->thread.regs->msr = ext_msr;

	/* Restore FPU/VSX state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

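/*
 * Informational note: PR KVM needs exceptions to vector to the classic
 * real-mode offsets, which is incompatible with the firmware's
 * "relocation on exceptions" mode, so that mode is disabled globally
 * for as long as at least one PR VM exists.  The counter below
 * refcounts those VMs.
 */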
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/* we are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

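/*
 * This kvmppc_ops table is the indirection layer introduced by the
 * "kvm: powerpc: Add kvmppc_ops callback" change: the generic Book3S
 * code calls through these hooks instead of linking directly against
 * PR or HV symbols, so both flavours can provide their own
 * implementations.
 */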
static struct kvmppc_ops kvmppc_pr_ops = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.check_processor_compat = kvmppc_core_check_processor_compat_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};

static int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvm_init(&kvmppc_pr_ops, sizeof(struct kvm_vcpu), 0, THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit_pr(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);