/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

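/*
 * Called when the vcpu is scheduled onto a host CPU; on 64-bit hosts
 * this publishes the guest's shadow SLB into the per-CPU shadow vcpu
 * that the real-mode entry code works from.
 */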
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

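/*
 * Called when the vcpu is scheduled out; pulls back any state the
 * real-mode code touched and hands the FP/Altivec/VSX facilities
 * back to the host.
 */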
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu, svcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
	svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
	svcpu->in_use = false;

out:
	preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

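/*
 * Derive the MSR the guest actually runs with: keep only the bits the
 * guest may control, force on the bits the host needs (problem state,
 * translation, machine check enable), and pass through whichever
 * FP/VEC/VSX facilities the guest currently owns.
 */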
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

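/*
 * Handle a guest MSR update: mask unsupported bits, honour MSR_POW by
 * putting the vcpu to sleep, and remap segments if the PR/IR/DR
 * translation mode changed.
 */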
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note:
	 * We assume that such a transition only happens while in kernel
	 * mode, i.e., we never transition from user 32-bit to kernel
	 * 64-bit with a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

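/*
 * A new PVR selects 32-bit vs 64-bit MMU emulation, the default HIOR
 * and MSR mask, and per-model quirks such as dcbz32 handling and
 * native paired-single support.
 */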
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

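/*
 * Common page-fault path for instruction and data storage interrupts:
 * translate through the guest MMU, reflect the fault to the guest if
 * its own translation fails, otherwise map the page on the host or
 * punt the access to userspace as MMIO.
 */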
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

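/*
 * Re-read the last guest instruction for emulation; if the page
 * holding it is unmapped, queue an instruction storage interrupt so
 * the guest faults it in first.
 */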
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = kvmppc_get_msr(vcpu);

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_set_msr_fast(vcpu, msr);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

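/*
 * Top-level exit handler, called with interrupts enabled after every
 * guest exit: decides per interrupt vector whether to emulate, reflect
 * the interrupt into the guest, or return to userspace.
 */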
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			u32 last_inst = kvmppc_get_last_inst(vcpu);
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

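/* ONE_REG accessors: only HIOR and the LPCR_ILE bit are handled here. */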
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_LPCR:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

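/*
 * Allocate a vcpu: the book3s struct, the 32-bit-only shadow vcpu and
 * the page backing the shared (magic page) struct, then pick a default
 * PVR and initialise the shadow MMU.
 */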
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

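/*
 * Enter the guest: flush the host's FP/Altivec/VSX state out to the
 * thread_struct first, preload guest FP if enabled, then drop into the
 * assembly run loop with interrupts hard-disabled.
 */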
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU state in thread_struct */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in thread_struct */
	if (current->thread.regs->msr & MSR_VEC)
		giveup_altivec(current);
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in thread_struct */
	if (current->thread.regs->msr & MSR_VSX)
		__giveup_vsx(current);
#endif

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

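/* PR KVM needs no special handling for these; the hooks are stubs. */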
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/* we are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

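/* Dispatch table hooking the PR implementations into the generic book3s code */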
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl  = kvm_arch_vm_ioctl_pr,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif