/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu, svcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        vcpu->cpu = -1;
}

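/*
 * Editor's summary of the scheme below: the shadow vcpu (svcpu) holds the
 * register state that the real-mode entry/exit code works on.  The two
 * helpers keep it in sync with the regular vcpu: copy_to before entering
 * the guest, copy_from after leaving it.  svcpu->in_use flags whether the
 * svcpu currently holds the authoritative copy, so vcpu_put and the
 * preempt notifiers can tell whether a sync back is still needed.
 */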
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr = vcpu->arch.lr;
        svcpu->pc = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
        svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        /*
         * vcpu_put would just call us again because in_use hasn't
         * been updated yet.
         */
        preempt_disable();

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr = svcpu->lr;
        vcpu->arch.pc = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
        vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
        svcpu->in_use = false;

out:
        preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                   (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

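/*
 * Since the guest runs in problem state, the MSR we actually load on
 * guest entry (vcpu->arch.shadow_msr) is a blend, recomputed below:
 * bits the guest controls directly (FE0/FE1/SF/SE/BE/LE), bits the host
 * needs to keep the guest running as a user process with translation on
 * (ME/RI/IR/DR/PR/EE), and whichever external providers (FP/VEC/VSX)
 * the guest currently owns.
 */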
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong guest_msr = kvmppc_get_msr(vcpu);
        ulong smsr = guest_msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        kvmppc_set_msr_fast(vcpu, msr);
                }
        }

        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around, we need to flush it. Typically the 32-bit magic
         * page will be instantiated when calling into RTAS. Note: We
         * assume that such a transition only happens while in kernel mode,
         * i.e., we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate the 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
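/*
 * Mechanics of the patching (summarized from the code below and from the
 * program interrupt handler further down): masking an instruction word
 * with 0xff0007ff keeps just the primary and extended opcode fields, so
 * the compare matches dcbz (INS_DCBZ) regardless of register operands.
 * Clearing bit 0x8 of the extended opcode turns it into a reserved
 * encoding; executing it raises a program interrupt, which the exit
 * handler recognizes by the same mask and emulates as a 32-byte dcbz.
 */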
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                        page[i] &= cpu_to_be32(0xfffffff7);

        kunmap_atomic(page);
        put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

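/*
 * Resolve a guest instruction or data storage fault, in outline:
 * translate the effective address through the guest MMU if relocation is
 * on (otherwise fake an identity pte tagged with a VSID_REAL* address
 * space), then either reflect the fault back into the guest (-ENOENT,
 * -EPERM, -EINVAL from the translation), map the page into the host
 * shadow MMU, or hand the access to MMIO emulation.
 */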
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                u32 dsisr = vcpu->arch.fault_dsisr;
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
                kvmppc_set_dsisr(vcpu, dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
        }

        return r;
}

static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

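/*
 * FP/Altivec/VSX state is switched lazily: vcpu->arch.guest_owned_ext
 * tracks which register sets currently hold guest values in the host
 * thread.  kvmppc_giveup_ext() flushes them back into vcpu->arch before
 * the host (or another task) may touch them; kvmppc_handle_ext() below
 * loads them up when the guest first uses the facility.
 */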
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
                t->fp_save_area = NULL;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                t->vr_save_area = NULL;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
                /* Facility not available to the guest, ignore giveup request */
                return;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                vcpu->arch.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, current->thread.tar);
                vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                break;
        }
#endif
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = kvmppc_get_msr(vcpu);

                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_set_msr_fast(vcpu, msr);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{

        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
#endif
        }

        t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                preempt_enable();
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                preempt_enable();
        }
#endif
        current->thread.regs->msr |= lost_ext;
}

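/*
 * On ISA 2.07 (POWER8-class) hosts, facilities gated by the FSCR (TAR
 * here, EBB/DSCR via emulation) are mediated the same lazy way:
 * shadow_fscr mirrors what the guest has actually been handed, and the
 * Interrupt Cause field in the top byte of the FSCR tells the guest
 * which facility trapped.
 */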
#ifdef CONFIG_PPC_BOOK3S_64

static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
        /* Inject the Interrupt Cause field and trigger a guest interrupt */
        vcpu->arch.fscr &= ~(0xffULL << 56);
        vcpu->arch.fscr |= (fac << 56);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        enum emulation_result er = EMULATE_FAIL;

        if (!(kvmppc_get_msr(vcpu) & MSR_PR))
                er = kvmppc_emulate_instruction(vcpu->run, vcpu);

        if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
                /* Couldn't emulate, trigger interrupt in guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
        }
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

        if (!(vcpu->arch.fscr & (1ULL << fac))) {
                /* Facility not enabled by the guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
                return RESUME_GUEST;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                /* TAR switching isn't lazy in Linux yet */
                current->thread.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, vcpu->arch.tar);
                vcpu->arch.shadow_fscr |= FSCR_TAR;
                break;
        default:
                kvmppc_emulate_fac(vcpu, fac);
                break;
        }

        return RESUME_GUEST;
}
#endif

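/*
 * Main exit dispatcher.  Each interrupt vector the guest bounced into the
 * host on is either handled here (page faults, emulation, hypercalls,
 * facility loads) or reflected back into the guest; the RESUME_* result
 * decides whether we re-enter the guest or return to userspace.
 */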
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        u64 msr = kvmppc_get_msr(vcpu);
                        msr |= shadow_srr1 & 0x58000000;
                        kvmppc_set_msr_fast(vcpu, msr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        kvmppc_set_dar(vcpu, dar);
                        kvmppc_set_dsisr(vcpu, fault_dsisr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
        case BOOK3S_INTERRUPT_DOORBELL:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP; break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        u32 last_inst = kvmppc_get_last_inst(vcpu);
                        u32 dsisr;
                        u64 dar;

                        dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
                        dar = kvmppc_alignment_dar(vcpu, last_inst);

                        kvmppc_set_dsisr(vcpu, dsisr);
                        kvmppc_set_dar(vcpu, dar);

                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
#ifdef CONFIG_PPC_BOOK3S_64
        case BOOK3S_INTERRUPT_FAC_UNAVAIL:
                kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
                r = RESUME_GUEST;
                break;
#endif
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0)
                        r = s;
                else {
                        /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                }

                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                                    sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
        case KVM_REG_PPC_LPCR:
                /*
                 * We are only interested in the LPCR_ILE bit
                 */
                if (vcpu->arch.intr_msr & MSR_LE)
                        *val = get_reg_val(id, LPCR_ILE);
                else
                        *val = get_reg_val(id, 0);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
        if (new_lpcr & LPCR_ILE)
                vcpu->arch.intr_msr |= MSR_LE;
        else
                vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
        case KVM_REG_PPC_LPCR:
                kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

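/*
 * vcpu allocation: the kvm_vcpu itself comes from kvm_vcpu_cache, the
 * book3s state is vmalloc'ed, 32-bit hosts get a separate kzalloc'ed
 * shadow_vcpu, and the shared (magic) page is placed in the last 4k of
 * a freshly zeroed page.  The error paths below unwind in reverse order.
 */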
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                                                   unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
#ifdef CONFIG_PPC_BOOK3S_64
        /* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
        vcpu->arch.shared_big_endian = true;
#else
        vcpu->arch.shared_big_endian = false;
#endif

        /*
         * Default to the same as the host if we're on sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
        vcpu->arch.intr_msr = MSR_SF;
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
#ifdef CONFIG_ALTIVEC
        unsigned long uninitialized_var(vrsave);
#endif

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest which we have to inject
         * again, so let's postpone them until we're in the guest and if we
         * really did time things so badly, then we just exit again due to
         * a host external interrupt.
         */
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0)
                goto out;
        /* interrupts now hard-disabled */

        /* Save FPU state in thread_struct */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in thread_struct */
        if (current->thread.regs->msr & MSR_VEC)
                giveup_altivec(current);
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in thread_struct */
        if (current->thread.regs->msr & MSR_VSX)
                __giveup_vsx(current);
#endif

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        /* Make sure we save the guest TAR/EBB/DSCR state */
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
                                         struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
                                         struct kvm_memory_slot *memslot)
{
        return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
                                        struct kvm_memory_slot *memslot,
                                        struct kvm_userspace_memory_region *mem)
{
        return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old)
{
        return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
                                        struct kvm_memory_slot *dont)
{
        return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
                                         unsigned long npages)
{
        return 0;
}

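/*
 * Report the MMU capabilities we emulate to userspace: a 64-entry SLB, a
 * base 4k segment page size, 16M large pages, and (when the first vcpu
 * advertises BOOK3S_HFLAG_MULTI_PGSIZE) 64k pages plus 1T segments.
 */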
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't have a vcpu easily
         * to hand here to test.  Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                ++i;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
                                         struct kvm_ppc_smmu_info *info)
{
        /* We should not get called */
        BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

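/*
 * While any PR guest exists we disable "relocation on" exceptions
 * machine-wide (on firmware that supports FW_FEATURE_SET_MODE),
 * presumably because PR KVM relies on intercepting exceptions through
 * the classic exception vectors.  kvm_global_user_count tracks how many
 * VMs currently need this.
 */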
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
        mutex_init(&kvm->arch.hpt_mutex);

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}

static int kvmppc_core_check_processor_compat_pr(void)
{
        /* we are always compatible */
        return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
                                 unsigned int ioctl, unsigned long arg)
{
        return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
        .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
        .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
        .get_one_reg = kvmppc_get_one_reg_pr,
        .set_one_reg = kvmppc_set_one_reg_pr,
        .vcpu_load = kvmppc_core_vcpu_load_pr,
        .vcpu_put = kvmppc_core_vcpu_put_pr,
        .set_msr = kvmppc_set_msr_pr,
        .vcpu_run = kvmppc_vcpu_run_pr,
        .vcpu_create = kvmppc_core_vcpu_create_pr,
        .vcpu_free = kvmppc_core_vcpu_free_pr,
        .check_requests = kvmppc_core_check_requests_pr,
        .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
        .flush_memslot = kvmppc_core_flush_memslot_pr,
        .prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
        .commit_memory_region = kvmppc_core_commit_memory_region_pr,
        .unmap_hva = kvm_unmap_hva_pr,
        .unmap_hva_range = kvm_unmap_hva_range_pr,
        .age_hva = kvm_age_hva_pr,
        .test_age_hva = kvm_test_age_hva_pr,
        .set_spte_hva = kvm_set_spte_hva_pr,
        .mmu_destroy = kvmppc_mmu_destroy_pr,
        .free_memslot = kvmppc_core_free_memslot_pr,
        .create_memslot = kvmppc_core_create_memslot_pr,
        .init_vm = kvmppc_core_init_vm_pr,
        .destroy_vm = kvmppc_core_destroy_vm_pr,
        .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
        .emulate_op = kvmppc_core_emulate_op_pr,
        .emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
        .emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
        .fast_vcpu_kick = kvm_vcpu_kick,
        .arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};


int kvmppc_book3s_init_pr(void)
{
        int r;

        r = kvmppc_core_check_processor_compat_pr();
        if (r < 0)
                return r;

        kvm_ops_pr.owner = THIS_MODULE;
        kvmppc_pr_ops = &kvm_ops_pr;

        r = kvmppc_mmu_hpte_sysinit();
        return r;
}

void kvmppc_book3s_exit_pr(void)
{
        kvmppc_pr_ops = NULL;
        kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif