/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

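/*
 * "Split real mode" below means MSR_DR set with MSR_IR clear: instruction
 * fetches are untranslated while data accesses go through the MMU.  PR KVM
 * always runs the guest with translation on, so the PC of guest code
 * executing in this state is redirected into the SPLIT_HACK_OFFS alias
 * region and fixed up again on exit.
 */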
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        ulong pc = kvmppc_get_pc(vcpu);

        /* We are in DR only split real mode */
        if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
                return;

        /* We have not fixed up the guest already */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
                return;

        /* The code is in fixupable address space */
        if (pc & SPLIT_HACK_MASK)
                return;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

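/*
 * While a PR guest is loaded we run with AIL (Alternate Interrupt
 * Location) disabled: PR KVM relies on interrupts being taken at the
 * real-mode vectors it has patched, which AIL would bypass.
 */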
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif

        /* Disable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu, svcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_unfixup_split_real(vcpu);

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

        /* Enable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

        vcpu->cpu = -1;
}

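/*
 * The shadow vcpu holds the subset of register state that the real-mode
 * entry/exit code works on; it has to be synchronized with the main vcpu
 * struct around every guest entry and exit.
 */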
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr  = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr  = vcpu->arch.lr;
        svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
        /*
         * Now also save the current time base value. We use this
         * to find the guest purr and spurr value.
         */
        vcpu->arch.entry_tb = get_tb();
        vcpu->arch.entry_vtb = get_vtb();
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.entry_ic = mfspr(SPRN_IC);
        svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        /*
         * vcpu_put would just call us again because in_use hasn't
         * been updated yet.
         */
        preempt_disable();

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr  = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr  = svcpu->lr;
        vcpu->arch.pc  = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar   = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
        vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
        /*
         * Update purr and spurr using time base on exit.
         */
        vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
        vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
        vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
        svcpu->in_use = false;

out:
        preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
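/*
 * The host MMU notifiers hand us host virtual address ranges; translate
 * them to guest physical ranges via the memslots and flush the matching
 * shadow PTEs on every vcpu.
 */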
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

        return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

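/*
 * Recompute the MSR the guest actually runs with in problem state: keep
 * the harmless guest-controlled bits (FE0/FE1/SF/SE/BE/LE) plus any
 * external providers the guest currently owns, and force on the bits the
 * host requires (ME/RI/IR/DR/PR/EE).
 */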
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong guest_msr = kvmppc_get_msr(vcpu);
        ulong smsr = guest_msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        kvmppc_set_msr_fast(vcpu, msr);
                }
        }

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
        else
                kvmppc_unfixup_split_real(vcpu);

        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
            (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around, we need to flush it. Typically the 32-bit magic
         * page will be instantiated when calling into RTAS. Note: We
         * assume that such a transition only happens while in kernel mode,
         * i.e., we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as 32 bytes store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have 32 bytes cache line size, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz to
 * emulate 32 bytes dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                        page[i] &= cpu_to_be32(0xfffffff7);

        kunmap_atomic(page);
        put_page(hpage);
}
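
/*
 * Note on the masking above: 0xff0007ff keeps the primary and extended
 * opcode fields, so the compare matches dcbz regardless of its operands,
 * and clearing the 0x8 bit turns it into a reserved instruction form that
 * raises a program interrupt, which we can then emulate with a 32-byte
 * length.
 */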

static int kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

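/*
 * Handle a guest instruction or data storage interrupt: walk the guest
 * MMU to translate the faulting effective address, reflect the fault back
 * into the guest if its own mapping is missing or protected, map the page
 * into the shadow MMU if it is ordinary RAM, or fall back to MMIO
 * emulation.
 */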
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
                if (!data &&
                    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                        pte.raddr &= ~SPLIT_HACK_MASK;
                /* fall through */
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                u32 dsisr = vcpu->arch.fault_dsisr;
                u64 ssrr1 = vcpu->arch.shadow_srr1;
                u64 msr = kvmppc_get_msr(vcpu);
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
                kvmppc_set_dsisr(vcpu, dsisr);
                kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gpa(vcpu, pte.raddr)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte, iswrite);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                        (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

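/*
 * PR KVM loads FP/Altivec/VSX state lazily: a facility-unavailable
 * interrupt loads the guest state onto the CPU and marks the facility
 * guest-owned in guest_owned_ext; giveup_ext() flushes it back to the
 * vcpu whenever the host needs the registers again.
 */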
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
                t->fp_save_area = NULL;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                t->vr_save_area = NULL;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
                /* Facility not available to the guest, ignore giveup request */
                return;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                vcpu->arch.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, current->thread.tar);
                vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                break;
        }
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
#endif
        }

        t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                preempt_enable();
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                preempt_enable();
        }
#endif
        current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

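/*
 * "fac" throughout this section is the facility's FSCR log bit number
 * (e.g. FSCR_TAR_LG); the Interrupt Cause field occupies the top byte of
 * FSCR, hence the shifts by 56 below.
 */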
static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
        /* Inject the Interrupt Cause field and trigger a guest interrupt */
        vcpu->arch.fscr &= ~(0xffULL << 56);
        vcpu->arch.fscr |= (fac << 56);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        enum emulation_result er = EMULATE_FAIL;

        if (!(kvmppc_get_msr(vcpu) & MSR_PR))
                er = kvmppc_emulate_instruction(vcpu->run, vcpu);

        if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
                /* Couldn't emulate, trigger interrupt in guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
        }
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        bool guest_fac_enabled;
        BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

        /*
         * Not every facility is enabled by FSCR bits, check whether the
         * guest has this facility enabled at all.
         */
        switch (fac) {
        case FSCR_TAR_LG:
        case FSCR_EBB_LG:
                guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
                break;
        case FSCR_TM_LG:
                guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
                break;
        default:
                guest_fac_enabled = false;
                break;
        }

        if (!guest_fac_enabled) {
                /* Facility not enabled by the guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
                return RESUME_GUEST;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                /* TAR switching isn't lazy in Linux yet */
                current->thread.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, vcpu->arch.tar);
                vcpu->arch.shadow_fscr |= FSCR_TAR;
                break;
        default:
                kvmppc_emulate_fac(vcpu, fac);
                break;
        }

        return RESUME_GUEST;
}
#endif

int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

                if (kvmppc_is_split_real(vcpu))
                        kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        u64 msr = kvmppc_get_msr(vcpu);
                        msr |= shadow_srr1 & 0x58000000;
                        kvmppc_set_msr_fast(vcpu, msr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        kvmppc_set_dar(vcpu, dar);
                        kvmppc_set_dsisr(vcpu, fault_dsisr);
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
        case BOOK3S_INTERRUPT_DOORBELL:
        case BOOK3S_INTERRUPT_H_DOORBELL:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;
                u32 last_inst;
                int emul;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
                if (emul != EMULATE_DONE) {
                        r = RESUME_GUEST;
                        break;
                }

                if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
                        pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
                                kvmppc_get_pc(vcpu), last_inst);
#endif
                        if ((last_inst & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), last_inst);
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
        case BOOK3S_INTERRUPT_SYSCALL:
        {
                u32 last_sc;
                int emul;

                /* Get last sc for papr */
                if (vcpu->arch.papr_enabled) {
                        /* The sc instruction points SRR0 to the next inst */
                        emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
                        if (emul != EMULATE_DONE) {
                                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                if (vcpu->arch.papr_enabled &&
                    (last_sc == 0x44000022) &&
                    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;
                int emul;
                u32 last_inst;

                if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
                        /* Do paired single instruction emulation */
                        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
                                                    &last_inst);
                        if (emul == EMULATE_DONE)
                                goto program_interrupt;
                        else
                                r = RESUME_GUEST;

                        break;
                }

                /* Enable external provider */
                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL:
                        ext_msr = MSR_FP;
                        break;

                case BOOK3S_INTERRUPT_ALTIVEC:
                        ext_msr = MSR_VEC;
                        break;

                case BOOK3S_INTERRUPT_VSX:
                        ext_msr = MSR_VSX;
                        break;
                }

                r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
        {
                u32 last_inst;
                int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

                if (emul == EMULATE_DONE) {
                        u32 dsisr;
                        u64 dar;

                        dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
                        dar = kvmppc_alignment_dar(vcpu, last_inst);

                        kvmppc_set_dsisr(vcpu, dsisr);
                        kvmppc_set_dar(vcpu, dar);

                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        }
#ifdef CONFIG_PPC_BOOK3S_64
        case BOOK3S_INTERRUPT_FAC_UNAVAIL:
                kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
                r = RESUME_GUEST;
                break;
#endif
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest which we have to
                 * inject again, so let's postpone them until we're in the guest
                 * and if we really did time things so badly, then we just exit
                 * again due to a host external interrupt.
                 */
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0)
                        r = s;
                else {
                        /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                }

                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
                                            struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr_pr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                              sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
        case KVM_REG_PPC_LPCR:
        case KVM_REG_PPC_LPCR_64:
                /*
                 * We are only interested in the LPCR_ILE bit
                 */
                if (vcpu->arch.intr_msr & MSR_LE)
                        *val = get_reg_val(id, LPCR_ILE);
                else
                        *val = get_reg_val(id, 0);
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
        if (new_lpcr & LPCR_ILE)
                vcpu->arch.intr_msr |= MSR_LE;
        else
                vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
                                 union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
        case KVM_REG_PPC_LPCR:
        case KVM_REG_PPC_LPCR_64:
                kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
                break;
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
                                                   unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
        if (!vcpu)
                goto out;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto free_vcpu;
        vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        vcpu->arch.shadow_vcpu =
                kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
        if (!vcpu->arch.shadow_vcpu)
                goto free_vcpu3s;
#endif

        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
        /* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
        vcpu->arch.shared_big_endian = true;
#else
        vcpu->arch.shared_big_endian = false;
#endif

        /*
         * Default to the same as the host if we're on sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
        vcpu->arch.intr_msr = MSR_SF;
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
        vfree(vcpu_book3s);
free_vcpu:
        kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
        return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
        kfree(vcpu->arch.shadow_vcpu);
#endif
        vfree(vcpu_book3s);
        kmem_cache_free(kvm_vcpu_cache, vcpu);
}

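/*
 * Run the guest on this vcpu.  Host FP/Altivec/VSX state is flushed to
 * the thread_struct first, because the guest's register state will be
 * loaded onto the CPU while it runs, and flushed back out again once
 * __kvmppc_vcpu_run() returns.
 */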
3a167bea | 1456 | static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
f05ed4d5 PM |
1457 | { |
1458 | int ret; | |
f05ed4d5 | 1459 | #ifdef CONFIG_ALTIVEC |
f05ed4d5 | 1460 | unsigned long uninitialized_var(vrsave); |
f05ed4d5 | 1461 | #endif |
f05ed4d5 | 1462 | |
af8f38b3 AG |
1463 | /* Check if we can run the vcpu at all */ |
1464 | if (!vcpu->arch.sane) { | |
1465 | kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
7d82714d AG |
1466 | ret = -EINVAL; |
1467 | goto out; | |
af8f38b3 AG |
1468 | } |
1469 | ||
e371f713 AG |
1470 | /* |
1471 | * Interrupts could be timers for the guest which we have to inject | |
1472 | * again, so let's postpone them until we're in the guest and if we | |
1473 | * really did time things so badly, then we just exit again due to | |
1474 | * a host external interrupt. | |
1475 | */ | |
7ee78855 | 1476 | ret = kvmppc_prepare_to_enter(vcpu); |
6c85f52b | 1477 | if (ret <= 0) |
7d82714d | 1478 | goto out; |
6c85f52b | 1479 | /* interrupts now hard-disabled */ |
f05ed4d5 | 1480 | |
99dae3ba | 1481 | /* Save FPU state in thread_struct */ |
f05ed4d5 PM |
1482 | if (current->thread.regs->msr & MSR_FP) |
1483 | giveup_fpu(current); | |
f05ed4d5 PM |
1484 | |
1485 | #ifdef CONFIG_ALTIVEC | |
99dae3ba PM |
1486 | /* Save Altivec state in thread_struct */ |
1487 | if (current->thread.regs->msr & MSR_VEC) | |
1488 | giveup_altivec(current); | |
f05ed4d5 PM |
1489 | #endif |
1490 | ||
1491 | #ifdef CONFIG_VSX | |
99dae3ba PM |
1492 | /* Save VSX state in thread_struct */ |
1493 | if (current->thread.regs->msr & MSR_VSX) | |
28c483b6 | 1494 | __giveup_vsx(current); |
f05ed4d5 PM |
1495 | #endif |
1496 | ||
f05ed4d5 | 1497 | /* Preload FPU if it's enabled */ |
5deb8e7a | 1498 | if (kvmppc_get_msr(vcpu) & MSR_FP) |
f05ed4d5 PM |
1499 | kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP); |
1500 | ||
5f1c248f | 1501 | kvmppc_fix_ee_before_entry(); |
df6909e5 PM |
1502 | |
1503 | ret = __kvmppc_vcpu_run(kvm_run, vcpu); | |
1504 | ||
24afa37b AG |
1505 | /* No need for kvm_guest_exit: it is done in handle_exit.
1506 | * Note that we also get here with interrupts enabled. */
f05ed4d5 | 1507 | |
f05ed4d5 | 1508 | /* Make sure we save the guest FPU/Altivec/VSX state */ |
28c483b6 PM |
1509 | kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX); |
1510 | ||
e14e7a1e AG |
1511 | /* Make sure we save the guest TAR/EBB/DSCR state */ |
1512 | kvmppc_giveup_fac(vcpu, FSCR_TAR_LG); | |
1513 | ||
7d82714d | 1514 | out: |
0652eaae | 1515 | vcpu->mode = OUTSIDE_GUEST_MODE; |
f05ed4d5 PM |
1516 | return ret; |
1517 | } | |
1518 | ||
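/*
 * Summary of the lazy facility switching above: the host's live
 * FP/Altivec/VSX contents are first flushed into current->thread so
 * the guest cannot clobber host state; any facility the guest MSR
 * already has enabled is preloaded via kvmppc_handle_ext() so the
 * guest does not take a spurious "unavailable" interrupt on entry;
 * and after the run, kvmppc_giveup_ext()/kvmppc_giveup_fac() pull
 * the guest's register contents back into vcpu state before the
 * host may touch those register files again.
 */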
82ed3616 PM |
1519 | /* |
1520 | * Get (and clear) the dirty memory log for a memory slot. | |
1521 | */ | |
3a167bea AK |
1522 | static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm, |
1523 | struct kvm_dirty_log *log) | |
82ed3616 PM |
1524 | { |
1525 | struct kvm_memory_slot *memslot; | |
1526 | struct kvm_vcpu *vcpu; | |
1527 | ulong ga, ga_end; | |
1528 | int is_dirty = 0; | |
1529 | int r; | |
1530 | unsigned long n; | |
1531 | ||
1532 | mutex_lock(&kvm->slots_lock); | |
1533 | ||
1534 | r = kvm_get_dirty_log(kvm, log, &is_dirty); | |
1535 | if (r) | |
1536 | goto out; | |
1537 | ||
1538 | /* If nothing is dirty, don't bother messing with page tables. */ | |
1539 | if (is_dirty) { | |
1540 | memslot = id_to_memslot(kvm->memslots, log->slot); | |
1541 | ||
1542 | ga = memslot->base_gfn << PAGE_SHIFT; | |
1543 | ga_end = ga + (memslot->npages << PAGE_SHIFT); | |
1544 | ||
1545 | kvm_for_each_vcpu(n, vcpu, kvm) | |
1546 | kvmppc_mmu_pte_pflush(vcpu, ga, ga_end); | |
1547 | ||
1548 | n = kvm_dirty_bitmap_bytes(memslot); | |
1549 | memset(memslot->dirty_bitmap, 0, n); | |
1550 | } | |
1551 | ||
1552 | r = 0; | |
1553 | out: | |
1554 | mutex_unlock(&kvm->slots_lock); | |
1555 | return r; | |
1556 | } | |
1557 | ||
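/*
 * For reference, userspace reaches this handler through the generic
 * KVM_GET_DIRTY_LOG vm ioctl. A minimal caller-side sketch (error
 * handling elided; vm_fd, slot_id and bitmap_buf are assumptions of
 * this example, with bitmap_buf sized for the slot):
 *
 *	struct kvm_dirty_log log = {
 *		.slot = slot_id,
 *		.dirty_bitmap = bitmap_buf,
 *	};
 *	ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
 *
 * Each set bit marks a dirtied guest page, and since the handler
 * clears the bitmap under slots_lock, consecutive calls return
 * fresh deltas.
 */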
3a167bea AK |
1558 | static void kvmppc_core_flush_memslot_pr(struct kvm *kvm, |
1559 | struct kvm_memory_slot *memslot) | |
5b74716e | 1560 | { |
3a167bea AK |
1561 | return; |
1562 | } | |
5b74716e | 1563 | |
3a167bea AK |
1564 | static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm, |
1565 | struct kvm_memory_slot *memslot, | |
1566 | struct kvm_userspace_memory_region *mem) | |
1567 | { | |
5b74716e BH |
1568 | return 0; |
1569 | } | |
5b74716e | 1570 | |
3a167bea AK |
1571 | static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm, |
1572 | struct kvm_userspace_memory_region *mem, | |
1573 | const struct kvm_memory_slot *old) | |
a66b48c3 | 1574 | { |
3a167bea | 1575 | return; |
a66b48c3 PM |
1576 | } |
1577 | ||
3a167bea AK |
1578 | static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free, |
1579 | struct kvm_memory_slot *dont) | |
a66b48c3 | 1580 | { |
3a167bea | 1581 | return; |
a66b48c3 PM |
1582 | } |
1583 | ||
3a167bea AK |
1584 | static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot, |
1585 | unsigned long npages) | |
f9e0554d PM |
1586 | { |
1587 | return 0; | |
1588 | } | |
1589 | ||
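/*
 * The memslot hooks above are deliberately empty for PR KVM: guest
 * translations live in the shadow MMU's cache of guest PTEs rather
 * than in per-memslot structures, so there is nothing to prepare,
 * commit, flush or free here; stale entries are dropped through the
 * kvmppc_mmu_pte_*flush() paths instead, as in the dirty-log code
 * above.
 */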
3a167bea | 1590 | |
5b74716e | 1591 | #ifdef CONFIG_PPC64 |
3a167bea AK |
1592 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, |
1593 | struct kvm_ppc_smmu_info *info) | |
dfe49dbd | 1594 | { |
a4a0f252 PM |
1595 | long int i; |
1596 | struct kvm_vcpu *vcpu; | |
1597 | ||
1598 | info->flags = 0; | |
5b74716e BH |
1599 | |
1600 | /* SLB is always 64 entries */ | |
1601 | info->slb_size = 64; | |
1602 | ||
1603 | /* Standard 4k base page size segment */ | |
1604 | info->sps[0].page_shift = 12; | |
1605 | info->sps[0].slb_enc = 0; | |
1606 | info->sps[0].enc[0].page_shift = 12; | |
1607 | info->sps[0].enc[0].pte_enc = 0; | |
1608 | ||
a4a0f252 PM |
1609 | /* |
1610 | * 64k large page size. | |
1611 | * We only want to advertise this if the CPU we're emulating
1612 | * supports it, but unfortunately we don't have a vcpu readily
1613 | * to hand here to test with. Just pick the first vcpu, and if
1614 | * that doesn't exist yet, report the minimum capability,
1615 | * i.e., no 64k pages.
1616 | * 1T segment support goes along with 64k pages. | |
1617 | */ | |
1618 | i = 1; | |
1619 | vcpu = kvm_get_vcpu(kvm, 0); | |
1620 | if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | |
1621 | info->flags = KVM_PPC_1T_SEGMENTS; | |
1622 | info->sps[i].page_shift = 16; | |
1623 | info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01; | |
1624 | info->sps[i].enc[0].page_shift = 16; | |
1625 | info->sps[i].enc[0].pte_enc = 1; | |
1626 | ++i; | |
1627 | } | |
1628 | ||
5b74716e | 1629 | /* Standard 16M large page size segment */ |
a4a0f252 PM |
1630 | info->sps[i].page_shift = 24; |
1631 | info->sps[i].slb_enc = SLB_VSID_L; | |
1632 | info->sps[i].enc[0].page_shift = 24; | |
1633 | info->sps[i].enc[0].pte_enc = 0; | |
dfe49dbd | 1634 | |
5b74716e BH |
1635 | return 0; |
1636 | } | |
3a167bea AK |
1637 | #else |
1638 | static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm, | |
1639 | struct kvm_ppc_smmu_info *info) | |
f9e0554d | 1640 | { |
3a167bea AK |
1641 | /* We should never be called: this ioctl is 64-bit only */
1642 | BUG(); | |
f9e0554d | 1643 | } |
3a167bea | 1644 | #endif /* CONFIG_PPC64 */ |
f9e0554d | 1645 | |
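/*
 * Userspace queries the table built above with the
 * KVM_PPC_GET_SMMU_INFO vm ioctl to learn which segment and page
 * sizes the emulated MMU supports. A caller-side sketch (error
 * handling elided; vm_fd is an assumption of this example):
 *
 *	struct kvm_ppc_smmu_info info;
 *	ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info);
 *
 * info.sps[] then holds the (page_shift, slb_enc, enc[]) tuples
 * filled in above, and info.flags reports 1T segment support via
 * KVM_PPC_1T_SEGMENTS.
 */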
a413f474 IM |
1646 | static unsigned int kvm_global_user_count = 0; |
1647 | static DEFINE_SPINLOCK(kvm_global_user_count_lock); | |
1648 | ||
3a167bea | 1649 | static int kvmppc_core_init_vm_pr(struct kvm *kvm) |
f9e0554d | 1650 | { |
9308ab8e | 1651 | mutex_init(&kvm->arch.hpt_mutex); |
f31e65e1 | 1652 | |
699a0ea0 PM |
1653 | #ifdef CONFIG_PPC_BOOK3S_64 |
1654 | /* Start out with the default set of hcalls enabled */ | |
1655 | kvmppc_pr_init_default_hcalls(kvm); | |
1656 | #endif | |
1657 | ||
a413f474 IM |
1658 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { |
1659 | spin_lock(&kvm_global_user_count_lock); | |
1660 | if (++kvm_global_user_count == 1) | |
1661 | pSeries_disable_reloc_on_exc(); | |
1662 | spin_unlock(&kvm_global_user_count_lock); | |
1663 | } | |
f9e0554d PM |
1664 | return 0; |
1665 | } | |
1666 | ||
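/*
 * The global user count used above (and in destroy_vm below) is a
 * refcounted host-wide toggle: the first PR VM on the machine turns
 * relocation-on-exception off, since PR KVM relies on exceptions
 * being delivered through the traditional vectors while a guest
 * runs, and only destroying the last VM turns it back on. The
 * pattern in miniature (names hypothetical):
 *
 *	spin_lock(&count_lock);
 *	if (++count == 1)
 *		disable_feature();
 *	spin_unlock(&count_lock);
 */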
3a167bea | 1667 | static void kvmppc_core_destroy_vm_pr(struct kvm *kvm) |
f9e0554d | 1668 | { |
f31e65e1 BH |
1669 | #ifdef CONFIG_PPC64 |
1670 | WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables)); | |
1671 | #endif | |
a413f474 IM |
1672 | |
1673 | if (firmware_has_feature(FW_FEATURE_SET_MODE)) { | |
1674 | spin_lock(&kvm_global_user_count_lock); | |
1675 | BUG_ON(kvm_global_user_count == 0); | |
1676 | if (--kvm_global_user_count == 0) | |
1677 | pSeries_enable_reloc_on_exc(); | |
1678 | spin_unlock(&kvm_global_user_count_lock); | |
1679 | } | |
f9e0554d PM |
1680 | } |
1681 | ||
3a167bea | 1682 | static int kvmppc_core_check_processor_compat_pr(void) |
f05ed4d5 | 1683 | { |
3a167bea AK |
1684 | /* we are always compatible */ |
1685 | return 0; | |
1686 | } | |
f05ed4d5 | 1687 | |
3a167bea AK |
1688 | static long kvm_arch_vm_ioctl_pr(struct file *filp, |
1689 | unsigned int ioctl, unsigned long arg) | |
1690 | { | |
1691 | return -ENOTTY; | |
1692 | } | |
f05ed4d5 | 1693 | |
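/*
 * -ENOTTY is the conventional return for an unrecognized ioctl: PR
 * KVM defines no VM ioctls of its own beyond the generic set, so
 * everything falls through to this default.
 */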
cbbc58d4 | 1694 | static struct kvmppc_ops kvm_ops_pr = { |
3a167bea AK |
1695 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr, |
1696 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr, | |
1697 | .get_one_reg = kvmppc_get_one_reg_pr, | |
1698 | .set_one_reg = kvmppc_set_one_reg_pr, | |
1699 | .vcpu_load = kvmppc_core_vcpu_load_pr, | |
1700 | .vcpu_put = kvmppc_core_vcpu_put_pr, | |
1701 | .set_msr = kvmppc_set_msr_pr, | |
1702 | .vcpu_run = kvmppc_vcpu_run_pr, | |
1703 | .vcpu_create = kvmppc_core_vcpu_create_pr, | |
1704 | .vcpu_free = kvmppc_core_vcpu_free_pr, | |
1705 | .check_requests = kvmppc_core_check_requests_pr, | |
1706 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr, | |
1707 | .flush_memslot = kvmppc_core_flush_memslot_pr, | |
1708 | .prepare_memory_region = kvmppc_core_prepare_memory_region_pr, | |
1709 | .commit_memory_region = kvmppc_core_commit_memory_region_pr, | |
1710 | .unmap_hva = kvm_unmap_hva_pr, | |
1711 | .unmap_hva_range = kvm_unmap_hva_range_pr, | |
1712 | .age_hva = kvm_age_hva_pr, | |
1713 | .test_age_hva = kvm_test_age_hva_pr, | |
1714 | .set_spte_hva = kvm_set_spte_hva_pr, | |
1715 | .mmu_destroy = kvmppc_mmu_destroy_pr, | |
1716 | .free_memslot = kvmppc_core_free_memslot_pr, | |
1717 | .create_memslot = kvmppc_core_create_memslot_pr, | |
1718 | .init_vm = kvmppc_core_init_vm_pr, | |
1719 | .destroy_vm = kvmppc_core_destroy_vm_pr, | |
3a167bea AK |
1720 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr, |
1721 | .emulate_op = kvmppc_core_emulate_op_pr, | |
1722 | .emulate_mtspr = kvmppc_core_emulate_mtspr_pr, | |
1723 | .emulate_mfspr = kvmppc_core_emulate_mfspr_pr, | |
1724 | .fast_vcpu_kick = kvm_vcpu_kick, | |
1725 | .arch_vm_ioctl = kvm_arch_vm_ioctl_pr, | |
ae2113a4 PM |
1726 | #ifdef CONFIG_PPC_BOOK3S_64 |
1727 | .hcall_implemented = kvmppc_hcall_impl_pr, | |
1728 | #endif | |
3a167bea AK |
1729 | }; |
1730 | ||
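/*
 * kvm_ops_pr is the PR flavour's entry in powerpc KVM's ops-table
 * dispatch: generic code does not call these handlers directly but
 * indirects through the registered ops, e.g. (illustrative):
 *
 *	vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
 *
 * This indirection is what lets the HV and PR implementations be
 * built as separate modules behind the same /dev/kvm interface.
 */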
cbbc58d4 AK |
1731 | |
1732 | int kvmppc_book3s_init_pr(void) | |
f05ed4d5 PM |
1733 | { |
1734 | int r; | |
1735 | ||
cbbc58d4 AK |
1736 | r = kvmppc_core_check_processor_compat_pr(); |
1737 | if (r < 0) | |
f05ed4d5 PM |
1738 | return r; |
1739 | ||
cbbc58d4 AK |
1740 | kvm_ops_pr.owner = THIS_MODULE; |
1741 | kvmppc_pr_ops = &kvm_ops_pr; | |
f05ed4d5 | 1742 | |
cbbc58d4 | 1743 | r = kvmppc_mmu_hpte_sysinit(); |
f05ed4d5 PM |
1744 | return r; |
1745 | } | |
1746 | ||
cbbc58d4 | 1747 | void kvmppc_book3s_exit_pr(void) |
f05ed4d5 | 1748 | { |
cbbc58d4 | 1749 | kvmppc_pr_ops = NULL; |
f05ed4d5 | 1750 | kvmppc_mmu_hpte_sysexit(); |
f05ed4d5 PM |
1751 | } |
1752 | ||
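/*
 * Note the ordering above: init publishes kvm_ops_pr through
 * kvmppc_pr_ops before bringing up the HPTE caches, and exit clears
 * the pointer first, so no new PR VM can appear while
 * kvmppc_mmu_hpte_sysexit() tears the caches down.
 */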
cbbc58d4 AK |
1753 | /* |
1754 | * We only support separate modules for book3s 64 | |
1755 | */ | |
1756 | #ifdef CONFIG_PPC_BOOK3S_64 | |
1757 | ||
3a167bea AK |
1758 | module_init(kvmppc_book3s_init_pr); |
1759 | module_exit(kvmppc_book3s_exit_pr); | |
2ba9f0d8 AK |
1760 | |
1761 | MODULE_LICENSE("GPL"); | |
398a76c6 AG |
1762 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
1763 | MODULE_ALIAS("devname:kvm"); | |
cbbc58d4 | 1764 | #endif |