/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

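/*
 * SPE state handling: the helpers below hand the physical SPE unit to the
 * guest or back to the host. vcpu->arch.shadow_msr tracks whether the guest
 * currently owns the unit, and kvmppc_vcpu_sync_spe() keeps that shadow bit
 * in step with the MSR[SPE] value the guest has asked for.
 */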
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows we are
 * holding the FPU, and can then help to save the guest vcpu FP state
 * if other threads need the FPU.
 * This simulates an FP unavailable fault.
 *
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save guest vcpu FP state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save guest vcpu AltiVec state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

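/*
 * Interrupt queueing: each pending exception is recorded as a priority bit
 * in vcpu->arch.pending_exceptions. The bits are consumed later by
 * kvmppc_booke_irqprio_deliver(), which decides whether the interrupt can
 * actually be delivered to the guest.
 */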
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

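/*
 * The set_guest_*srr() helpers record the interrupted PC and MSR in the
 * save/restore register pair that matches the interrupt class being
 * delivered (SRR for non-critical, CSRR for critical, DSRR for debug when
 * the core has a separate debug level, MCSRR for machine check).
 */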
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

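/*
 * (Re)program the host-side timer so that it fires when the guest's emulated
 * watchdog would next trigger, or stop it if the timeout is too far away for
 * the timer API.
 */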
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

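/*
 * Host timer callback for the emulated watchdog. It walks TSR through the
 * usual Book E watchdog sequence (set ENW, then WIS, then the final event)
 * using cmpxchg so that concurrent TSR updates are not lost, kicks the vcpu
 * when an interrupt becomes pending, and requests an exit to userspace on
 * the final expiry if that action was configured.
 */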
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

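/* Reflect the current TCR/TSR state into the pending exception bitmap. */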
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

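/*
 * Walk the pending exception bitmap in priority order, deliver the first
 * interrupt that the guest's current MSR allows, and publish whether
 * anything is still pending via the shared area.
 */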
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}

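/*
 * Handle requests posted to the vcpu (timer updates, TLB flushes, watchdog
 * and EPR exits). Returns 1 to re-enter the guest, 0 to exit to userspace.
 */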
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

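/*
 * Outer run loop entry: deliver pending exceptions, give the guest the
 * FP/AltiVec units and debug registers, enter the guest via
 * __kvmppc_vcpu_run(), and restore the host context afterwards.
 */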
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using the AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

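/* Common handling for exits that require instruction emulation. */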
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

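/*
 * Debug exits: if the guest owns the debug resources, reflect the event back
 * into the guest; otherwise report it to userspace through the run struct.
 */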
static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to Guest.
		 * Imprecise debug event is not injected
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
		    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resource owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr)
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

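/*
 * Build a minimal pt_regs from the current host context so that host
 * exception handlers (do_IRQ(), timer_interrupt(), ...) can be called
 * directly from the exit path below.
 */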
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (but not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

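/* Decide how to continue after a failed or deferred guest instruction fetch. */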
static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
		       __func__, vcpu->arch.pc);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit_irqoff();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
		    (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instr,
			 * so lets return to host to handle.
			 */
			r = kvmppc_handle_debug(run, vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

	/*
	 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
	 * see kvmppc_core_check_processor_compat().
	 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
		                            vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}

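/*
 * Update TSR and let the watchdog and decrementer logic react to any change
 * in the enable/status bits.
 */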
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	/*
	 * Clear DBSR.MRR to avoid a guest debug interrupt, as this
	 * event is of interest only to the host.
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

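/*
 * Helpers used by the KVM_GET_SREGS/KVM_SET_SREGS paths: each pair transfers
 * one feature block (base, arch206, IVORs) between the vcpu and the
 * userspace kvm_sregs layout.
 */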
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1569 | sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]; | |
1570 | sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]; | |
1571 | sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]; | |
1572 | sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]; | |
1573 | sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]; | |
1574 | sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]; | |
3a167bea | 1575 | return 0; |
5ce941ee SW |
1576 | } |
1577 | ||
1578 | int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs) | |
1579 | { | |
1580 | if (!(sregs->u.e.features & KVM_SREGS_E_IVOR)) | |
1581 | return 0; | |
1582 | ||
1583 | vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0]; | |
1584 | vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1]; | |
1585 | vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2]; | |
1586 | vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3]; | |
1587 | vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4]; | |
1588 | vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5]; | |
1589 | vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6]; | |
1590 | vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7]; | |
1591 | vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8]; | |
1592 | vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9]; | |
1593 | vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10]; | |
1594 | vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11]; | |
1595 | vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12]; | |
1596 | vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13]; | |
1597 | vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14]; | |
1598 | vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15]; | |
1599 | ||
1600 | return 0; | |
1601 | } | |
1602 | ||
bbf45ba5 HB |
1603 | int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu, |
1604 | struct kvm_sregs *sregs) | |
1605 | { | |
5ce941ee SW |
1606 | sregs->pvr = vcpu->arch.pvr; |
1607 | ||
1608 | get_sregs_base(vcpu, sregs); | |
1609 | get_sregs_arch206(vcpu, sregs); | |
cbbc58d4 | 1610 | return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs); |
bbf45ba5 HB |
1611 | } |
1612 | ||
1613 | int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu, | |
1614 | struct kvm_sregs *sregs) | |
1615 | { | |
5ce941ee SW |
1616 | int ret; |
1617 | ||
1618 | if (vcpu->arch.pvr != sregs->pvr) | |
1619 | return -EINVAL; | |
1620 | ||
1621 | ret = set_sregs_base(vcpu, sregs); | |
1622 | if (ret < 0) | |
1623 | return ret; | |
1624 | ||
1625 | ret = set_sregs_arch206(vcpu, sregs); | |
1626 | if (ret < 0) | |
1627 | return ret; | |
1628 | ||
cbbc58d4 | 1629 | return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs); |
bbf45ba5 HB |
1630 | } |
1631 | ||
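kvm_arch_vcpu_ioctl_get_sregs()/set_sregs() above implement KVM_GET_SREGS/KVM_SET_SREGS for BookE; note in set_sregs_base() that DEC and TSR are only written back when the matching KVM_SREGS_E_UPDATE_* flag is set in update_special. A hedged sketch of clearing the guest TSR that way (vcpu_fd is an assumed open vcpu descriptor):

    /* Illustrative only: read-modify-write the BookE sregs. Only fields
     * flagged in update_special are pushed back into the vcpu. */
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int clear_guest_tsr(int vcpu_fd)
    {
            struct kvm_sregs sregs;

            if (ioctl(vcpu_fd, KVM_GET_SREGS, &sregs) < 0)
                    return -1;

            sregs.u.e.tsr = 0;
            sregs.u.e.update_special |= KVM_SREGS_E_UPDATE_TSR;  /* see set_sregs_base() */
            return ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
    }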
8a41ea53 MC |
1632 | int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, |
1633 | union kvmppc_one_reg *val) | |
31f3438e | 1634 | { |
35b299e2 | 1635 | int r = 0; |
35b299e2 | 1636 | |
8a41ea53 | 1637 | switch (id) { |
6df8d3fc | 1638 | case KVM_REG_PPC_IAC1: |
8a41ea53 | 1639 | *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1); |
547465ef | 1640 | break; |
6df8d3fc | 1641 | case KVM_REG_PPC_IAC2: |
8a41ea53 | 1642 | *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2); |
547465ef BB |
1643 | break; |
1644 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | |
6df8d3fc | 1645 | case KVM_REG_PPC_IAC3: |
8a41ea53 | 1646 | *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3); |
547465ef | 1647 | break; |
35b299e2 | 1648 | case KVM_REG_PPC_IAC4: |
8a41ea53 | 1649 | *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4); |
6df8d3fc | 1650 | break; |
547465ef | 1651 | #endif |
6df8d3fc | 1652 | case KVM_REG_PPC_DAC1: |
8a41ea53 | 1653 | *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1); |
547465ef | 1654 | break; |
35b299e2 | 1655 | case KVM_REG_PPC_DAC2: |
8a41ea53 | 1656 | *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2); |
2c509672 | 1657 | break; |
324b3e63 | 1658 | case KVM_REG_PPC_EPR: { |
34f754b9 | 1659 | u32 epr = kvmppc_get_epr(vcpu); |
8a41ea53 | 1660 | *val = get_reg_val(id, epr); |
324b3e63 AG |
1661 | break; |
1662 | } | |
352df1de MC |
1663 | #if defined(CONFIG_64BIT) |
1664 | case KVM_REG_PPC_EPCR: | |
8a41ea53 | 1665 | *val = get_reg_val(id, vcpu->arch.epcr); |
352df1de MC |
1666 | break; |
1667 | #endif | |
78accda4 | 1668 | case KVM_REG_PPC_TCR: |
8a41ea53 | 1669 | *val = get_reg_val(id, vcpu->arch.tcr); |
78accda4 BB |
1670 | break; |
1671 | case KVM_REG_PPC_TSR: | |
8a41ea53 | 1672 | *val = get_reg_val(id, vcpu->arch.tsr); |
78accda4 | 1673 | break; |
35b299e2 | 1674 | case KVM_REG_PPC_DEBUG_INST: |
033aaa14 | 1675 | *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); |
8c32a2ea | 1676 | break; |
8b75cbbe | 1677 | case KVM_REG_PPC_VRSAVE: |
8a41ea53 | 1678 | *val = get_reg_val(id, vcpu->arch.vrsave); |
8c32a2ea | 1679 | break; |
6df8d3fc | 1680 | default: |
8a41ea53 | 1681 | r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val); |
6df8d3fc BB |
1682 | break; |
1683 | } | |
35b299e2 | 1684 | |
6df8d3fc | 1685 | return r; |
31f3438e PM |
1686 | } |
1687 | ||
8a41ea53 MC |
1688 | int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, |
1689 | union kvmppc_one_reg *val) | |
31f3438e | 1690 | { |
35b299e2 | 1691 | int r = 0; |
35b299e2 | 1692 | |
8a41ea53 | 1693 | switch (id) { |
6df8d3fc | 1694 | case KVM_REG_PPC_IAC1: |
8a41ea53 | 1695 | vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val); |
547465ef | 1696 | break; |
6df8d3fc | 1697 | case KVM_REG_PPC_IAC2: |
8a41ea53 | 1698 | vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val); |
547465ef BB |
1699 | break; |
1700 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | |
6df8d3fc | 1701 | case KVM_REG_PPC_IAC3: |
8a41ea53 | 1702 | vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val); |
547465ef | 1703 | break; |
35b299e2 | 1704 | case KVM_REG_PPC_IAC4: |
8a41ea53 | 1705 | vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val); |
6df8d3fc | 1706 | break; |
547465ef | 1707 | #endif |
6df8d3fc | 1708 | case KVM_REG_PPC_DAC1: |
8a41ea53 | 1709 | vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val); |
547465ef | 1710 | break; |
35b299e2 | 1711 | case KVM_REG_PPC_DAC2: |
8a41ea53 | 1712 | vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val); |
2c509672 | 1713 | break; |
324b3e63 | 1714 | case KVM_REG_PPC_EPR: { |
8a41ea53 | 1715 | u32 new_epr = set_reg_val(id, *val); |
35b299e2 | 1716 | kvmppc_set_epr(vcpu, new_epr); |
324b3e63 AG |
1717 | break; |
1718 | } | |
352df1de MC |
1719 | #if defined(CONFIG_64BIT) |
1720 | case KVM_REG_PPC_EPCR: { | |
8a41ea53 | 1721 | u32 new_epcr = set_reg_val(id, *val); |
35b299e2 | 1722 | kvmppc_set_epcr(vcpu, new_epcr); |
352df1de MC |
1723 | break; |
1724 | } | |
1725 | #endif | |
78accda4 | 1726 | case KVM_REG_PPC_OR_TSR: { |
8a41ea53 | 1727 | u32 tsr_bits = set_reg_val(id, *val); |
78accda4 BB |
1728 | kvmppc_set_tsr_bits(vcpu, tsr_bits); |
1729 | break; | |
1730 | } | |
1731 | case KVM_REG_PPC_CLEAR_TSR: { | |
8a41ea53 | 1732 | u32 tsr_bits = set_reg_val(id, *val); |
78accda4 BB |
1733 | kvmppc_clr_tsr_bits(vcpu, tsr_bits); |
1734 | break; | |
1735 | } | |
1736 | case KVM_REG_PPC_TSR: { | |
8a41ea53 | 1737 | u32 tsr = set_reg_val(id, *val); |
78accda4 BB |
1738 | kvmppc_set_tsr(vcpu, tsr); |
1739 | break; | |
1740 | } | |
1741 | case KVM_REG_PPC_TCR: { | |
8a41ea53 | 1742 | u32 tcr = set_reg_val(id, *val); |
78accda4 BB |
1743 | kvmppc_set_tcr(vcpu, tcr); |
1744 | break; | |
1745 | } | |
8b75cbbe | 1746 | case KVM_REG_PPC_VRSAVE: |
8a41ea53 | 1747 | vcpu->arch.vrsave = set_reg_val(id, *val); |
8b75cbbe | 1748 | break; |
6df8d3fc | 1749 | default: |
8a41ea53 | 1750 | r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val); |
6df8d3fc BB |
1751 | break; |
1752 | } | |
35b299e2 | 1753 | |
6df8d3fc | 1754 | return r; |
31f3438e PM |
1755 | } |
1756 | ||
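kvmppc_get_one_reg()/kvmppc_set_one_reg() sit behind the KVM_GET_ONE_REG/KVM_SET_ONE_REG ioctls, with unknown register ids forwarded to the subarch kvm_ops. A hedged sketch of programming the guest TCR through this interface (vcpu_fd and the TCR value are illustrative):

    /* Illustrative only: a ONE_REG write that lands in the KVM_REG_PPC_TCR
     * case of kvmppc_set_one_reg() above and hence in kvmppc_set_tcr(). */
    #include <stdint.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_guest_tcr(int vcpu_fd, uint32_t tcr)
    {
            struct kvm_one_reg reg = {
                    .id   = KVM_REG_PPC_TCR,
                    .addr = (uintptr_t)&tcr,   /* userspace buffer holding the value */
            };

            return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
    }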
bbf45ba5 HB |
1757 | int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) |
1758 | { | |
1759 | return -ENOTSUPP; | |
1760 | } | |
1761 | ||
1762 | int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu) | |
1763 | { | |
1764 | return -ENOTSUPP; | |
1765 | } | |
1766 | ||
bbf45ba5 HB |
1767 | int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu, |
1768 | struct kvm_translation *tr) | |
1769 | { | |
98001d8d AK |
1770 | int r; |
1771 | ||
98001d8d | 1772 | r = kvmppc_core_vcpu_translate(vcpu, tr); |
98001d8d | 1773 | return r; |
bbf45ba5 | 1774 | } |
d9fbd03d | 1775 | |
4e755758 AG |
1776 | int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log) |
1777 | { | |
1778 | return -ENOTSUPP; | |
1779 | } | |
1780 | ||
5587027c | 1781 | void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free, |
a66b48c3 PM |
1782 | struct kvm_memory_slot *dont) |
1783 | { | |
1784 | } | |
1785 | ||
5587027c | 1786 | int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot, |
a66b48c3 PM |
1787 | unsigned long npages) |
1788 | { | |
1789 | return 0; | |
1790 | } | |
1791 | ||
f9e0554d | 1792 | int kvmppc_core_prepare_memory_region(struct kvm *kvm, |
a66b48c3 | 1793 | struct kvm_memory_slot *memslot, |
09170a49 | 1794 | const struct kvm_userspace_memory_region *mem) |
f9e0554d PM |
1795 | { |
1796 | return 0; | |
1797 | } | |
1798 | ||
1799 | void kvmppc_core_commit_memory_region(struct kvm *kvm, | |
09170a49 | 1800 | const struct kvm_userspace_memory_region *mem, |
f36f3f28 PB |
1801 | const struct kvm_memory_slot *old, |
1802 | const struct kvm_memory_slot *new) | |
dfe49dbd PM |
1803 | { |
1804 | } | |
1805 | ||
1806 | void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot) | |
f9e0554d PM |
1807 | { |
1808 | } | |
1809 | ||
38f98824 MC |
1810 | void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr) |
1811 | { | |
1812 | #if defined(CONFIG_64BIT) | |
1813 | vcpu->arch.epcr = new_epcr; | |
1814 | #ifdef CONFIG_KVM_BOOKE_HV | |
1815 | vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM; | |
1816 | if (vcpu->arch.epcr & SPRN_EPCR_ICM) | |
1817 | vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM; | |
1818 | #endif | |
1819 | #endif | |
1820 | } | |
1821 | ||
dfd4d47e SW |
1822 | void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr) |
1823 | { | |
1824 | vcpu->arch.tcr = new_tcr; | |
f61c94bb | 1825 | arm_next_watchdog(vcpu); |
dfd4d47e SW |
1826 | update_timer_ints(vcpu); |
1827 | } | |
1828 | ||
1829 | void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) | |
1830 | { | |
1831 | set_bits(tsr_bits, &vcpu->arch.tsr); | |
1832 | smp_wmb(); | |
1833 | kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu); | |
1834 | kvm_vcpu_kick(vcpu); | |
1835 | } | |
1836 | ||
1837 | void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits) | |
1838 | { | |
1839 | clear_bits(tsr_bits, &vcpu->arch.tsr); | |
f61c94bb BB |
1840 | |
1841 | /* | |
1842 | * We may have stopped the watchdog due to | |
1843 | * being stuck on final expiration. | |
1844 | */ | |
1845 | if (tsr_bits & (TSR_ENW | TSR_WIS)) | |
1846 | arm_next_watchdog(vcpu); | |
1847 | ||
dfd4d47e SW |
1848 | update_timer_ints(vcpu); |
1849 | } | |
1850 | ||
d02d4d15 | 1851 | void kvmppc_decrementer_func(struct kvm_vcpu *vcpu) |
dfd4d47e | 1852 | { |
21bd000a BB |
1853 | if (vcpu->arch.tcr & TCR_ARE) { |
1854 | vcpu->arch.dec = vcpu->arch.decar; | |
1855 | kvmppc_emulate_dec(vcpu); | |
1856 | } | |
1857 | ||
dfd4d47e SW |
1858 | kvmppc_set_tsr_bits(vcpu, TSR_DIS); |
1859 | } | |
1860 | ||
ce11e48b BB |
1861 | static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg, |
1862 | uint64_t addr, int index) | |
1863 | { | |
1864 | switch (index) { | |
1865 | case 0: | |
1866 | dbg_reg->dbcr0 |= DBCR0_IAC1; | |
1867 | dbg_reg->iac1 = addr; | |
1868 | break; | |
1869 | case 1: | |
1870 | dbg_reg->dbcr0 |= DBCR0_IAC2; | |
1871 | dbg_reg->iac2 = addr; | |
1872 | break; | |
1873 | #if CONFIG_PPC_ADV_DEBUG_IACS > 2 | |
1874 | case 2: | |
1875 | dbg_reg->dbcr0 |= DBCR0_IAC3; | |
1876 | dbg_reg->iac3 = addr; | |
1877 | break; | |
1878 | case 3: | |
1879 | dbg_reg->dbcr0 |= DBCR0_IAC4; | |
1880 | dbg_reg->iac4 = addr; | |
1881 | break; | |
1882 | #endif | |
1883 | default: | |
1884 | return -EINVAL; | |
1885 | } | |
1886 | ||
1887 | dbg_reg->dbcr0 |= DBCR0_IDM; | |
1888 | return 0; | |
1889 | } | |
1890 | ||
1891 | static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr, | |
1892 | int type, int index) | |
1893 | { | |
1894 | switch (index) { | |
1895 | case 0: | |
1896 | if (type & KVMPPC_DEBUG_WATCH_READ) | |
1897 | dbg_reg->dbcr0 |= DBCR0_DAC1R; | |
1898 | if (type & KVMPPC_DEBUG_WATCH_WRITE) | |
1899 | dbg_reg->dbcr0 |= DBCR0_DAC1W; | |
1900 | dbg_reg->dac1 = addr; | |
1901 | break; | |
1902 | case 1: | |
1903 | if (type & KVMPPC_DEBUG_WATCH_READ) | |
1904 | dbg_reg->dbcr0 |= DBCR0_DAC2R; | |
1905 | if (type & KVMPPC_DEBUG_WATCH_WRITE) | |
1906 | dbg_reg->dbcr0 |= DBCR0_DAC2W; | |
1907 | dbg_reg->dac2 = addr; | |
1908 | break; | |
1909 | default: | |
1910 | return -EINVAL; | |
1911 | } | |
1912 | ||
1913 | dbg_reg->dbcr0 |= DBCR0_IDM; | |
1914 | return 0; | |
1915 | } | |
1916 | void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set) | |
1917 | { | |
1918 | /* XXX: Add similar MSR protection for BookE-PR */ | |
1919 | #ifdef CONFIG_KVM_BOOKE_HV | |
1920 | BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP)); | |
1921 | if (set) { | |
1922 | if (prot_bitmap & MSR_UCLE) | |
1923 | vcpu->arch.shadow_msrp |= MSRP_UCLEP; | |
1924 | if (prot_bitmap & MSR_DE) | |
1925 | vcpu->arch.shadow_msrp |= MSRP_DEP; | |
1926 | if (prot_bitmap & MSR_PMM) | |
1927 | vcpu->arch.shadow_msrp |= MSRP_PMMP; | |
1928 | } else { | |
1929 | if (prot_bitmap & MSR_UCLE) | |
1930 | vcpu->arch.shadow_msrp &= ~MSRP_UCLEP; | |
1931 | if (prot_bitmap & MSR_DE) | |
1932 | vcpu->arch.shadow_msrp &= ~MSRP_DEP; | |
1933 | if (prot_bitmap & MSR_PMM) | |
1934 | vcpu->arch.shadow_msrp &= ~MSRP_PMMP; | |
1935 | } | |
1936 | #endif | |
1937 | } | |
1938 | ||
7d15c06f AG |
1939 | int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid, |
1940 | enum xlate_readwrite xlrw, struct kvmppc_pte *pte) | |
1941 | { | |
1942 | int gtlb_index; | |
1943 | gpa_t gpaddr; | |
1944 | ||
1945 | #ifdef CONFIG_KVM_E500V2 | |
1946 | if (!(vcpu->arch.shared->msr & MSR_PR) && | |
1947 | (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) { | |
1948 | pte->eaddr = eaddr; | |
1949 | pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) | | |
1950 | (eaddr & ~PAGE_MASK); | |
1951 | pte->vpage = eaddr >> PAGE_SHIFT; | |
1952 | pte->may_read = true; | |
1953 | pte->may_write = true; | |
1954 | pte->may_execute = true; | |
1955 | ||
1956 | return 0; | |
1957 | } | |
1958 | #endif | |
1959 | ||
1960 | /* Check the guest TLB. */ | |
1961 | switch (xlid) { | |
1962 | case XLATE_INST: | |
1963 | gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr); | |
1964 | break; | |
1965 | case XLATE_DATA: | |
1966 | gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr); | |
1967 | break; | |
1968 | default: | |
1969 | BUG(); | |
1970 | } | |
1971 | ||
1972 | /* Do we have a TLB entry at all? */ | |
1973 | if (gtlb_index < 0) | |
1974 | return -ENOENT; | |
1975 | ||
1976 | gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr); | |
1977 | ||
1978 | pte->eaddr = eaddr; | |
1979 | pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK); | |
1980 | pte->vpage = eaddr >> PAGE_SHIFT; | |
1981 | ||
1982 | /* XXX read permissions from the guest TLB */ | |
1983 | pte->may_read = true; | |
1984 | pte->may_write = true; | |
1985 | pte->may_execute = true; | |
1986 | ||
1987 | return 0; | |
1988 | } | |
1989 | ||
ce11e48b BB |
1990 | int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu, |
1991 | struct kvm_guest_debug *dbg) | |
1992 | { | |
1993 | struct debug_reg *dbg_reg; | |
1994 | int n, b = 0, w = 0; | |
1995 | ||
1996 | if (!(dbg->control & KVM_GUESTDBG_ENABLE)) { | |
348ba710 | 1997 | vcpu->arch.dbg_reg.dbcr0 = 0; |
ce11e48b BB |
1998 | vcpu->guest_debug = 0; |
1999 | kvm_guest_protect_msr(vcpu, MSR_DE, false); | |
2000 | return 0; | |
2001 | } | |
2002 | ||
2003 | kvm_guest_protect_msr(vcpu, MSR_DE, true); | |
2004 | vcpu->guest_debug = dbg->control; | |
348ba710 | 2005 | vcpu->arch.dbg_reg.dbcr0 = 0; |
ce11e48b BB |
2006 | |
2007 | if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) | |
348ba710 | 2008 | vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC; |
ce11e48b BB |
2009 | |
2010 | /* Code below handles only HW breakpoints */ | |
348ba710 | 2011 | dbg_reg = &(vcpu->arch.dbg_reg); |
ce11e48b BB |
2012 | |
2013 | #ifdef CONFIG_KVM_BOOKE_HV | |
2014 | /* | |
2015 | * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1; |
2016 | * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0. |
2017 | */ | |
2018 | dbg_reg->dbcr1 = 0; | |
2019 | dbg_reg->dbcr2 = 0; | |
2020 | #else | |
2021 | /* | |
2022 | * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1. |
2023 | * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR | |
2024 | * is set. | |
2025 | */ | |
2026 | dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US | | |
2027 | DBCR1_IAC4US; | |
2028 | dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US; | |
2029 | #endif | |
2030 | ||
2031 | if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP)) | |
2032 | return 0; | |
2033 | ||
2034 | for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) { | |
2035 | uint64_t addr = dbg->arch.bp[n].addr; | |
2036 | uint32_t type = dbg->arch.bp[n].type; | |
2037 | ||
2038 | if (type == KVMPPC_DEBUG_NONE) | |
2039 | continue; | |
2040 | ||
2041 | if (type & ~(KVMPPC_DEBUG_WATCH_READ | |
2042 | KVMPPC_DEBUG_WATCH_WRITE | | |
2043 | KVMPPC_DEBUG_BREAKPOINT)) | |
2044 | return -EINVAL; | |
2045 | ||
2046 | if (type & KVMPPC_DEBUG_BREAKPOINT) { | |
2047 | /* Setting H/W breakpoint */ | |
2048 | if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++)) | |
2049 | return -EINVAL; | |
2050 | } else { | |
2051 | /* Setting H/W watchpoint */ | |
2052 | if (kvmppc_booke_add_watchpoint(dbg_reg, addr, | |
2053 | type, w++)) | |
2054 | return -EINVAL; | |
2055 | } | |
2056 | } | |
2057 | ||
2058 | return 0; | |
2059 | } | |
2060 | ||
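The handler above consumes a struct kvm_guest_debug whose arch part carries up to KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM breakpoint/watchpoint slots. A hedged userspace sketch placing a single hardware instruction breakpoint (vcpu_fd and the guest address are illustrative):

    /* Illustrative only: one HW breakpoint request; the ioctl handler above
     * turns slot 0 into an IAC via kvmppc_booke_add_breakpoint(). */
    #include <stdint.h>
    #include <string.h>
    #include <sys/ioctl.h>
    #include <linux/kvm.h>

    static int set_hw_breakpoint(int vcpu_fd, uint64_t guest_addr)
    {
            struct kvm_guest_debug dbg;

            memset(&dbg, 0, sizeof(dbg));
            dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
            dbg.arch.bp[0].addr = guest_addr;
            dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;

            return ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
    }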
94fa9d99 SW |
2061 | void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
2062 | { | |
a47d72f3 | 2063 | vcpu->cpu = smp_processor_id(); |
d30f6e48 | 2064 | current->thread.kvm_vcpu = vcpu; |
94fa9d99 SW |
2065 | } |
2066 | ||
2067 | void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu) | |
2068 | { | |
d30f6e48 | 2069 | current->thread.kvm_vcpu = NULL; |
a47d72f3 | 2070 | vcpu->cpu = -1; |
ce11e48b BB |
2071 | |
2072 | /* Clear pending debug event in DBSR */ | |
2073 | kvmppc_clear_dbsr(); | |
94fa9d99 SW |
2074 | } |
2075 | ||
3a167bea AK |
2076 | void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu) |
2077 | { | |
cbbc58d4 | 2078 | vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu); |
3a167bea AK |
2079 | } |
2080 | ||
2081 | int kvmppc_core_init_vm(struct kvm *kvm) | |
2082 | { | |
cbbc58d4 | 2083 | return kvm->arch.kvm_ops->init_vm(kvm); |
3a167bea AK |
2084 | } |
2085 | ||
2086 | struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id) | |
2087 | { | |
cbbc58d4 | 2088 | return kvm->arch.kvm_ops->vcpu_create(kvm, id); |
3a167bea AK |
2089 | } |
2090 | ||
2091 | void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu) | |
2092 | { | |
cbbc58d4 | 2093 | vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu); |
3a167bea AK |
2094 | } |
2095 | ||
2096 | void kvmppc_core_destroy_vm(struct kvm *kvm) | |
2097 | { | |
cbbc58d4 | 2098 | kvm->arch.kvm_ops->destroy_vm(kvm); |
3a167bea AK |
2099 | } |
2100 | ||
2101 | void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu) | |
2102 | { | |
cbbc58d4 | 2103 | vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu); |
3a167bea AK |
2104 | } |
2105 | ||
2106 | void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu) | |
2107 | { | |
cbbc58d4 | 2108 | vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu); |
94fa9d99 SW |
2109 | } |
2110 | ||
2986b8c7 | 2111 | int __init kvmppc_booke_init(void) |
d9fbd03d | 2112 | { |
d30f6e48 | 2113 | #ifndef CONFIG_KVM_BOOKE_HV |
d9fbd03d | 2114 | unsigned long ivor[16]; |
1d542d9c | 2115 | unsigned long *handler = kvmppc_booke_handler_addr; |
d9fbd03d | 2116 | unsigned long max_ivor = 0; |
1d542d9c | 2117 | unsigned long handler_len; |
d9fbd03d HB |
2118 | int i; |
2119 | ||
2120 | /* We install our own exception handlers by hijacking IVPR. IVPR keeps only |
2121 | * the top 16 address bits, so the handlers need a 64KB-aligned 64KB block. */ |
2122 | kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO, | |
2123 | VCPU_SIZE_ORDER); | |
2124 | if (!kvmppc_booke_handlers) | |
2125 | return -ENOMEM; | |
2126 | ||
2127 | /* XXX make sure our handlers are smaller than Linux's */ | |
2128 | ||
2129 | /* Copy our interrupt handlers to match host IVORs. That way we don't | |
2130 | * have to swap the IVORs on every guest/host transition. */ | |
2131 | ivor[0] = mfspr(SPRN_IVOR0); | |
2132 | ivor[1] = mfspr(SPRN_IVOR1); | |
2133 | ivor[2] = mfspr(SPRN_IVOR2); | |
2134 | ivor[3] = mfspr(SPRN_IVOR3); | |
2135 | ivor[4] = mfspr(SPRN_IVOR4); | |
2136 | ivor[5] = mfspr(SPRN_IVOR5); | |
2137 | ivor[6] = mfspr(SPRN_IVOR6); | |
2138 | ivor[7] = mfspr(SPRN_IVOR7); | |
2139 | ivor[8] = mfspr(SPRN_IVOR8); | |
2140 | ivor[9] = mfspr(SPRN_IVOR9); | |
2141 | ivor[10] = mfspr(SPRN_IVOR10); | |
2142 | ivor[11] = mfspr(SPRN_IVOR11); | |
2143 | ivor[12] = mfspr(SPRN_IVOR12); | |
2144 | ivor[13] = mfspr(SPRN_IVOR13); | |
2145 | ivor[14] = mfspr(SPRN_IVOR14); | |
2146 | ivor[15] = mfspr(SPRN_IVOR15); | |
2147 | ||
2148 | for (i = 0; i < 16; i++) { | |
2149 | if (ivor[i] > ivor[max_ivor]) |
1d542d9c | 2150 | max_ivor = i; |
d9fbd03d | 2151 | |
1d542d9c | 2152 | handler_len = handler[i + 1] - handler[i]; |
d9fbd03d | 2153 | memcpy((void *)kvmppc_booke_handlers + ivor[i], |
1d542d9c | 2154 | (void *)handler[i], handler_len); |
d9fbd03d | 2155 | } |
1d542d9c BB |
2156 | |
2157 | handler_len = handler[max_ivor + 1] - handler[max_ivor]; | |
2158 | flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers + | |
2159 | ivor[max_ivor] + handler_len); | |
d30f6e48 | 2160 | #endif /* !BOOKE_HV */ |
db93f574 | 2161 | return 0; |
d9fbd03d HB |
2162 | } |
2163 | ||
db93f574 | 2164 | void __exit kvmppc_booke_exit(void) |
d9fbd03d HB |
2165 | { |
2166 | free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER); | |
2167 | kvm_exit(); | |
2168 | } |