/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};
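/*
 * Editorial note (not part of the original source): each entry above pairs a
 * debugfs file name with the offset of a counter in struct kvm_vcpu
 * (VCPU_STAT) or struct kvm (VM_STAT); the generic KVM debugfs code reads the
 * counters through these offsets, so e.g. "dtlb_v" roughly reports
 * vcpu->stat.dtlb_virt_miss_exits.
 */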

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);

	if (vcpu->arch.shared->msr & MSR_WE) {
		kvm_vcpu_block(vcpu);
		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
	}

	kvmppc_vcpu_sync_spe(vcpu);
}
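/*
 * Editorial note (not part of the original source): a guest setting MSR[WE]
 * ("wait enable") is handled above by blocking the vCPU thread in the host
 * until something becomes pending, rather than by idling in guest context.
 */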

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			vcpu->arch.esr = vcpu->arch.queued_esr;
		if (update_dear)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}
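/*
 * Editorial note (not part of the original source), a delivery sketch: with
 * the reset values programmed in kvm_arch_vcpu_setup() below
 * (IVPR = 0x55550000, IVOR[n] = 0x7700 | n * 4), delivering e.g.
 * BOOKE_IRQPRIO_DTLB_MISS saves the old PC/MSR in SRR0/SRR1, clears every
 * MSR bit outside msr_mask (here CE, ME and DE), and redirects the guest to
 * IVPR | IVOR[priority]. A real guest replaces IVPR/IVOR with its own
 * vectors early in boot.
 */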

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	if (*pending)
		vcpu->arch.shared->int_pending = 1;
	else if (old_pending)
		vcpu->arch.shared->int_pending = 0;
}
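/*
 * Editorial note (not part of the original source): a lower bit number means
 * a more urgent exception, so __ffs() picks the highest-priority pending one
 * and find_next_bit() walks toward the less urgent ones until something can
 * actually be delivered (or none can).
 */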

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must
			 * be handled by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
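/*
 * Editorial note (not part of the original source): per the encoding named
 * in the comment above, an errno is packed into the upper bits of the return
 * value, e.g. (-EINTR << 2) | RESUME_HOST asks the generic run loop to
 * return -EINTR to userspace, while RESUME_GUEST(_NV) re-enters the guest,
 * optionally reloading non-volatile registers first.
 */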

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->msr = 0;
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = vcpu->arch.esr;
	sregs->u.e.dear = vcpu->arch.shared->dar;
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	vcpu->arch.esr = sregs->u.e.esr;
	vcpu->arch.shared->dar = sregs->u.e.dear;
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	vcpu->arch.tcr = sregs->u.e.tcr;

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC)
		vcpu->arch.dec = sregs->u.e.dec;

	kvmppc_emulate_dec(vcpu);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
		/*
		 * FIXME: existing KVM timer handling is incomplete.
		 * TSR cannot be read by the guest, and its value in
		 * vcpu->arch is always zero. For now, just handle
		 * the case where the caller is trying to inject a
		 * decrementer interrupt.
		 */

		if ((sregs->u.e.tsr & TSR_DIS) &&
		    (vcpu->arch.tcr & TCR_DIE))
			kvmppc_core_queue_dec(vcpu);
	}

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = 0;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != 0)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * holds the upper 16 bits of the vector base, so the handlers must
	 * live in their own naturally aligned 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}
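/*
 * Editorial note (not part of the original source): flush_icache_range()
 * above is needed because the handlers were copied with ordinary stores; the
 * instruction cache must be made coherent with the new code before any of
 * these vectors can be executed.
 */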

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}