/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include "timing.h"
#include <asm/cacheflush.h>

#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

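/*
 * Each debugfs_entries item below pairs a printable name with the offset of a
 * counter inside struct kvm or struct kvm_vcpu; the generic KVM debugfs code
 * uses these (offset, kind) pairs to expose the counters under the kvm
 * debugfs directory.
 */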
struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "dcr",         VCPU_STAT(dcr_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "itlb_r",      VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",      VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",      VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",      VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "isi",         VCPU_STAT(isi_exits) },
	{ "dsi",         VCPU_STAT(dsi_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_EXTERNAL);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
}

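/*
 * Note that the queue/dequeue helpers above only set or clear a bit in
 * pending_exceptions (and latch DEAR/ESR values for the faults that need
 * them); nothing is injected into the guest at that point.  The actual
 * delivery happens in kvmppc_booke_irqprio_deliver() below, shortly before
 * the guest is re-entered.
 */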
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);
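	/*
	 * The "critical" field lives in the shared (paravirtual) page; a
	 * PV-aware guest is expected to write its r1 value there while it is
	 * inside an interrupt-critical section.  Matching values therefore
	 * tell us to hold off asynchronous interrupts (external, decrementer,
	 * FIT) until the guest leaves that section.
	 */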

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

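	/*
	 * Delivery below mirrors what the hardware does on exception entry:
	 * SRR0/SRR1 capture the interrupted PC and MSR, the new PC comes from
	 * IVPR|IVOR[priority], DEAR/ESR are updated when required, and MSR is
	 * cleared down to the bits left in msr_mask.
	 */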
	if (allowed) {
		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			vcpu->arch.esr = vcpu->arch.queued_esr;
		if (update_dear)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}

/* Check pending exceptions and deliver one, if possible. */
void kvmppc_core_deliver_interrupts(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority <= BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
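		/*
		 * A syscall made from guest kernel mode (MSR_PR clear) with
		 * the KVM_SC_MAGIC_R0 token in r0 is a KVM paravirtual
		 * hypercall; kvmppc_kvm_pv() handles it and the result is
		 * returned to the guest in r3.  Anything else is an ordinary
		 * guest syscall and is reflected back into the guest.
		 */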
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_deliver_interrupts(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pc = 0;
	vcpu->arch.shared->msr = 0;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */

	vcpu->arch.shadow_pid = 1;

	/* Eye-catching number so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR. */
	vcpu->arch.ivpr = 0x55550000;

	kvmppc_init_timing_stats(vcpu);

	return kvmppc_core_vcpu_setup(vcpu);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.sprg4 = regs->sprg4;
	vcpu->arch.sprg5 = regs->sprg5;
	vcpu->arch.sprg6 = regs->sprg6;
	vcpu->arch.sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

int __init kvmppc_booke_init(void)
{
	unsigned long ivor[16];
	unsigned long max_ivor = 0;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * holds the upper 16 address bits, so the handlers must live in a
	 * 64KB-aligned allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = ivor[i];

		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       kvmppc_handlers_start + i * kvmppc_handler_len,
		       kvmppc_handler_len);
	}
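	/*
	 * The handler stubs above were written through the data cache; flush
	 * the instruction cache over the whole copied range so the CPU
	 * fetches the new code rather than stale instructions.
	 */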
	flush_icache_range(kvmppc_booke_handlers,
			   kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);

	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}