/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description:
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/export.h>
#include <linux/slab.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/page.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "book3s.h"
#include "trace.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

/* #define EXIT_DEBUG */

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "exits",       VCPU_STAT(sum_exits) },
	{ "mmio",        VCPU_STAT(mmio_exits) },
	{ "sig",         VCPU_STAT(signal_exits) },
	{ "sysc",        VCPU_STAT(syscall_exits) },
	{ "inst_emu",    VCPU_STAT(emulated_inst_exits) },
	{ "dec",         VCPU_STAT(dec_exits) },
	{ "ext_intr",    VCPU_STAT(ext_intr_exits) },
	{ "queue_intr",  VCPU_STAT(queue_intr) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "pf_storage",  VCPU_STAT(pf_storage) },
	{ "sp_storage",  VCPU_STAT(sp_storage) },
	{ "pf_instruc",  VCPU_STAT(pf_instruc) },
	{ "sp_instruc",  VCPU_STAT(sp_instruc) },
	{ "ld",          VCPU_STAT(ld) },
	{ "ld_slow",     VCPU_STAT(ld_slow) },
	{ "st",          VCPU_STAT(st) },
	{ "st_slow",     VCPU_STAT(st_slow) },
	{ NULL }
};
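/*
 * Undo the split-real address hack: if the vcpu is running with the
 * BOOK3S_HFLAG_SPLIT_HACK offset applied to its PC, strip that offset
 * and clear the flag so the guest sees its real program counter again.
 */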
void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) {
		ulong pc = kvmppc_get_pc(vcpu);
		if ((pc & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS)
			kvmppc_set_pc(vcpu, pc & ~SPLIT_HACK_MASK);
		vcpu->arch.hflags &= ~BOOK3S_HFLAG_SPLIT_HACK;
	}
}
EXPORT_SYMBOL_GPL(kvmppc_unfixup_split_real);

static inline unsigned long kvmppc_interrupt_offset(struct kvm_vcpu *vcpu)
{
	if (!is_kvmppc_hv_enabled(vcpu->kvm))
		return to_book3s(vcpu)->hior;
	return 0;
}

static inline void kvmppc_update_int_pending(struct kvm_vcpu *vcpu,
			unsigned long pending_now, unsigned long old_pending)
{
	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return;
	if (pending_now)
		kvmppc_set_int_pending(vcpu, 1);
	else if (old_pending)
		kvmppc_set_int_pending(vcpu, 0);
}
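/*
 * A PR guest signals a critical section by writing r1 into the
 * "critical" field of the shared (magic) page.  While that value still
 * matches r1 and the guest is in supervisor mode, interrupt delivery is
 * suppressed.  This never applies to HV guests.
 */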
static inline bool kvmppc_critical_section(struct kvm_vcpu *vcpu)
{
	ulong crit_raw;
	ulong crit_r1;
	bool crit;

	if (is_kvmppc_hv_enabled(vcpu->kvm))
		return false;

	crit_raw = kvmppc_get_critical(vcpu);
	crit_r1 = kvmppc_get_gpr(vcpu, 1);

	/* Truncate crit indicators in 32 bit mode */
	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(kvmppc_get_msr(vcpu) & MSR_PR);

	return crit;
}
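/*
 * Deliver an interrupt to the guest right away: save the current PC and
 * MSR (plus any flag bits) into SRR0/SRR1, redirect execution to the
 * exception vector (offset by HIOR for PR guests) and let the MMU
 * callback compute the MSR value the guest sees on interrupt entry.
 */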
void kvmppc_inject_interrupt(struct kvm_vcpu *vcpu, int vec, u64 flags)
{
	kvmppc_unfixup_split_real(vcpu);
	kvmppc_set_srr0(vcpu, kvmppc_get_pc(vcpu));
	kvmppc_set_srr1(vcpu, kvmppc_get_msr(vcpu) | flags);
	kvmppc_set_pc(vcpu, kvmppc_interrupt_offset(vcpu) + vec);
	vcpu->arch.mmu.reset_msr(vcpu);
}
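/*
 * Map an architected exception vector offset to the internal delivery
 * priority used in vcpu->arch.pending_exceptions.  Unknown vectors map
 * to BOOK3S_IRQPRIO_MAX, i.e. they are never delivered.
 */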
static int kvmppc_book3s_vec2irqprio(unsigned int vec)
{
	unsigned int prio;

	switch (vec) {
	case 0x100: prio = BOOK3S_IRQPRIO_SYSTEM_RESET;		break;
	case 0x200: prio = BOOK3S_IRQPRIO_MACHINE_CHECK;	break;
	case 0x300: prio = BOOK3S_IRQPRIO_DATA_STORAGE;		break;
	case 0x380: prio = BOOK3S_IRQPRIO_DATA_SEGMENT;		break;
	case 0x400: prio = BOOK3S_IRQPRIO_INST_STORAGE;		break;
	case 0x480: prio = BOOK3S_IRQPRIO_INST_SEGMENT;		break;
	case 0x500: prio = BOOK3S_IRQPRIO_EXTERNAL;		break;
	case 0x501: prio = BOOK3S_IRQPRIO_EXTERNAL_LEVEL;	break;
	case 0x600: prio = BOOK3S_IRQPRIO_ALIGNMENT;		break;
	case 0x700: prio = BOOK3S_IRQPRIO_PROGRAM;		break;
	case 0x800: prio = BOOK3S_IRQPRIO_FP_UNAVAIL;		break;
	case 0x900: prio = BOOK3S_IRQPRIO_DECREMENTER;		break;
	case 0xc00: prio = BOOK3S_IRQPRIO_SYSCALL;		break;
	case 0xd00: prio = BOOK3S_IRQPRIO_DEBUG;		break;
	case 0xf20: prio = BOOK3S_IRQPRIO_ALTIVEC;		break;
	case 0xf40: prio = BOOK3S_IRQPRIO_VSX;			break;
	case 0xf60: prio = BOOK3S_IRQPRIO_FAC_UNAVAIL;		break;
	default:    prio = BOOK3S_IRQPRIO_MAX;			break;
	}

	return prio;
}

void kvmppc_book3s_dequeue_irqprio(struct kvm_vcpu *vcpu,
				   unsigned int vec)
{
	unsigned long old_pending = vcpu->arch.pending_exceptions;

	clear_bit(kvmppc_book3s_vec2irqprio(vec),
		  &vcpu->arch.pending_exceptions);

	kvmppc_update_int_pending(vcpu, vcpu->arch.pending_exceptions,
				  old_pending);
}

void kvmppc_book3s_queue_irqprio(struct kvm_vcpu *vcpu, unsigned int vec)
{
	vcpu->stat.queue_intr++;

	set_bit(kvmppc_book3s_vec2irqprio(vec),
		&vcpu->arch.pending_exceptions);
#ifdef EXIT_DEBUG
	printk(KERN_INFO "Queueing interrupt %x\n", vec);
#endif
}
EXPORT_SYMBOL_GPL(kvmppc_book3s_queue_irqprio);

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags)
{
	/* might as well deliver this straight away */
	kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_PROGRAM, flags);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_program);

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_queue_dec);

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOK3S_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}
EXPORT_SYMBOL_GPL(kvmppc_core_pending_dec);

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_DECREMENTER);
}
EXPORT_SYMBOL_GPL(kvmppc_core_dequeue_dec);

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int vec = BOOK3S_INTERRUPT_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		vec = BOOK3S_INTERRUPT_EXTERNAL_LEVEL;

	kvmppc_book3s_queue_irqprio(vcpu, vec);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL);
	kvmppc_book3s_dequeue_irqprio(vcpu, BOOK3S_INTERRUPT_EXTERNAL_LEVEL);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu, ulong dar,
				    ulong flags)
{
	kvmppc_set_dar(vcpu, dar);
	kvmppc_set_dsisr(vcpu, flags);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong flags)
{
	u64 msr = kvmppc_get_msr(vcpu);
	msr &= ~(SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	msr |= flags & (SRR1_ISI_NOPT | SRR1_ISI_N_OR_G | SRR1_ISI_PROT);
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
}
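/*
 * Try to deliver the interrupt of the given priority.  Decrementer and
 * external interrupts are only delivered when MSR_EE is set and the
 * guest is not in a critical section; everything else is delivered
 * unconditionally.  Returns nonzero if the interrupt was injected.
 */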
static int kvmppc_book3s_irqprio_deliver(struct kvm_vcpu *vcpu,
					 unsigned int priority)
{
	int deliver = 1;
	int vec = 0;
	bool crit = kvmppc_critical_section(vcpu);

	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_DECREMENTER;
		break;
	case BOOK3S_IRQPRIO_EXTERNAL:
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		deliver = (kvmppc_get_msr(vcpu) & MSR_EE) && !crit;
		vec = BOOK3S_INTERRUPT_EXTERNAL;
		break;
	case BOOK3S_IRQPRIO_SYSTEM_RESET:
		vec = BOOK3S_INTERRUPT_SYSTEM_RESET;
		break;
	case BOOK3S_IRQPRIO_MACHINE_CHECK:
		vec = BOOK3S_INTERRUPT_MACHINE_CHECK;
		break;
	case BOOK3S_IRQPRIO_DATA_STORAGE:
		vec = BOOK3S_INTERRUPT_DATA_STORAGE;
		break;
	case BOOK3S_IRQPRIO_INST_STORAGE:
		vec = BOOK3S_INTERRUPT_INST_STORAGE;
		break;
	case BOOK3S_IRQPRIO_DATA_SEGMENT:
		vec = BOOK3S_INTERRUPT_DATA_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_INST_SEGMENT:
		vec = BOOK3S_INTERRUPT_INST_SEGMENT;
		break;
	case BOOK3S_IRQPRIO_ALIGNMENT:
		vec = BOOK3S_INTERRUPT_ALIGNMENT;
		break;
	case BOOK3S_IRQPRIO_PROGRAM:
		vec = BOOK3S_INTERRUPT_PROGRAM;
		break;
	case BOOK3S_IRQPRIO_VSX:
		vec = BOOK3S_INTERRUPT_VSX;
		break;
	case BOOK3S_IRQPRIO_ALTIVEC:
		vec = BOOK3S_INTERRUPT_ALTIVEC;
		break;
	case BOOK3S_IRQPRIO_FP_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FP_UNAVAIL;
		break;
	case BOOK3S_IRQPRIO_SYSCALL:
		vec = BOOK3S_INTERRUPT_SYSCALL;
		break;
	case BOOK3S_IRQPRIO_DEBUG:
		vec = BOOK3S_INTERRUPT_TRACE;
		break;
	case BOOK3S_IRQPRIO_PERFORMANCE_MONITOR:
		vec = BOOK3S_INTERRUPT_PERFMON;
		break;
	case BOOK3S_IRQPRIO_FAC_UNAVAIL:
		vec = BOOK3S_INTERRUPT_FAC_UNAVAIL;
		break;
	default:
		deliver = 0;
		printk(KERN_ERR "KVM: Unknown interrupt: 0x%x\n", priority);
		break;
	}

#if 0
	printk(KERN_INFO "Deliver interrupt 0x%x? %x\n", vec, deliver);
#endif

	if (deliver)
		kvmppc_inject_interrupt(vcpu, vec, 0);

	return deliver;
}

/*
 * This function determines if an irqprio should be cleared once issued.
 */
static bool clear_irqprio(struct kvm_vcpu *vcpu, unsigned int priority)
{
	switch (priority) {
	case BOOK3S_IRQPRIO_DECREMENTER:
		/* DEC interrupts get cleared by mtdec */
		return false;
	case BOOK3S_IRQPRIO_EXTERNAL_LEVEL:
		/* External interrupts get cleared by userspace */
		return false;
	}

	return true;
}
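/*
 * Check pending exceptions and deliver what the guest can currently
 * take, scanning the bitmap in priority order.  The scan stops once an
 * interrupt is both delivered and cleared (see clear_irqprio()); the
 * guest's int_pending flag is then updated to match the remaining state.
 */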
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned long old_pending = vcpu->arch.pending_exceptions;
	unsigned int priority;

#ifdef EXIT_DEBUG
	if (vcpu->arch.pending_exceptions)
		printk(KERN_EMERG "KVM: Check pending: %lx\n", vcpu->arch.pending_exceptions);
#endif
	priority = __ffs(*pending);
	while (priority < BOOK3S_IRQPRIO_MAX) {
		if (kvmppc_book3s_irqprio_deliver(vcpu, priority) &&
		    clear_irqprio(vcpu, priority)) {
			clear_bit(priority, &vcpu->arch.pending_exceptions);
			break;
		}

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	kvmppc_update_int_pending(vcpu, *pending, old_pending);

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_core_prepare_to_enter);
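/*
 * Translate a guest physical address to a host page frame number.  The
 * guest's magic (shared) page is special-cased: it lives in kernel
 * memory rather than in a memslot, so its pfn is derived directly from
 * the shared page's kernel address.
 */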
pfn_t kvmppc_gpa_to_pfn(struct kvm_vcpu *vcpu, gpa_t gpa, bool writing,
			bool *writable)
{
	ulong mp_pa = vcpu->arch.magic_page_pa & KVM_PAM;
	gfn_t gfn = gpa >> PAGE_SHIFT;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	/* Magic page override */
	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((gpa & KVM_PAM) == mp_pa)) {
		ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
		pfn_t pfn;

		pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
		get_page(pfn_to_page(pfn));
		if (writable)
			*writable = true;
		return pfn;
	}

	return gfn_to_pfn_prot(vcpu->kvm, gfn, writing, writable);
}
EXPORT_SYMBOL_GPL(kvmppc_gpa_to_pfn);
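/*
 * Translate an effective address for an instruction or data access.  If
 * address translation is enabled in the guest MSR, the per-flavour MMU
 * xlate callback is used; otherwise real mode is emulated by mapping the
 * address 1:1 (modulo the split-real hack for instruction fetches).
 */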
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	bool data = (xlid == XLATE_DATA);
	bool iswrite = (xlrw == XLATE_WRITE);
	int relocated = (kvmppc_get_msr(vcpu) & (data ? MSR_DR : MSR_IR));
	int r;

	if (relocated) {
		r = vcpu->arch.mmu.xlate(vcpu, eaddr, pte, data, iswrite);
	} else {
		pte->eaddr = eaddr;
		pte->raddr = eaddr & KVM_PAM;
		pte->vpage = VSID_REAL | eaddr >> 12;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;
		r = 0;

		if ((kvmppc_get_msr(vcpu) & (MSR_IR | MSR_DR)) == MSR_DR &&
		    !data) {
			if ((vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
			    ((eaddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
				pte->raddr &= ~SPLIT_HACK_MASK;
		}
	}

	return r;
}
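/*
 * Fetch the instruction the guest was executing when it exited.  For a
 * system call exit the PC already points past the sc instruction, so
 * back up by one word before reading guest memory.
 */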
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *inst)
{
	ulong pc = kvmppc_get_pc(vcpu);
	int r;

	if (type == INST_SC)
		pc -= 4;

	r = kvmppc_ld(vcpu, &pc, sizeof(u32), inst, false);
	if (r == EMULATE_DONE)
		return r;
	else
		return EMULATE_AGAIN;
}
EXPORT_SYMBOL_GPL(kvmppc_load_last_inst);

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	return 0;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = kvmppc_get_pc(vcpu);
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = kvmppc_get_ctr(vcpu);
	regs->lr = kvmppc_get_lr(vcpu);
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = kvmppc_get_msr(vcpu);
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	kvmppc_set_pc(vcpu, regs->pc);
	kvmppc_set_cr(vcpu, regs->cr);
	kvmppc_set_ctr(vcpu, regs->ctr);
	kvmppc_set_lr(vcpu, regs->lr);
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}
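/*
 * KVM_GET_ONE_REG: ask the HV/PR backend for the register first; if it
 * does not handle the id (-EINVAL), fall back to the registers that are
 * common to all book3s flavours.
 */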
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			*val = get_reg_val(id, kvmppc_get_dar(vcpu));
			break;
		case KVM_REG_PPC_DSISR:
			*val = get_reg_val(id, kvmppc_get_dsisr(vcpu));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, VCPU_FPR(vcpu, i));
			break;
		case KVM_REG_PPC_FPSCR:
			*val = get_reg_val(id, vcpu->arch.fp.fpscr);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				val->vsxval[0] = vcpu->arch.fp.fpr[i][0];
				val->vsxval[1] = vcpu->arch.fp.fpr[i][1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
		case KVM_REG_PPC_DEBUG_INST:
			*val = get_reg_val(id, INS_TW);
			break;
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			*val = get_reg_val(id, kvmppc_xics_get_icp(vcpu));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			*val = get_reg_val(id, vcpu->arch.fscr);
			break;
		case KVM_REG_PPC_TAR:
			*val = get_reg_val(id, vcpu->arch.tar);
			break;
		case KVM_REG_PPC_EBBHR:
			*val = get_reg_val(id, vcpu->arch.ebbhr);
			break;
		case KVM_REG_PPC_EBBRR:
			*val = get_reg_val(id, vcpu->arch.ebbrr);
			break;
		case KVM_REG_PPC_BESCR:
			*val = get_reg_val(id, vcpu->arch.bescr);
			break;
		case KVM_REG_PPC_VTB:
			*val = get_reg_val(id, vcpu->arch.vtb);
			break;
		case KVM_REG_PPC_IC:
			*val = get_reg_val(id, vcpu->arch.ic);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}
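/*
 * KVM_SET_ONE_REG counterpart of the above: try the backend first, then
 * handle the common book3s registers here.
 */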
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
			union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
	if (r == -EINVAL) {
		r = 0;
		switch (id) {
		case KVM_REG_PPC_DAR:
			kvmppc_set_dar(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_DSISR:
			kvmppc_set_dsisr(vcpu, set_reg_val(id, *val));
			break;
		case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
			i = id - KVM_REG_PPC_FPR0;
			VCPU_FPR(vcpu, i) = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_FPSCR:
			vcpu->arch.fp.fpscr = set_reg_val(id, *val);
			break;
#ifdef CONFIG_VSX
		case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
			if (cpu_has_feature(CPU_FTR_VSX)) {
				i = id - KVM_REG_PPC_VSR0;
				vcpu->arch.fp.fpr[i][0] = val->vsxval[0];
				vcpu->arch.fp.fpr[i][1] = val->vsxval[1];
			} else {
				r = -ENXIO;
			}
			break;
#endif /* CONFIG_VSX */
#ifdef CONFIG_KVM_XICS
		case KVM_REG_PPC_ICP_STATE:
			if (!vcpu->arch.icp) {
				r = -ENXIO;
				break;
			}
			r = kvmppc_xics_set_icp(vcpu,
						set_reg_val(id, *val));
			break;
#endif /* CONFIG_KVM_XICS */
		case KVM_REG_PPC_FSCR:
			vcpu->arch.fscr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_TAR:
			vcpu->arch.tar = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBHR:
			vcpu->arch.ebbhr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_EBBRR:
			vcpu->arch.ebbrr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_BESCR:
			vcpu->arch.bescr = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_VTB:
			vcpu->arch.vtb = set_reg_val(id, *val);
			break;
		case KVM_REG_PPC_IC:
			vcpu->arch.ic = set_reg_val(id, *val);
			break;
		default:
			r = -EINVAL;
			break;
		}
	}

	return r;
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->kvm->arch.kvm_ops->set_msr(vcpu, msr);
}
EXPORT_SYMBOL_GPL(kvmppc_set_msr);

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(kvm_run, vcpu);
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return 0;
}

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	vcpu->guest_debug = dbg->control;
	return 0;
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	kvmppc_core_queue_dec(vcpu);
	kvm_vcpu_kick(vcpu);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->check_requests(vcpu);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return kvm->arch.kvm_ops->get_dirty_log(kvm, log);
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	kvm->arch.kvm_ops->free_memslot(free, dont);
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return kvm->arch.kvm_ops->create_memslot(slot, npages);
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
	kvm->arch.kvm_ops->flush_memslot(kvm, memslot);
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return kvm->arch.kvm_ops->prepare_memory_region(kvm, memslot, mem);
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	kvm->arch.kvm_ops->commit_memory_region(kvm, mem, old, new);
}

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->unmap_hva(kvm, hva);
}
EXPORT_SYMBOL_GPL(kvm_unmap_hva);

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->unmap_hva_range(kvm, start, end);
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	return kvm->arch.kvm_ops->age_hva(kvm, start, end);
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	return kvm->arch.kvm_ops->test_age_hva(kvm, hva);
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	kvm->arch.kvm_ops->set_spte_hva(kvm, hva, pte);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{

#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
	INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif

	return kvm->arch.kvm_ops->init_vm(kvm);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);

#ifdef CONFIG_PPC64
	kvmppc_rtas_tokens_free(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif
}
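/*
 * H_LOGICAL_CI_LOAD hcall: read size bytes (a power of two, at most 8)
 * from a cache-inhibited logical address via the in-kernel MMIO bus and
 * return the value in r4.  Anything that cannot be handled here returns
 * H_TOO_HARD so the hcall can be completed elsewhere.
 */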
int kvmppc_h_logical_ci_load(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	u64 buf;
	int ret;

	if (!is_power_of_2(size) || (size > sizeof(buf)))
		return H_TOO_HARD;

	ret = kvm_io_bus_read(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	if (ret != 0)
		return H_TOO_HARD;

	switch (size) {
	case 1:
		kvmppc_set_gpr(vcpu, 4, *(u8 *)&buf);
		break;

	case 2:
		kvmppc_set_gpr(vcpu, 4, be16_to_cpu(*(__be16 *)&buf));
		break;

	case 4:
		kvmppc_set_gpr(vcpu, 4, be32_to_cpu(*(__be32 *)&buf));
		break;

	case 8:
		kvmppc_set_gpr(vcpu, 4, be64_to_cpu(*(__be64 *)&buf));
		break;

	default:
		BUG();
	}

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_load);
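/*
 * H_LOGICAL_CI_STORE hcall: the store-side counterpart of the above.
 * The value in r6 is written big-endian to the cache-inhibited address
 * in r5 through the in-kernel MMIO bus.
 */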
int kvmppc_h_logical_ci_store(struct kvm_vcpu *vcpu)
{
	unsigned long size = kvmppc_get_gpr(vcpu, 4);
	unsigned long addr = kvmppc_get_gpr(vcpu, 5);
	unsigned long val = kvmppc_get_gpr(vcpu, 6);
	u64 buf;
	int ret;

	switch (size) {
	case 1:
		*(u8 *)&buf = val;
		break;

	case 2:
		*(__be16 *)&buf = cpu_to_be16(val);
		break;

	case 4:
		*(__be32 *)&buf = cpu_to_be32(val);
		break;

	case 8:
		*(__be64 *)&buf = cpu_to_be64(val);
		break;

	default:
		return H_TOO_HARD;
	}

	ret = kvm_io_bus_write(vcpu, KVM_MMIO_BUS, addr, size, &buf);
	if (ret != 0)
		return H_TOO_HARD;

	return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_logical_ci_store);

int kvmppc_core_check_processor_compat(void)
{
	/*
	 * We always return 0 for book3s. We check
	 * for compatibility while loading the HV
	 * or PR module
	 */
	return 0;
}

int kvmppc_book3s_hcall_implemented(struct kvm *kvm, unsigned long hcall)
{
	return kvm->arch.kvm_ops->hcall_implemented(hcall);
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	r = kvmppc_book3s_init_pr();
#endif
	return r;
}

static void kvmppc_book3s_exit(void)
{
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kvmppc_book3s_exit_pr();
#endif
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);

/* On 32bit this is our one and only kernel module */
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif