/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>

#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

/*
 * For now, limit memory to 64GB and require it to be large pages.
 * This value is chosen because it makes the ram_pginfo array be
 * 64kB in size, which is about as large as we want to be trying
 * to allocate with kmalloc.
 */
#define MAX_MEM_ORDER		36

#define LARGE_PAGE_ORDER	24	/* 16MB pages */
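/*
 * With these values the ram_pginfo array has
 * 1 << (MAX_MEM_ORDER - LARGE_PAGE_ORDER) = 4096 entries,
 * i.e. 64kB assuming 16 bytes per struct kvmppc_pginfo.
 */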

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	local_paca->kvm_hstate.kvm_vcpu = vcpu;
	local_paca->kvm_hstate.kvm_vcore = vcpu->arch.vcore;
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
}

static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu);
static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu);

void kvmppc_vcpu_block(struct kvm_vcpu *vcpu)
{
	u64 now;
	unsigned long dec_nsec;

	now = get_tb();
	if (now >= vcpu->arch.dec_expires && !kvmppc_core_pending_dec(vcpu))
		kvmppc_core_queue_dec(vcpu);
	if (vcpu->arch.pending_exceptions)
		return;
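	/*
	 * Convert the time until the guest decrementer would fire from
	 * timebase ticks to nanoseconds, and arm an hrtimer so the vcpu
	 * is woken back up at that point.
	 */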
	if (vcpu->arch.dec_expires != ~(u64)0) {
		dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC /
			tb_ticks_per_sec;
		hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
			      HRTIMER_MODE_REL);
	}

	kvmppc_vcpu_blocked(vcpu);

	kvm_vcpu_block(vcpu);
	vcpu->stat.halt_wakeup++;

	if (vcpu->arch.dec_expires != ~(u64)0)
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);

	kvmppc_vcpu_unblocked(vcpu);
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long pg_index, ra, len;
	unsigned long pg_offset;
	void *va;
	struct kvm_vcpu *tvcpu;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

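	/*
	 * The subfunction code sits in bits 16-18 of the flags argument
	 * (IBM big-endian bit numbering, hence the shift by 63 - 18):
	 * codes 1-3 register the VPA, DTL and SLB shadow buffer
	 * respectively, and codes 5-7 unregister them again.
	 */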
	flags >>= 63 - 18;
	flags &= 7;
	if (flags == 0 || flags == 4)
		return H_PARAMETER;
	if (flags < 4) {
		if (vpa & 0x7f)
			return H_PARAMETER;
		/* registering new area; convert logical addr to real */
		pg_index = vpa >> kvm->arch.ram_porder;
		pg_offset = vpa & (kvm->arch.ram_psize - 1);
		if (pg_index >= kvm->arch.ram_npages)
			return H_PARAMETER;
		if (kvm->arch.ram_pginfo[pg_index].pfn == 0)
			return H_PARAMETER;
		ra = kvm->arch.ram_pginfo[pg_index].pfn << PAGE_SHIFT;
		ra |= pg_offset;
		va = __va(ra);
		if (flags <= 1)
			len = *(unsigned short *)(va + 4);
		else
			len = *(unsigned int *)(va + 4);
		if (pg_offset + len > kvm->arch.ram_psize)
			return H_PARAMETER;
		switch (flags) {
		case 1:		/* register VPA */
			if (len < 640)
				return H_PARAMETER;
			tvcpu->arch.vpa = va;
			init_vpa(vcpu, va);
			break;
		case 2:		/* register DTL */
			if (len < 48)
				return H_PARAMETER;
			if (!tvcpu->arch.vpa)
				return H_RESOURCE;
			len -= len % 48;
			tvcpu->arch.dtl = va;
			tvcpu->arch.dtl_end = va + len;
			break;
		case 3:		/* register SLB shadow buffer */
			if (len < 8)
				return H_PARAMETER;
			if (!tvcpu->arch.vpa)
				return H_RESOURCE;
			len = (len - 16) / 16;
			tvcpu->arch.slb_shadow = va;
			break;
		}
	} else {
		switch (flags) {
		case 5:		/* unregister VPA */
			if (tvcpu->arch.slb_shadow || tvcpu->arch.dtl)
				return H_RESOURCE;
			tvcpu->arch.vpa = NULL;
			break;
		case 6:		/* unregister DTL */
			tvcpu->arch.dtl = NULL;
			break;
		case 7:		/* unregister SLB shadow buffer */
			tvcpu->arch.slb_shadow = NULL;
			break;
		}
	}
	return H_SUCCESS;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;

	switch (req) {
	case H_CEDE:
		vcpu->arch.shregs.msr |= MSR_EE;
		vcpu->arch.ceded = 1;
		smp_mb();
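		/*
		 * The barrier pairs with the one in the H_PROD path below:
		 * each side sets its own flag (ceded here, prodded there)
		 * before testing the other's, so at least one of the two
		 * sides is guaranteed to see the other's store and a prod
		 * cannot be lost.
		 */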
		if (!vcpu->arch.prodded)
			kvmppc_vcpu_block(vcpu);
		else
			vcpu->arch.prodded = 0;
		smp_mb();
		vcpu->arch.ceded = 0;
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded) {
			if (waitqueue_active(&tvcpu->wq)) {
				wake_up_interruptible(&tvcpu->wq);
				tvcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest does a bad real-mode access,
	 * as we have enabled VRMA (virtualized real mode area) mode in the
	 * LPCR.  We just generate an appropriate DSI/ISI to the guest.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
					0x08000000);
		r = RESUME_GUEST;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
		       vcpu->arch.trap, kvmppc_get_pc(vcpu),
		       vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/*
		 * To avoid clobbering exit_reason, only check for signals
		 * if we aren't already exiting to userspace for some other
		 * reason.
		 */
		if (signal_pending(tsk)) {
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_206))
		return 0;
	return -EIO;
}

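/*
 * vcpus are grouped into virtual cores of up to threads_per_core
 * threads; all the vcpus in a vcore are run together on the hardware
 * threads of a single physical core.
 */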
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.last_cpu = -1;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);

	kvmppc_mmu_book3s_hv_init(vcpu);

	/*
	 * Some vcpus may start out in stopped state.  If we initialize
	 * them to busy-in-host state they will stop other vcpus in the
	 * vcore from running.  Instead we initialize them to blocked
	 * state, effectively considering them to be stopped until we
	 * see the first run ioctl for them.
	 */
	vcpu->arch.state = KVMPPC_VCPU_BLOCKED;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
		}
		kvm->arch.vcores[core] = vcore;
	}
	mutex_unlock(&kvm->lock);

	if (!vcore)
		goto uninit_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	++vcore->n_blocked;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_vcpu:
	kfree(vcpu);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu);
}

static void kvmppc_vcpu_blocked(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vc->lock);
	vcpu->arch.state = KVMPPC_VCPU_BLOCKED;
	++vc->n_blocked;
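	/*
	 * If every thread in the vcore is now either runnable or
	 * blocked, nothing is holding the vcore back any more; wake
	 * the first runnable thread so it can get the vcore running.
	 */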
	if (vc->n_runnable > 0 &&
	    vc->n_runnable + vc->n_blocked == vc->num_threads) {
		vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
					arch.run_list);
		wake_up(&vcpu->arch.cpu_run);
	}
	spin_unlock(&vc->lock);
}

static void kvmppc_vcpu_unblocked(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vc->lock);
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_blocked;
	spin_unlock(&vc->lock);
}

extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *v;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	--vc->n_runnable;
	/* decrement the physical thread id of each following vcpu */
	v = vcpu;
	list_for_each_entry_continue(v, &vc->runnable_threads, arch.run_list)
		--v->arch.ptid;
	list_del(&vcpu->arch.run_list);
}

static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	smp_wmb();
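	/*
	 * The barrier above makes the vcpu and vcore pointers in the
	 * target thread's paca visible before that thread is woken
	 * out of nap below.
	 */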
#ifdef CONFIG_PPC_ICP_NATIVE
	if (vcpu->arch.ptid) {
		tpaca->cpu_start = 0x80;
		tpaca->kvm_hstate.in_guest = KVM_GUEST_MODE_GUEST;
		wmb();
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}

static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i;

	HMT_low();
	i = 0;
	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
	HMT_medium();
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;
	return 1;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static int kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vnext;
	long ret;
	u64 now;

	/* don't start if any threads have a signal pending */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (signal_pending(vcpu->arch.run_task))
			return 0;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 * XXX we should also block attempts to bring any
	 * secondary threads online.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_running = 1;
	vc->in_guest = 0;
	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		kvmppc_start_thread(vcpu);
	vcpu = list_first_entry(&vc->runnable_threads, struct kvm_vcpu,
				arch.run_list);

	spin_unlock(&vc->lock);

	preempt_disable();
	kvm_guest_enter();
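	/*
	 * Enter the guest on this (primary) thread; any secondary
	 * threads in the core were already woken and pointed at their
	 * vcpus by kvmppc_start_thread() above.
	 */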
	__kvmppc_vcore_entry(NULL, vcpu);

	/* wait for secondary threads to finish writing their state to memory */
	spin_lock(&vc->lock);
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_running = 2;
	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);
		if (!vcpu->arch.trap) {
			if (signal_pending(vcpu->arch.run_task)) {
				vcpu->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				vcpu->arch.ret = -EINTR;
			}
			continue;		/* didn't get to run */
		}
		ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
					 vcpu->arch.run_task);
		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;
	}

	spin_lock(&vc->lock);
 out:
	vc->vcore_running = 0;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}

	return 1;
}

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ptid;
	int wait_state;
	struct kvmppc_vcore *vc;
	DEFINE_WAIT(wait);

	/* No need to go into the guest when all we do is going out */
	if (signal_pending(current)) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	/* This happens the first time this is called for a vcpu */
	if (vcpu->arch.state == KVMPPC_VCPU_BLOCKED)
		--vc->n_blocked;
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
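	/*
	 * Give this vcpu the next free hardware-thread slot in the core;
	 * kvmppc_remove_runnable() renumbers the remaining vcpus when one
	 * drops out, so the ptids stay dense.
	 */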
	ptid = vc->n_runnable;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.ptid = ptid;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	wait_state = TASK_INTERRUPTIBLE;
	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		if (signal_pending(current)) {
			if (!vc->vcore_running) {
				kvm_run->exit_reason = KVM_EXIT_INTR;
				vcpu->arch.ret = -EINTR;
				break;
			}
			/* have to wait for vcore to stop executing guest */
			wait_state = TASK_UNINTERRUPTIBLE;
			smp_send_reschedule(vc->pcpu);
		}

		if (!vc->vcore_running &&
		    vc->n_runnable + vc->n_blocked == vc->num_threads) {
			/* we can run now */
			if (kvmppc_run_core(vc))
				continue;
		}

		if (vc->vcore_running == 1 && VCORE_EXIT_COUNT(vc) == 0)
			kvmppc_start_thread(vcpu);

		/* wait for other threads to come in, or wait for vcore */
		prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
		spin_unlock(&vc->lock);
		schedule();
		finish_wait(&vcpu->arch.cpu_run, &wait);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		kvmppc_remove_runnable(vc, vcpu);
	spin_unlock(&vc->lock);

	return vcpu->arch.ret;
}

int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_deliver_interrupts(vcpu);
		}
	} while (r == RESUME_GUEST);
	return r;
}

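/*
 * Each 4kB (1ul << SPAPR_TCE_SHIFT) of DMA window is mapped by one
 * 64-bit TCE, so a window of window_size bytes needs
 * (window_size >> SPAPR_TCE_SHIFT) * sizeof(u64) bytes of table,
 * rounded up to whole pages below.
 */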
static long kvmppc_stt_npages(unsigned long window_size)
{
	return ALIGN((window_size >> SPAPR_TCE_SHIFT)
		     * sizeof(u64), PAGE_SIZE) / PAGE_SIZE;
}

static void release_spapr_tce_table(struct kvmppc_spapr_tce_table *stt)
{
	struct kvm *kvm = stt->kvm;
	int i;

	mutex_lock(&kvm->lock);
	list_del(&stt->list);
	for (i = 0; i < kvmppc_stt_npages(stt->window_size); i++)
		__free_page(stt->pages[i]);
	kfree(stt);
	mutex_unlock(&kvm->lock);

	kvm_put_kvm(kvm);
}

static int kvm_spapr_tce_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_spapr_tce_table *stt = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= kvmppc_stt_npages(stt->window_size))
		return VM_FAULT_SIGBUS;

	page = stt->pages[vmf->pgoff];
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_spapr_tce_vm_ops = {
	.fault = kvm_spapr_tce_fault,
};

static int kvm_spapr_tce_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_ops = &kvm_spapr_tce_vm_ops;
	return 0;
}

static int kvm_spapr_tce_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_spapr_tce_table *stt = filp->private_data;

	release_spapr_tce_table(stt);
	return 0;
}

static struct file_operations kvm_spapr_tce_fops = {
	.mmap = kvm_spapr_tce_mmap,
	.release = kvm_spapr_tce_release,
};

long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				   struct kvm_create_spapr_tce *args)
{
	struct kvmppc_spapr_tce_table *stt = NULL;
	long npages;
	int ret = -ENOMEM;
	int i;

	/* Check this LIOBN hasn't been previously allocated */
	list_for_each_entry(stt, &kvm->arch.spapr_tce_tables, list) {
		if (stt->liobn == args->liobn)
			return -EBUSY;
	}

	npages = kvmppc_stt_npages(args->window_size);

	stt = kzalloc(sizeof(*stt) + npages * sizeof(struct page *),
		      GFP_KERNEL);
	if (!stt)
		goto fail;

	stt->liobn = args->liobn;
	stt->window_size = args->window_size;
	stt->kvm = kvm;

	for (i = 0; i < npages; i++) {
		stt->pages[i] = alloc_page(GFP_KERNEL | __GFP_ZERO);
		if (!stt->pages[i])
			goto fail;
	}

	kvm_get_kvm(kvm);

	mutex_lock(&kvm->lock);
	list_add(&stt->list, &kvm->arch.spapr_tce_tables);
	mutex_unlock(&kvm->lock);

	return anon_inode_getfd("kvm-spapr-tce", &kvm_spapr_tce_fops,
				stt, O_RDWR);

fail:
	if (stt) {
		for (i = 0; i < npages; i++)
			if (stt->pages[i])
				__free_page(stt->pages[i]);

		kfree(stt);
	}
	return ret;
}

/*
 * Work out the RMLS (real mode limit selector) field value for a
 * given RMA size.  Assumes POWER7.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		return 8;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_rma_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};

static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_RESERVED;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_rma_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap = kvm_rma_mmap,
	.release = kvm_rma_release,
};

long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_rma_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0) {
		kvm_release_rma(ri);
		return fd;
	}

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}

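/*
 * Translate a host virtual address to the struct page backing it,
 * pinning the page (for write access) with get_user_pages_fast().
 * Returns NULL unless the address is mapped by exactly one page.
 */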
static struct page *hva_to_page(unsigned long addr)
{
	struct page *page[1];
	int npages;

	might_sleep();

	npages = get_user_pages_fast(addr, 1, 1, page);

	if (unlikely(npages != 1))
		return NULL;

	return page[0];
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long psize, porder;
	unsigned long i, npages, totalpages;
	unsigned long pg_ix;
	struct kvmppc_pginfo *pginfo;
	unsigned long hva;
	struct kvmppc_rma_info *ri = NULL;
	struct page *page;

	/* For now, only allow 16MB pages */
	porder = LARGE_PAGE_ORDER;
	psize = 1ul << porder;
	if ((mem->memory_size & (psize - 1)) ||
	    (mem->guest_phys_addr & (psize - 1))) {
		pr_err("bad memory_size=%llx @ %llx\n",
		       mem->memory_size, mem->guest_phys_addr);
		return -EINVAL;
	}

	npages = mem->memory_size >> porder;
	totalpages = (mem->guest_phys_addr + mem->memory_size) >> porder;

	/* More memory than we have space to track? */
	if (totalpages > (1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER)))
		return -EINVAL;

	/* Do we already have an RMA registered? */
	if (mem->guest_phys_addr == 0 && kvm->arch.rma)
		return -EINVAL;

	if (totalpages > kvm->arch.ram_npages)
		kvm->arch.ram_npages = totalpages;

	/* Is this one of our preallocated RMAs? */
	if (mem->guest_phys_addr == 0) {
		struct vm_area_struct *vma;

		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, mem->userspace_addr);
		if (vma && vma->vm_file &&
		    vma->vm_file->f_op == &kvm_rma_fops &&
		    mem->userspace_addr == vma->vm_start)
			ri = vma->vm_file->private_data;
		up_read(&current->mm->mmap_sem);
	}

	if (ri) {
		unsigned long rma_size;
		unsigned long lpcr;
		long rmls;

		rma_size = ri->npages << PAGE_SHIFT;
		if (rma_size > mem->memory_size)
			rma_size = mem->memory_size;
		rmls = lpcr_rmls(rma_size);
		if (rmls < 0) {
			pr_err("Can't use RMA of 0x%lx bytes\n", rma_size);
			return -EINVAL;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;
		kvm->arch.n_rma_pages = rma_size >> porder;
		lpcr = kvm->arch.lpcr & ~(LPCR_VPM0 | LPCR_VRMA_L);
		lpcr |= rmls << LPCR_RMLS_SH;
		kvm->arch.lpcr = lpcr;
		kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		pr_info("Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);
	}

	pg_ix = mem->guest_phys_addr >> porder;
	pginfo = kvm->arch.ram_pginfo + pg_ix;
	for (i = 0; i < npages; ++i, ++pg_ix) {
		if (ri && pg_ix < kvm->arch.n_rma_pages) {
			pginfo[i].pfn = ri->base_pfn +
				(pg_ix << (porder - PAGE_SHIFT));
			continue;
		}
		hva = mem->userspace_addr + (i << porder);
		page = hva_to_page(hva);
		if (!page) {
			pr_err("oops, no pfn for hva %lx\n", hva);
			goto err;
		}
		/* Check it's a 16MB page */
		if (!PageHead(page) ||
		    compound_order(page) != (LARGE_PAGE_ORDER - PAGE_SHIFT)) {
			pr_err("page at %lx isn't 16MB (o=%d)\n",
			       hva, compound_order(page));
			goto err;
		}
		pginfo[i].pfn = page_to_pfn(page);
	}

	return 0;

 err:
	return -EINVAL;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem)
{
	if (mem->guest_phys_addr == 0 && mem->memory_size != 0 &&
	    !kvm->arch.rma)
		kvmppc_map_vrma(kvm, mem);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	long r;
	unsigned long npages = 1ul << (MAX_MEM_ORDER - LARGE_PAGE_ORDER);
	long err = -ENOMEM;
	unsigned long lpcr;

	/* Allocate hashed page table */
	r = kvmppc_alloc_hpt(kvm);
	if (r)
		return r;

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.ram_pginfo = kzalloc(npages * sizeof(struct kvmppc_pginfo),
				       GFP_KERNEL);
	if (!kvm->arch.ram_pginfo) {
		pr_err("kvmppc_core_init_vm: couldn't alloc %lu bytes\n",
		       npages * sizeof(struct kvmppc_pginfo));
		goto out_free;
	}

	kvm->arch.ram_npages = 0;
	kvm->arch.ram_psize = 1ul << LARGE_PAGE_ORDER;
	kvm->arch.ram_porder = LARGE_PAGE_ORDER;
	kvm->arch.rma = NULL;
	kvm->arch.n_rma_pages = 0;

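	/*
	 * Build the initial guest LPCR: keep the host's PECE and LPES
	 * settings, set the DPFD (prefetch depth) field, and enable the
	 * hypervisor decrementer (HDICE) and virtualized real mode
	 * (VPM0, with large VRMA pages).
	 */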
	lpcr = kvm->arch.host_lpcr & (LPCR_PECE | LPCR_LPES);
	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
		LPCR_VPM0 | LPCR_VRMA_L;
	kvm->arch.lpcr = lpcr;

	return 0;

 out_free:
	kvmppc_free_hpt(kvm);
	return err;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	struct kvmppc_pginfo *pginfo;
	unsigned long i;

	if (kvm->arch.ram_pginfo) {
		pginfo = kvm->arch.ram_pginfo;
		kvm->arch.ram_pginfo = NULL;
		for (i = kvm->arch.n_rma_pages; i < kvm->arch.ram_npages; ++i)
			if (pginfo[i].pfn)
				put_page(pfn_to_page(pginfo[i].pfn));
		kfree(pginfo);
	}
	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}

/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt)
{
	return EMULATE_FAIL;
}

static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);