/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */
#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/anon_inodes.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>

#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>

#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);
/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the arch.tbacct_lock
 * of the vcpu that has taken responsibility for running the vcore
 * (i.e. vc->runner).  The stolen times are measured in units of
 * timebase ticks.  (Note that the != TB_NIL checks below are
 * purely defensive; they should never fail.)
 */
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock(&vcpu->arch.tbacct_lock);
}
void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	spin_lock(&vcpu->arch.tbacct_lock);
	if (vc->runner == vcpu && vc->vcore_state != VCORE_INACTIVE)
		vc->preempt_tb = mftb();
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock(&vcpu->arch.tbacct_lock);
}
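/*
 * Worked example of the accounting above: if the task running the
 * vcore is preempted for N timebase ticks between vcpu_put and the
 * next vcpu_load, vc->preempt_tb is recorded at put time and
 * vc->stolen_tb grows by roughly N at load time; busy_preempt and
 * busy_stolen do the same for a vcpu that is busy in the host.
 */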
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->kvm->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}
struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	int r;
	struct kvm_vcpu *v, *ret = NULL;

	mutex_lock(&kvm->lock);
	kvm_for_each_vcpu(r, v, kvm) {
		if (v->vcpu_id == id) {
			ret = v;
			break;
		}
	}
	mutex_unlock(&kvm->lock);
	return ret;
}
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->shared_proc = 1;
	vpa->yield_count = 1;
}
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}
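/*
 * Note that set_vpa() only records the requested guest address and
 * length and raises update_pending; the guest page is actually pinned
 * later, from kvmppc_update_vpa(), where no spinlocks are held.
 */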
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		u16 hword;
		u32 word;
	} length;
};
static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = ((struct reg_vpa *)va)->length.hword;
		else
			len = ((struct reg_vpa *)va)->length.word;
		kvmppc_unpin_guest_page(kvm, va);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}
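/*
 * The switch above also encodes the PAPR ordering rules: a DTL or SLB
 * shadow buffer can only be registered while a VPA is registered, and
 * the VPA cannot be deregistered while either of them is still
 * registered (H_RESOURCE is returned in those cases).
 */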
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, vpap->next_gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr);
	vpap->pinned_addr = va;
	if (va)
		vpap->pinned_end = va + vpap->len;
}
static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}
/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;

	/*
	 * If we are the task running the vcore, then since we hold
	 * the vcore lock, we can't be preempted, so stolen_tb/preempt_tb
	 * can't be updated, so we don't need the tbacct_lock.
	 * If the vcore is inactive, it can't become active (since we
	 * hold the vcore lock), so the vcpu load/put functions won't
	 * update stolen_tb/preempt_tb, and we don't need tbacct_lock.
	 */
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->runner->arch.run_task != current) {
		spin_lock(&vc->runner->arch.tbacct_lock);
		p = vc->stolen_tb;
		if (vc->preempt_tb != TB_NIL)
			p += now - vc->preempt_tb;
		spin_unlock(&vc->runner->arch.tbacct_lock);
	} else {
		p = vc->stolen_tb;
	}
	return p;
}
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = vc->pcpu + vcpu->arch.ptid;
	dt->timebase = now;
	dt->enqueue_to_dispatch_time = stolen;
	dt->srr0 = kvmppc_get_pc(vcpu);
	dt->srr1 = vcpu->arch.shregs.msr;
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = ++vcpu->arch.dtl_index;
}
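/*
 * The DTL is a ring buffer: dtl_ptr wraps back to pinned_addr when it
 * reaches pinned_end, and the barrier above makes sure the entry is
 * visible in memory before the guest sees the incremented dtl_idx.
 */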
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	struct kvm_vcpu *tvcpu;
	int idx;

	switch (req) {
	case H_ENTER:
		idx = srcu_read_lock(&vcpu->kvm->srcu);
		ret = kvmppc_virtmode_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7));
		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (waitqueue_active(&vcpu->wq)) {
				wake_up_interruptible(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}
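/*
 * Hypercalls not handled here return RESUME_HOST; kvmppc_handle_exit()
 * then exits to userspace with KVM_EXIT_PAPR_HCALL so that userspace
 * (typically QEMU) can emulate them.
 */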
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
		       vcpu->arch.trap, kvmppc_get_pc(vcpu),
		       vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i;

	/* zero the whole structure first so it doesn't clobber pvr below */
	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int i, j;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			*val = get_reg_val(id, vcpu->arch.vsr[2 * i]);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			val->vsxval[0] = vcpu->arch.vsr[2 * i];
			val->vsxval[1] = vcpu->arch.vsr[2 * i + 1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRA:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_FPR0 ... KVM_REG_PPC_FPR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			/* VSX => FP reg i is stored in arch.vsr[2*i] */
			long int i = id - KVM_REG_PPC_FPR0;
			vcpu->arch.vsr[2 * i] = set_reg_val(id, *val);
		} else {
			/* let generic code handle it */
			r = -EINVAL;
		}
		break;
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31:
		if (cpu_has_feature(CPU_FTR_VSX)) {
			long int i = id - KVM_REG_PPC_VSR0;
			vcpu->arch.vsr[2 * i] = val->vsxval[0];
			vcpu->arch.vsr[2 * i + 1] = val->vsxval[1];
		} else {
			r = -ENXIO;
		}
		break;
#endif /* CONFIG_VSX */
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}
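/*
 * The checks in the KVM_REG_PPC_VPA_* cases above enforce the ordering
 * userspace must follow: the VPA address has to be set before the SLB
 * shadow or DTL addresses, and it cannot be cleared while either of
 * those is still set.
 */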
int kvmppc_core_check_processor_compat(void)
{
	if (cpu_has_feature(CPU_FTR_HVMODE))
		return 0;
	return -EIO;
}
struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvm_vcpu *vcpu;
	int err = -EINVAL;
	int core;
	struct kvmppc_vcore *vcore;

	core = id / threads_per_core;
	if (core >= KVM_MAX_VCORES)
		goto out;

	err = -ENOMEM;
	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_vcpu;

	vcpu->arch.shared = &vcpu->arch.shregs;
	vcpu->arch.mmcr[0] = MMCR0_FC;
	vcpu->arch.ctrl = CTRL_RUNLATCH;
	/* default to host PVR, since we can't spoof it */
	vcpu->arch.pvr = mfspr(SPRN_PVR);
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	spin_lock_init(&vcpu->arch.vpa_update_lock);
	spin_lock_init(&vcpu->arch.tbacct_lock);
	vcpu->arch.busy_preempt = TB_NIL;

	kvmppc_mmu_book3s_hv_init(vcpu);

	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;

	init_waitqueue_head(&vcpu->arch.cpu_run);

	mutex_lock(&kvm->lock);
	vcore = kvm->arch.vcores[core];
	if (!vcore) {
		vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);
		if (vcore) {
			INIT_LIST_HEAD(&vcore->runnable_threads);
			spin_lock_init(&vcore->lock);
			init_waitqueue_head(&vcore->wq);
			vcore->preempt_tb = TB_NIL;
		}
		kvm->arch.vcores[core] = vcore;
		kvm->arch.online_vcores++;
	}
	mutex_unlock(&kvm->lock);

	err = -ENOMEM;
	if (!vcore)
		goto free_vcpu;

	spin_lock(&vcore->lock);
	++vcore->num_threads;
	spin_unlock(&vcore->lock);
	vcpu->arch.vcore = vcore;

	vcpu->arch.cpu_type = KVM_CPU_3S_64;
	kvmppc_sanity_check(vcpu);

	return vcpu;

free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}
void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.dtl.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.dtl.pinned_addr);
	if (vcpu->arch.slb_shadow.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.slb_shadow.pinned_addr);
	if (vcpu->arch.vpa.pinned_addr)
		kvmppc_unpin_guest_page(vcpu->kvm, vcpu->arch.vpa.pinned_addr);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}
static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		/ tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec),
		      HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}
static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}
extern int __kvmppc_vcore_entry(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void xics_wake_cpu(int cpu);
static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	list_del(&vcpu->arch.run_list);
}
static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 1000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.hwthread_req = 1;
	tpaca->kvm_hstate.kvm_vcpu = NULL;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}
static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
}
static void kvmppc_start_thread(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
	cpu = vc->pcpu + vcpu->arch.ptid;
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.kvm_vcore = vc;
	tpaca->kvm_hstate.napping = 0;
	vcpu->cpu = vc->pcpu;
	smp_wmb();
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (vcpu->arch.ptid) {
		xics_wake_cpu(cpu);
		++vc->n_woken;
	}
#endif
}
static void kvmppc_wait_for_nap(struct kvmppc_vcore *vc)
{
	int i = 0;

	while (vc->nap_count < vc->n_woken) {
		if (++i >= 1000000) {
			pr_err("kvmppc_wait_for_nap timeout %d %d\n",
			       vc->nap_count, vc->n_woken);
			break;
		}
		cpu_relax();
	}
}
/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * go into the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr = cpu_thread_in_core(cpu);

	if (thr)
		return 0;
	while (++thr < threads_per_core)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_core; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}
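/*
 * Rough flow of a vcore run (see kvmppc_run_core below): update any
 * pending VPAs, assign physical thread ids, start the secondary
 * hardware threads, enter the guest on thread 0 via
 * __kvmppc_vcore_entry(), wait for the secondaries to nap again, and
 * finally process each vcpu's exit with kvmppc_handle_exit().
 */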
/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu, *vcpu0, *vnext;
	long ret;
	u64 now;
	int ptid, i, need_vpa_update;
	int srcu_idx;
	struct kvm_vcpu *vcpus_to_update[threads_per_core];

	/* don't start if any threads have a signal pending */
	need_vpa_update = 0;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (signal_pending(vcpu->arch.run_task))
			return;
		if (vcpu->arch.vpa.update_pending ||
		    vcpu->arch.slb_shadow.update_pending ||
		    vcpu->arch.dtl.update_pending)
			vcpus_to_update[need_vpa_update++] = vcpu;
	}

	/*
	 * Initialize *vc, in particular vc->vcore_state, so we can
	 * drop the vcore lock if necessary.
	 */
	vc->n_woken = 0;
	vc->nap_count = 0;
	vc->entry_exit_count = 0;
	vc->vcore_state = VCORE_STARTING;
	vc->in_guest = 0;
	vc->napping_threads = 0;

	/*
	 * Updating any of the vpas requires calling kvmppc_pin_guest_page,
	 * which can't be called with any spinlocks held.
	 */
	if (need_vpa_update) {
		spin_unlock(&vc->lock);
		for (i = 0; i < need_vpa_update; ++i)
			kvmppc_update_vpas(vcpus_to_update[i]);
		spin_lock(&vc->lock);
	}

	/*
	 * Assign physical thread IDs, first to non-ceded vcpus
	 * and then to ceded ones.
	 */
	ptid = 0;
	vcpu0 = NULL;
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		if (!vcpu->arch.ceded) {
			if (!ptid)
				vcpu0 = vcpu;
			vcpu->arch.ptid = ptid++;
		}
	}
	if (!vcpu0)
		goto out;	/* nothing to run; should never happen */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		if (vcpu->arch.ceded)
			vcpu->arch.ptid = ptid++;

	/*
	 * Make sure we are running on thread 0, and that
	 * secondary threads are offline.
	 */
	if (threads_per_core > 1 && !on_primary_thread()) {
		list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
			vcpu->arch.ret = -EBUSY;
		goto out;
	}

	vc->pcpu = smp_processor_id();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		kvmppc_start_thread(vcpu);
		kvmppc_create_dtl_entry(vcpu, vc);
	}

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();
	spin_unlock(&vc->lock);

	kvm_guest_enter();

	srcu_idx = srcu_read_lock(&vcpu0->kvm->srcu);

	__kvmppc_vcore_entry(NULL, vcpu0);

	spin_lock(&vc->lock);
	/* disable sending of IPIs on virtual external irqs */
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list)
		vcpu->cpu = -1;
	/* wait for secondary threads to finish writing their state to memory */
	if (vc->nap_count < vc->n_woken)
		kvmppc_wait_for_nap(vc);
	for (i = 0; i < threads_per_core; ++i)
		kvmppc_release_hwthread(vc->pcpu + i);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;
	spin_unlock(&vc->lock);

	srcu_read_unlock(&vcpu0->kvm->srcu, srcu_idx);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	kvm_guest_exit();

	preempt_enable();
	kvm_resched(vcpu);

	spin_lock(&vc->lock);
	now = get_tb();
	list_for_each_entry(vcpu, &vc->runnable_threads, arch.run_list) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit(vcpu->arch.kvm_run, vcpu,
						 vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (vcpu->arch.ceded) {
			if (ret != RESUME_GUEST)
				kvmppc_end_cede(vcpu);
			else
				kvmppc_set_timer(vcpu);
		}
	}

 out:
	vc->vcore_state = VCORE_INACTIVE;
	list_for_each_entry_safe(vcpu, vnext, &vc->runnable_threads,
				 arch.run_list) {
		if (vcpu->arch.ret != RESUME_GUEST) {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
}
/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE)
		schedule();
	finish_wait(&vcpu->arch.cpu_run, &wait);
}
/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
	vc->vcore_state = VCORE_SLEEPING;
	spin_unlock(&vc->lock);
	schedule();
	finish_wait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
}
static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v, *vn;

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	list_add_tail(&vcpu->arch.run_list, &vc->runnable_threads);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_RUNNING &&
		    VCORE_EXIT_COUNT(vc) == 0) {
			vcpu->arch.ptid = vc->n_runnable - 1;
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			wake_up(&vc->wq);
		}
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state != VCORE_INACTIVE) {
			spin_unlock(&vc->lock);
			kvmppc_wait_for_exec(vcpu, TASK_INTERRUPTIBLE);
			spin_lock(&vc->lock);
			continue;
		}
		list_for_each_entry_safe(v, vn, &vc->runnable_threads,
					 arch.run_list) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		vc->runner = vcpu;
		n_ceded = 0;
		list_for_each_entry(v, &vc->runnable_threads, arch.run_list)
			if (!v->arch.pending_exceptions)
				n_ceded += v->arch.ceded;
		if (n_ceded == vc->n_runnable)
			kvmppc_vcore_blocked(vc);
		else
			kvmppc_run_core(vc);
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING)) {
		spin_unlock(&vc->lock);
		kvmppc_wait_for_exec(vcpu, TASK_UNINTERRUPTIBLE);
		spin_lock(&vc->lock);
	}

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		v = list_first_entry(&vc->runnable_threads,
				     struct kvm_vcpu, arch.run_list);
		wake_up(&v->arch.cpu_run);
	}

	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}
int kvmppc_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. rma_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA or RMA */
	if (!vcpu->kvm->arch.rma_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_fp_to_thread(current);
	flush_altivec_to_thread(current);
	flush_vsx_to_thread(current);
	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			r = kvmppc_pseries_do_hcall(vcpu);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		}
	} while (r == RESUME_GUEST);

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}
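/*
 * kvmppc_vcpu_run() keeps re-entering the guest as long as the result
 * is RESUME_GUEST, handling PAPR hypercalls and guest page faults
 * (RESUME_PAGE_FAULT) in the host between runs.
 */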
/* Work out RMLS (real mode limit selector) field value for a given RMA size.
   Assumes POWER7 or PPC970. */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}
static int kvm_rma_fault(struct vm_area_struct *vma, struct vm_fault *vmf)
{
	struct kvmppc_linear_info *ri = vma->vm_file->private_data;
	struct page *page;

	if (vmf->pgoff >= ri->npages)
		return VM_FAULT_SIGBUS;

	page = pfn_to_page(ri->base_pfn + vmf->pgoff);
	get_page(page);
	vmf->page = page;
	return 0;
}

static const struct vm_operations_struct kvm_rma_vm_ops = {
	.fault = kvm_rma_fault,
};
static int kvm_rma_mmap(struct file *file, struct vm_area_struct *vma)
{
	vma->vm_flags |= VM_DONTEXPAND | VM_DONTDUMP;
	vma->vm_ops = &kvm_rma_vm_ops;
	return 0;
}

static int kvm_rma_release(struct inode *inode, struct file *filp)
{
	struct kvmppc_linear_info *ri = filp->private_data;

	kvm_release_rma(ri);
	return 0;
}

static struct file_operations kvm_rma_fops = {
	.mmap		= kvm_rma_mmap,
	.release	= kvm_rma_release,
};
long kvm_vm_ioctl_allocate_rma(struct kvm *kvm, struct kvm_allocate_rma *ret)
{
	struct kvmppc_linear_info *ri;
	long fd;

	ri = kvm_alloc_rma();
	if (!ri)
		return -ENOMEM;

	fd = anon_inode_getfd("kvm-rma", &kvm_rma_fops, ri, O_RDWR);
	if (fd < 0) {
		kvm_release_rma(ri);
		return fd;
	}

	ret->rma_size = ri->npages << PAGE_SHIFT;
	return fd;
}
static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc;
	(*sps)++;
}
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
	return 0;
}
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	n = kvm_dirty_bitmap_bytes(memslot);
	memset(memslot->dirty_bitmap, 0, n);

	r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap);
	if (r)
		goto out;

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
static void unpin_slot(struct kvm_memory_slot *memslot)
{
	unsigned long *physp;
	unsigned long j, npages, pfn;
	struct page *page;

	physp = memslot->arch.slot_phys;
	npages = memslot->npages;
	if (!physp)
		return;
	for (j = 0; j < npages; j++) {
		if (!(physp[j] & KVMPPC_GOT_PAGE))
			continue;
		pfn = physp[j] >> PAGE_SHIFT;
		page = pfn_to_page(pfn);
		SetPageDirty(page);
		put_page(page);
	}
}
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
	if (!dont || free->arch.slot_phys != dont->arch.slot_phys) {
		unpin_slot(free);
		vfree(free->arch.slot_phys);
		free->arch.slot_phys = NULL;
	}
}
int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;
	slot->arch.slot_phys = NULL;

	return 0;
}
int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	unsigned long *phys;

	/* Allocate a slot_phys array if needed */
	phys = memslot->arch.slot_phys;
	if (!kvm->arch.using_mmu_notifiers && !phys && memslot->npages) {
		phys = vzalloc(memslot->npages * sizeof(unsigned long));
		if (!phys)
			return -ENOMEM;
		memslot->arch.slot_phys = phys;
	}

	return 0;
}
void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memory_slot *memslot;

	if (npages && old->npages) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		memslot = id_to_memslot(kvm->memslots, mem->slot);
		kvmppc_hv_get_dirty_log(kvm, memslot, NULL);
	}
}
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_linear_info *ri = NULL;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr, senc;
	unsigned long psize, porder;
	unsigned long rma_size;
	long rmls;
	unsigned long *physp;
	unsigned long i, npages;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt_virt) {
		err = kvmppc_alloc_hpt(kvm, NULL);
		if (err) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	/* Is this one of our preallocated RMAs? */
	if (vma->vm_file && vma->vm_file->f_op == &kvm_rma_fops &&
	    hva == vma->vm_start)
		ri = vma->vm_file->private_data;

	up_read(&current->mm->mmap_sem);

	if (!ri) {
		/* On POWER7, use VRMA; on PPC970, give up */
		err = -EPERM;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			pr_err("KVM: CPU requires an RMO\n");
			goto out_srcu;
		}

		/* We can handle 4k, 64k or 16M pages in the VRMA */
		err = -EINVAL;
		if (!(psize == 0x1000 || psize == 0x10000 ||
		      psize == 0x1000000))
			goto out_srcu;

		/* Update VRMASD field in the LPCR */
		senc = slb_pgsize_encoding(psize);
		kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
		lpcr = kvm->arch.lpcr & ~LPCR_VRMASD;
		lpcr |= senc << (LPCR_VRMASD_SH - 4);
		kvm->arch.lpcr = lpcr;

		/* Create HPTEs in the hash page table for the VRMA */
		kvmppc_map_vrma(vcpu, memslot, porder);

	} else {
		/* Set up to use an RMO region */
		rma_size = ri->npages;
		if (rma_size > memslot->npages)
			rma_size = memslot->npages;
		rma_size <<= PAGE_SHIFT;
		rmls = lpcr_rmls(rma_size);
		err = -EINVAL;
		if (rmls < 0) {
			pr_err("KVM: Can't use RMA of 0x%lx bytes\n", rma_size);
			goto out_srcu;
		}
		atomic_inc(&ri->use_count);
		kvm->arch.rma = ri;

		/* Update LPCR and RMOR */
		lpcr = kvm->arch.lpcr;
		if (cpu_has_feature(CPU_FTR_ARCH_201)) {
			/* PPC970; insert RMLS value (split field) in HID4 */
			lpcr &= ~((1ul << HID4_RMLS0_SH) |
				  (3ul << HID4_RMLS2_SH));
			lpcr |= ((rmls >> 2) << HID4_RMLS0_SH) |
				((rmls & 3) << HID4_RMLS2_SH);
			/* RMOR is also in HID4 */
			lpcr |= ((ri->base_pfn >> (26 - PAGE_SHIFT)) & 0xffff)
				<< HID4_RMOR_SH;
		} else {
			/* POWER7 */
			lpcr &= ~(LPCR_VPM0 | LPCR_VRMA_L);
			lpcr |= rmls << LPCR_RMLS_SH;
			kvm->arch.rmor = kvm->arch.rma->base_pfn << PAGE_SHIFT;
		}
		kvm->arch.lpcr = lpcr;
		pr_info("KVM: Using RMO at %lx size %lx (LPCR = %lx)\n",
			ri->base_pfn << PAGE_SHIFT, rma_size, lpcr);

		/* Initialize phys addrs of pages in RMO */
		npages = ri->npages;
		porder = __ilog2(npages);
		physp = memslot->arch.slot_phys;
		if (physp) {
			if (npages > memslot->npages)
				npages = memslot->npages;
			spin_lock(&kvm->arch.slot_phys_lock);
			for (i = 0; i < npages; ++i)
				physp[i] = ((ri->base_pfn + i) << PAGE_SHIFT) +
					porder;
			spin_unlock(&kvm->arch.slot_phys_lock);
		}
	}

	/* Order updates to kvm->arch.lpcr etc. vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}
int kvmppc_core_init_vm(struct kvm *kvm)
{
	unsigned long lpcr, lpid;

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	/*
	 * Since we don't flush the TLB when tearing down a VM,
	 * and this lpid might have previously been used,
	 * make sure we flush on each core before running the new VM.
	 */
	cpumask_setall(&kvm->arch.need_tlb_flush);

	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);

	kvm->arch.rma = NULL;

	kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	if (cpu_has_feature(CPU_FTR_ARCH_201)) {
		/* PPC970; HID4 is effectively the LPCR */
		kvm->arch.host_lpid = 0;
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_HID4);
		lpcr &= ~((3 << HID4_LPID1_SH) | (0xful << HID4_LPID5_SH));
		lpcr |= ((lpid >> 4) << HID4_LPID1_SH) |
			((lpid & 0xf) << HID4_LPID5_SH);
	} else {
		/* POWER7; init LPCR for virtual RMA mode */
		kvm->arch.host_lpid = mfspr(SPRN_LPID);
		kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
		lpcr &= LPCR_PECE | LPCR_LPES;
		lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
			LPCR_VPM0 | LPCR_VPM1;
		kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
			(VRMA_VSID << SLB_VSID_SHIFT_1T);
	}
	kvm->arch.lpcr = lpcr;

	kvm->arch.using_mmu_notifiers = !!cpu_has_feature(CPU_FTR_ARCH_206);
	spin_lock_init(&kvm->arch.slot_phys_lock);

	/*
	 * Don't allow secondary CPU threads to come online
	 * while any KVM VMs exist.
	 */
	inhibit_secondary_onlining();

	return 0;
}
void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	uninhibit_secondary_onlining();

	if (kvm->arch.rma) {
		kvm_release_rma(kvm->arch.rma);
		kvm->arch.rma = NULL;
	}

	kvmppc_free_hpt(kvm);
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
}
/* These are stubs for now */
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
}

/* We don't need to emulate any privileged instructions or dcbz */
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	return EMULATE_FAIL;
}

int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	return EMULATE_FAIL;
}
static int kvmppc_book3s_hv_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
	if (r)
		return r;

	r = kvmppc_mmu_hv_init();

	return r;
}

static void kvmppc_book3s_hv_exit(void)
{
	kvm_exit();
}

module_init(kvmppc_book3s_hv_init);
module_exit(kvmppc_book3s_hv_exit);