/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass,
							S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
							S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )
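/*
 * Illustrative use of for_each_runnable_thread() (the pattern used by
 * the vcore run loop later in this file):
 *
 *	for_each_runnable_thread(i, vcpu, vc)
 *		kvmppc_fast_vcpu_kick_hv(vcpu);
 *
 * i is a scratch int advanced by next_runnable_thread(); the loop
 * visits every runnable vcpu of the vcore, skipping empty slots.
 */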

static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

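	/*
	 * Couldn't use a doorbell; fall back to waking the target thread
	 * through its interrupt controller: via the native XICS
	 * presentation controller if one is set up for it, otherwise by
	 * asking OPAL to set the target's MFRR directly.
	 */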
#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca[cpu].kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct swait_queue_head *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swait_active(wqp)) {
		swake_up(wqp);
		++vcpu->stat.halt_wakeup;
	}

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock.
 * The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

/* Dummy value used in computing PCR value below */
#define PCR_ARCH_300	(PCR_ARCH_207 << 1)

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
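	/*
	 * Each PCR_ARCH_* value is a single bit, with successive
	 * architecture levels occupying successively higher bits, so the
	 * subtraction below produces exactly that mask.  For example, a
	 * POWER9 host running a v2.05-compat guest gets
	 * PCR_ARCH_300 - PCR_ARCH_205
	 *	== PCR_ARCH_205 | PCR_ARCH_206 | PCR_ARCH_207.
	 */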
	vc->pcr = host_pcr_bit - guest_pcr_bit;
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	struct kvm_vcpu *ret;

	mutex_lock(&kvm->lock);
	ret = kvm_get_vcpu_by_id(kvm, id);
	mutex_unlock(&kvm->lock);
	return ret;
}

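/*
 * Initialize a newly registered VPA: mark the vcpu as running in
 * shared-processor mode and give the yield count its initial non-zero
 * value.
 */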
static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

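/*
 * Add an entry to the vcpu's dispatch trace log, charging it with the
 * stolen time accumulated since the last entry, and advance the write
 * pointer, wrapping around at the end of the pinned buffer.
 */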
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;
	unsigned long flags;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}

/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
	int thr;
	struct kvmppc_vcore *vc;

	if (vcpu->arch.doorbell_request)
		return true;
	/*
	 * Ensure that the read of vcore->dpdes comes after the read
	 * of vcpu->doorbell_request.  This barrier matches the
	 * lwsync in book3s_hv_rmhandlers.S just before the
	 * fast_guest_return label.
	 */
	smp_rmb();
	vc = vcpu->arch.vcore;
	thr = vcpu->vcpu_id - vc->first_vcpuid;
	return !!(vc->dpdes & (1 << thr));
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

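/*
 * Handle the H_SET_MODE resources we emulate here: the CIABR and
 * DAWR/DAWRX breakpoint registers.  Values that would take effect in
 * hypervisor state are rejected; any other resource is passed up with
 * H_TOO_HARD.
 */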
static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	default:
		return H_TOO_HARD;
	}
}

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded)
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			if (xive_enabled()) {
				ret = H_NOT_AVAILABLE;
				return RESUME_GUEST;
			}
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

static int kvmppc_emulate_debug_inst(struct kvm_run *run,
				     struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
					EMULATE_DONE) {
		/*
		 * Fetch failed, so return to guest and
		 * try executing it again.
		 */
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}

static void do_nothing(void *x)
{
}

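/*
 * Gather the doorbell-pending state of every vcpu in this vcpu's
 * emulated core into a DPDES image, for mfspr DPDES emulation below.
 */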
static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
{
	int thr, cpu, pcpu, nthreads;
	struct kvm_vcpu *v;
	unsigned long dpdes;

	nthreads = vcpu->kvm->arch.emul_smt_mode;
	dpdes = 0;
	cpu = vcpu->vcpu_id & ~(nthreads - 1);
	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
		if (!v)
			continue;
		/*
		 * If the vcpu is currently running on a physical cpu thread,
		 * interrupt it in order to pull it out of the guest briefly,
		 * which will update its vcore->dpdes value.
		 */
		pcpu = READ_ONCE(v->cpu);
		if (pcpu >= 0)
			smp_call_function_single(pcpu, do_nothing, NULL, 1);
		if (kvmppc_doorbell_pending(v))
			dpdes |= 1 << thr;
	}
	return dpdes;
}

/*
 * On POWER9, emulate doorbell-related instructions in order to
 * give the guest the illusion of running on a multi-threaded core.
 * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
 * and mfspr DPDES.
 */
static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
{
	u32 inst, rb, thr;
	unsigned long arg;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tvcpu;

	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return EMULATE_FAIL;
	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
		return RESUME_GUEST;
	if (get_op(inst) != 31)
		return EMULATE_FAIL;
	rb = get_rb(inst);
	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
	switch (get_xop(inst)) {
	case OP_31_XOP_MSGSNDP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
			break;
		arg &= 0x3f;
		if (arg >= kvm->arch.emul_smt_mode)
			break;
		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
		if (!tvcpu)
			break;
		if (!tvcpu->arch.doorbell_request) {
			tvcpu->arch.doorbell_request = 1;
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		}
		break;
	case OP_31_XOP_MSGCLRP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0xf) != PPC_DBELL_SERVER)
			break;
		vcpu->arch.vcore->dpdes = 0;
		vcpu->arch.doorbell_request = 0;
		break;
	case OP_31_XOP_MFSPR:
		switch (get_sprn(inst)) {
		case SPRN_TIR:
			arg = thr;
			break;
		case SPRN_DPDES:
			arg = kvmppc_read_dpdes(vcpu);
			break;
		default:
			return EMULATE_FAIL;
		}
		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
		break;
	default:
		return EMULATE_FAIL;
	}
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
	return RESUME_GUEST;
}

static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		printk(KERN_EMERG "KVM trap in HV mode!\n");
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		return RESUME_HOST;
	}
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/* Exit to guest with KVM_EXIT_NMI as exit reason */
		run->exit_reason = KVM_EXIT_NMI;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		/* Clear out the old NMI status from run->flags */
		run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK;
		/* Now set the NMI status */
		if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED)
			run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV;
		else
			run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV;

		r = RESUME_HOST;
		/* Print the MCE event to host console. */
		machine_check_print_event_info(&vcpu->arch.mce_evt, false);
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/* hypercall with MSR_PR has already been handled in rmode,
		 * and never reaches here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If guest debug is disabled, generate a program interrupt
	 * to the guest.  If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
	 * Accordingly return to guest or host.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.emul_inst) :
				vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
			r = kvmppc_emulate_debug_inst(run, vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;
	/*
	 * This occurs if the guest (kernel or userspace) does something
	 * that is prohibited by HFSCR.
	 * On POWER9, this could be a doorbell instruction that we need
	 * to emulate.
	 * Otherwise, we just generate a program interrupt to the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		r = EMULATE_FAIL;
		if ((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG)
			r = kvmppc_emulate_doorbell_instr(vcpu);
		if (r == EMULATE_FAIL) {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		bool preserve_top32)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	mutex_lock(&kvm->lock);
	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}

	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 * On POWER8 and POWER9 userspace can also modify AIL
	 * (alternate interrupt location).
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		mask |= LPCR_AIL;
	/*
	 * On POWER9, allow userspace to enable large decrementer for the
	 * guest, whether or not the host has it enabled.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		mask |= LPCR_LD;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
	mutex_unlock(&kvm->lock);
}

static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DABRX:
		*val = get_reg_val(id, vcpu->arch.dabrx);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		*val = get_reg_val(id, vcpu->arch.spmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
	case KVM_REG_PPC_SIER:
		*val = get_reg_val(id, vcpu->arch.sier);
		break;
	case KVM_REG_PPC_IAMR:
		*val = get_reg_val(id, vcpu->arch.iamr);
		break;
	case KVM_REG_PPC_PSPB:
		*val = get_reg_val(id, vcpu->arch.pspb);
		break;
	case KVM_REG_PPC_DPDES:
		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
		break;
	case KVM_REG_PPC_DAWR:
		*val = get_reg_val(id, vcpu->arch.dawr);
		break;
	case KVM_REG_PPC_DAWRX:
		*val = get_reg_val(id, vcpu->arch.dawrx);
		break;
	case KVM_REG_PPC_CIABR:
		*val = get_reg_val(id, vcpu->arch.ciabr);
		break;
	case KVM_REG_PPC_CSIGR:
		*val = get_reg_val(id, vcpu->arch.csigr);
		break;
	case KVM_REG_PPC_TACR:
		*val = get_reg_val(id, vcpu->arch.tacr);
		break;
	case KVM_REG_PPC_TCSCR:
		*val = get_reg_val(id, vcpu->arch.tcscr);
		break;
	case KVM_REG_PPC_PID:
		*val = get_reg_val(id, vcpu->arch.pid);
		break;
	case KVM_REG_PPC_ACOP:
		*val = get_reg_val(id, vcpu->arch.acop);
		break;
	case KVM_REG_PPC_WORT:
		*val = get_reg_val(id, vcpu->arch.wort);
		break;
	case KVM_REG_PPC_TIDR:
		*val = get_reg_val(id, vcpu->arch.tid);
		break;
	case KVM_REG_PPC_PSSCR:
		*val = get_reg_val(id, vcpu->arch.psscr);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DABRX:
		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		vcpu->arch.spmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIER:
		vcpu->arch.sier = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAMR:
		vcpu->arch.iamr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PSPB:
		vcpu->arch.pspb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DPDES:
		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_VTB:
		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWR:
		vcpu->arch.dawr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWRX:
		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
		break;
	case KVM_REG_PPC_CIABR:
		vcpu->arch.ciabr = set_reg_val(id, *val);
		/* Don't allow setting breakpoints in hypervisor code */
		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
		break;
	case KVM_REG_PPC_CSIGR:
		vcpu->arch.csigr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TACR:
		vcpu->arch.tacr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TCSCR:
		vcpu->arch.tcscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PID:
		vcpu->arch.pid = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_ACOP:
		vcpu->arch.acop = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_WORT:
		vcpu->arch.wort = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TIDR:
		vcpu->arch.tid = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PSSCR:
		vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
		break;
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/*
		 * POWER9 DD1 has an erratum where writing TBU40 causes
		 * the timebase to lose ticks.  So we don't let the
		 * timebase offset be changed on P9 DD1.  (It is
		 * initialized to zero.)
		 */
		if (cpu_has_feature(CPU_FTR_POWER9_DD1))
			break;
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
		break;
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

/*
 * On POWER9, threads are independent and can be in different partitions.
 * Therefore we consider each thread to be a subcore.
 * There is a restriction that all threads have to be in the same
 * MMU mode (radix or HPT), unfortunately, but since we only support
 * HPT guests on a HPT host so far, that isn't an impediment yet.
 */
static int threads_per_vcore(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return 1;
	return threads_per_subcore;
}

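/*
 * Allocate and initialize a virtual core structure (one per guest
 * core); vcpus are assigned to it based on kvm->arch.smt_mode.
 */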
static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
{
	struct kvmppc_vcore *vcore;

	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);

	if (vcore == NULL)
		return NULL;

	spin_lock_init(&vcore->lock);
	spin_lock_init(&vcore->stoltb_lock);
	init_swait_queue_head(&vcore->wq);
	vcore->preempt_tb = TB_NIL;
	vcore->lpcr = kvm->arch.lpcr;
	vcore->first_vcpuid = core * kvm->arch.smt_mode;
	vcore->kvm = kvm;
	INIT_LIST_HEAD(&vcore->preempt_list);

	return vcore;
}
1756
b6c295df
PM
1757#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1758static struct debugfs_timings_element {
1759 const char *name;
1760 size_t offset;
1761} timings[] = {
1762 {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)},
1763 {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)},
1764 {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)},
1765 {"guest", offsetof(struct kvm_vcpu, arch.guest_time)},
1766 {"cede", offsetof(struct kvm_vcpu, arch.cede_time)},
1767};
1768
 1769#define N_TIMINGS ARRAY_SIZE(timings)
1770
1771struct debugfs_timings_state {
1772 struct kvm_vcpu *vcpu;
1773 unsigned int buflen;
1774 char buf[N_TIMINGS * 100];
1775};
1776
1777static int debugfs_timings_open(struct inode *inode, struct file *file)
1778{
1779 struct kvm_vcpu *vcpu = inode->i_private;
1780 struct debugfs_timings_state *p;
1781
1782 p = kzalloc(sizeof(*p), GFP_KERNEL);
1783 if (!p)
1784 return -ENOMEM;
1785
1786 kvm_get_kvm(vcpu->kvm);
1787 p->vcpu = vcpu;
1788 file->private_data = p;
1789
1790 return nonseekable_open(inode, file);
1791}
1792
1793static int debugfs_timings_release(struct inode *inode, struct file *file)
1794{
1795 struct debugfs_timings_state *p = file->private_data;
1796
1797 kvm_put_kvm(p->vcpu->kvm);
1798 kfree(p);
1799 return 0;
1800}
1801
1802static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
1803 size_t len, loff_t *ppos)
1804{
1805 struct debugfs_timings_state *p = file->private_data;
1806 struct kvm_vcpu *vcpu = p->vcpu;
1807 char *s, *buf_end;
1808 struct kvmhv_tb_accumulator tb;
1809 u64 count;
1810 loff_t pos;
1811 ssize_t n;
1812 int i, loops;
1813 bool ok;
1814
1815 if (!p->buflen) {
1816 s = p->buf;
1817 buf_end = s + sizeof(p->buf);
1818 for (i = 0; i < N_TIMINGS; ++i) {
1819 struct kvmhv_tb_accumulator *acc;
1820
1821 acc = (struct kvmhv_tb_accumulator *)
1822 ((unsigned long)vcpu + timings[i].offset);
1823 ok = false;
1824 for (loops = 0; loops < 1000; ++loops) {
1825 count = acc->seqcount;
1826 if (!(count & 1)) {
1827 smp_rmb();
1828 tb = *acc;
1829 smp_rmb();
1830 if (count == acc->seqcount) {
1831 ok = true;
1832 break;
1833 }
1834 }
1835 udelay(1);
1836 }
1837 if (!ok)
1838 snprintf(s, buf_end - s, "%s: stuck\n",
1839 timings[i].name);
1840 else
1841 snprintf(s, buf_end - s,
1842 "%s: %llu %llu %llu %llu\n",
1843 timings[i].name, count / 2,
1844 tb_to_ns(tb.tb_total),
1845 tb_to_ns(tb.tb_min),
1846 tb_to_ns(tb.tb_max));
1847 s += strlen(s);
1848 }
1849 p->buflen = s - p->buf;
1850 }
1851
1852 pos = *ppos;
1853 if (pos >= p->buflen)
1854 return 0;
1855 if (len > p->buflen - pos)
1856 len = p->buflen - pos;
1857 n = copy_to_user(buf, p->buf + pos, len);
1858 if (n) {
1859 if (n == len)
1860 return -EFAULT;
1861 len -= n;
1862 }
1863 *ppos = pos + len;
1864 return len;
1865}
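/*
 * Illustrative output only, not from a real run: each line emitted
 * above has the form "<name>: <count> <total> <min> <max>", where
 * count is the number of completed accumulation intervals
 * (seqcount / 2) and the remaining three values are tb_to_ns()
 * conversions of the total, minimum and maximum timebase deltas, e.g.:
 *
 *	rm_entry: 123456 987654321 512 40960
 */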
1866
1867static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
1868 size_t len, loff_t *ppos)
1869{
1870 return -EACCES;
1871}
1872
1873static const struct file_operations debugfs_timings_ops = {
1874 .owner = THIS_MODULE,
1875 .open = debugfs_timings_open,
1876 .release = debugfs_timings_release,
1877 .read = debugfs_timings_read,
1878 .write = debugfs_timings_write,
1879 .llseek = generic_file_llseek,
1880};
1881
1882/* Create a debugfs directory for the vcpu */
1883static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1884{
1885 char buf[16];
1886 struct kvm *kvm = vcpu->kvm;
1887
1888 snprintf(buf, sizeof(buf), "vcpu%u", id);
1889 if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
1890 return;
1891 vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
1892 if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
1893 return;
1894 vcpu->arch.debugfs_timings =
1895 debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
1896 vcpu, &debugfs_timings_ops);
1897}
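/*
 * The resulting file then typically appears under the per-VM kvm
 * debugfs directory; the path shape is illustrative only, e.g.
 * /sys/kernel/debug/kvm/<pid>-<vm_fd>/vcpu0/timings.
 */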
1898
1899#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1900static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
1901{
1902}
1903#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
1904
3a167bea
AK
1905static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1906 unsigned int id)
de56a948
PM
1907{
1908 struct kvm_vcpu *vcpu;
3c313524 1909 int err;
371fefd6
PM
1910 int core;
1911 struct kvmppc_vcore *vcore;
de56a948 1912
371fefd6 1913 err = -ENOMEM;
6b75e6bf 1914 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
de56a948
PM
1915 if (!vcpu)
1916 goto out;
1917
1918 err = kvm_vcpu_init(vcpu, kvm, id);
1919 if (err)
1920 goto free_vcpu;
1921
1922 vcpu->arch.shared = &vcpu->arch.shregs;
5deb8e7a
AG
1923#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1924 /*
1925 * The shared struct is never shared on HV,
1926 * so we can always use host endianness
1927 */
1928#ifdef __BIG_ENDIAN__
1929 vcpu->arch.shared_big_endian = true;
1930#else
1931 vcpu->arch.shared_big_endian = false;
1932#endif
1933#endif
de56a948
PM
1934 vcpu->arch.mmcr[0] = MMCR0_FC;
1935 vcpu->arch.ctrl = CTRL_RUNLATCH;
1936 /* default to host PVR, since we can't spoof it */
3a167bea 1937 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2e25aa5f 1938 spin_lock_init(&vcpu->arch.vpa_update_lock);
c7b67670
PM
1939 spin_lock_init(&vcpu->arch.tbacct_lock);
1940 vcpu->arch.busy_preempt = TB_NIL;
d682916a 1941 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
de56a948 1942
769377f7
PM
1943 /*
1944 * Set the default HFSCR for the guest from the host value.
1945 * This value is only used on POWER9.
1946 * On POWER9 DD1, TM doesn't work, so we make sure to
1947 * prevent the guest from using it.
57900694
PM
1948 * On POWER9, we want to virtualize the doorbell facility, so we
 1949 * turn off the HFSCR_MSGP bit, which causes those instructions to trap.
769377f7
PM
1950 */
1951 vcpu->arch.hfscr = mfspr(SPRN_HFSCR);
1952 if (!cpu_has_feature(CPU_FTR_TM))
1953 vcpu->arch.hfscr &= ~HFSCR_TM;
57900694
PM
1954 if (cpu_has_feature(CPU_FTR_ARCH_300))
1955 vcpu->arch.hfscr &= ~HFSCR_MSGP;
769377f7 1956
de56a948
PM
1957 kvmppc_mmu_book3s_hv_init(vcpu);
1958
8455d79e 1959 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
371fefd6
PM
1960
1961 init_waitqueue_head(&vcpu->arch.cpu_run);
1962
1963 mutex_lock(&kvm->lock);
3c313524
PM
1964 vcore = NULL;
1965 err = -EINVAL;
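	/* e.g. with smt_mode 8, vcpu ids 0-7 map to vcore 0, ids 8-15 to vcore 1 */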
1966 core = id / kvm->arch.smt_mode;
1967 if (core < KVM_MAX_VCORES) {
1968 vcore = kvm->arch.vcores[core];
1969 if (!vcore) {
1970 err = -ENOMEM;
1971 vcore = kvmppc_vcore_create(kvm, core);
1972 kvm->arch.vcores[core] = vcore;
1973 kvm->arch.online_vcores++;
1974 }
371fefd6
PM
1975 }
1976 mutex_unlock(&kvm->lock);
1977
1978 if (!vcore)
1979 goto free_vcpu;
1980
1981 spin_lock(&vcore->lock);
1982 ++vcore->num_threads;
371fefd6
PM
1983 spin_unlock(&vcore->lock);
1984 vcpu->arch.vcore = vcore;
e0b7ec05 1985 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
ec257165 1986 vcpu->arch.thread_cpu = -1;
a29ebeaf 1987 vcpu->arch.prev_cpu = -1;
371fefd6 1988
af8f38b3
AG
1989 vcpu->arch.cpu_type = KVM_CPU_3S_64;
1990 kvmppc_sanity_check(vcpu);
1991
b6c295df
PM
1992 debugfs_vcpu_init(vcpu, id);
1993
de56a948
PM
1994 return vcpu;
1995
1996free_vcpu:
6b75e6bf 1997 kmem_cache_free(kvm_vcpu_cache, vcpu);
de56a948
PM
1998out:
1999 return ERR_PTR(err);
2000}
2001
3c313524
PM
2002static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode,
2003 unsigned long flags)
2004{
2005 int err;
57900694 2006 int esmt = 0;
3c313524
PM
2007
2008 if (flags)
2009 return -EINVAL;
2010 if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode))
2011 return -EINVAL;
2012 if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
2013 /*
2014 * On POWER8 (or POWER7), the threading mode is "strict",
2015 * so we pack smt_mode vcpus per vcore.
2016 */
2017 if (smt_mode > threads_per_subcore)
2018 return -EINVAL;
2019 } else {
2020 /*
2021 * On POWER9, the threading mode is "loose",
2022 * so each vcpu gets its own vcore.
2023 */
57900694 2024 esmt = smt_mode;
3c313524
PM
2025 smt_mode = 1;
2026 }
2027 mutex_lock(&kvm->lock);
2028 err = -EBUSY;
2029 if (!kvm->arch.online_vcores) {
2030 kvm->arch.smt_mode = smt_mode;
57900694 2031 kvm->arch.emul_smt_mode = esmt;
3c313524
PM
2032 err = 0;
2033 }
2034 mutex_unlock(&kvm->lock);
2035
2036 return err;
2037}
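/*
 * A minimal userspace sketch of how this is reached (assuming a VM fd
 * from KVM_CREATE_VM; the KVM_CAP_PPC_SMT capability routes here via
 * kvm_vm_ioctl_enable_cap()).  It must be done before any vcore is
 * created, or -EBUSY is returned:
 *
 *	struct kvm_enable_cap cap = {
 *		.cap = KVM_CAP_PPC_SMT,
 *		.args = { 4, 0 },	// smt_mode = 4, flags = 0
 *	};
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP(KVM_CAP_PPC_SMT)");
 */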
2038
c35635ef
PM
2039static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
2040{
2041 if (vpa->pinned_addr)
2042 kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
2043 vpa->dirty);
2044}
2045
3a167bea 2046static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
de56a948 2047{
2e25aa5f 2048 spin_lock(&vcpu->arch.vpa_update_lock);
c35635ef
PM
2049 unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
2050 unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
2051 unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
2e25aa5f 2052 spin_unlock(&vcpu->arch.vpa_update_lock);
de56a948 2053 kvm_vcpu_uninit(vcpu);
6b75e6bf 2054 kmem_cache_free(kvm_vcpu_cache, vcpu);
de56a948
PM
2055}
2056
3a167bea
AK
2057static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
2058{
2059 /* Indicate we want to get back into the guest */
2060 return 1;
2061}
2062
19ccb76a 2063static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
371fefd6 2064{
19ccb76a 2065 unsigned long dec_nsec, now;
371fefd6 2066
19ccb76a
PM
2067 now = get_tb();
2068 if (now > vcpu->arch.dec_expires) {
2069 /* decrementer has already gone negative */
2070 kvmppc_core_queue_dec(vcpu);
7e28e60e 2071 kvmppc_core_prepare_to_enter(vcpu);
19ccb76a 2072 return;
371fefd6 2073 }
19ccb76a
PM
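	/* scale the remaining timebase ticks to ns for the hrtimer below */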
2074 dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
2075 / tb_ticks_per_sec;
8b0e1953 2076 hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
19ccb76a 2077 vcpu->arch.timer_running = 1;
371fefd6
PM
2078}
2079
19ccb76a 2080static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
371fefd6 2081{
19ccb76a
PM
2082 vcpu->arch.ceded = 0;
2083 if (vcpu->arch.timer_running) {
2084 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2085 vcpu->arch.timer_running = 0;
2086 }
371fefd6
PM
2087}
2088
8b24e69f 2089extern int __kvmppc_vcore_entry(void);
de56a948 2090
371fefd6
PM
2091static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
2092 struct kvm_vcpu *vcpu)
de56a948 2093{
c7b67670
PM
2094 u64 now;
2095
371fefd6
PM
2096 if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
2097 return;
bf3d32e1 2098 spin_lock_irq(&vcpu->arch.tbacct_lock);
c7b67670
PM
2099 now = mftb();
2100 vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
2101 vcpu->arch.stolen_logged;
2102 vcpu->arch.busy_preempt = now;
2103 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
bf3d32e1 2104 spin_unlock_irq(&vcpu->arch.tbacct_lock);
371fefd6 2105 --vc->n_runnable;
7b5f8272 2106 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
371fefd6
PM
2107}
2108
f0888f70
PM
2109static int kvmppc_grab_hwthread(int cpu)
2110{
2111 struct paca_struct *tpaca;
b754c739 2112 long timeout = 10000;
f0888f70
PM
2113
2114 tpaca = &paca[cpu];
2115
2116 /* Ensure the thread won't go into the kernel if it wakes */
7b444c67 2117 tpaca->kvm_hstate.kvm_vcpu = NULL;
b4deba5c 2118 tpaca->kvm_hstate.kvm_vcore = NULL;
5d5b99cd
PM
2119 tpaca->kvm_hstate.napping = 0;
2120 smp_wmb();
2121 tpaca->kvm_hstate.hwthread_req = 1;
f0888f70
PM
2122
2123 /*
2124 * If the thread is already executing in the kernel (e.g. handling
2125 * a stray interrupt), wait for it to get back to nap mode.
2126 * The smp_mb() is to ensure that our setting of hwthread_req
2127 * is visible before we look at hwthread_state, so if this
2128 * races with the code at system_reset_pSeries and the thread
2129 * misses our setting of hwthread_req, we are sure to see its
2130 * setting of hwthread_state, and vice versa.
2131 */
2132 smp_mb();
2133 while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
2134 if (--timeout <= 0) {
2135 pr_err("KVM: couldn't grab cpu %d\n", cpu);
2136 return -EBUSY;
2137 }
2138 udelay(1);
2139 }
2140 return 0;
2141}
2142
2143static void kvmppc_release_hwthread(int cpu)
2144{
2145 struct paca_struct *tpaca;
2146
2147 tpaca = &paca[cpu];
2148 tpaca->kvm_hstate.hwthread_req = 0;
2149 tpaca->kvm_hstate.kvm_vcpu = NULL;
b4deba5c
PM
2150 tpaca->kvm_hstate.kvm_vcore = NULL;
2151 tpaca->kvm_hstate.kvm_split_mode = NULL;
f0888f70
PM
2152}
2153
a29ebeaf
PM
2154static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
2155{
2156 int i;
2157
2158 cpu = cpu_first_thread_sibling(cpu);
2159 cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
2160 /*
2161 * Make sure setting of bit in need_tlb_flush precedes
2162 * testing of cpu_in_guest bits. The matching barrier on
2163 * the other side is the first smp_mb() in kvmppc_run_core().
2164 */
2165 smp_mb();
2166 for (i = 0; i < threads_per_core; ++i)
2167 if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
2168 smp_call_function_single(cpu + i, do_nothing, NULL, 1);
2169}
2170
8b24e69f
PM
2171static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu)
2172{
2173 struct kvm *kvm = vcpu->kvm;
2174
2175 /*
2176 * With radix, the guest can do TLB invalidations itself,
2177 * and it could choose to use the local form (tlbiel) if
2178 * it is invalidating a translation that has only ever been
2179 * used on one vcpu. However, that doesn't mean it has
2180 * only ever been used on one physical cpu, since vcpus
2181 * can move around between pcpus. To cope with this, when
2182 * a vcpu moves from one pcpu to another, we need to tell
2183 * any vcpus running on the same core as this vcpu previously
2184 * ran to flush the TLB. The TLB is shared between threads,
2185 * so we use a single bit in .need_tlb_flush for all 4 threads.
2186 */
2187 if (vcpu->arch.prev_cpu != pcpu) {
2188 if (vcpu->arch.prev_cpu >= 0 &&
2189 cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
2190 cpu_first_thread_sibling(pcpu))
2191 radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
2192 vcpu->arch.prev_cpu = pcpu;
2193 }
2194}
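/*
 * Example: a vcpu that last ran on pcpu 5 (core 0 of an SMT8 system)
 * and is now starting on pcpu 9 (core 1) causes the need_tlb_flush
 * bit for core 0's first thread to be set, and any sibling threads
 * still running this guest on core 0 get an IPI so they notice it.
 */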
2195
b4deba5c 2196static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
371fefd6
PM
2197{
2198 int cpu;
2199 struct paca_struct *tpaca;
a29ebeaf 2200 struct kvm *kvm = vc->kvm;
371fefd6 2201
b4deba5c
PM
2202 cpu = vc->pcpu;
2203 if (vcpu) {
2204 if (vcpu->arch.timer_running) {
2205 hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
2206 vcpu->arch.timer_running = 0;
2207 }
2208 cpu += vcpu->arch.ptid;
898b25b2 2209 vcpu->cpu = vc->pcpu;
b4deba5c 2210 vcpu->arch.thread_cpu = cpu;
a29ebeaf 2211 cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
19ccb76a 2212 }
371fefd6 2213 tpaca = &paca[cpu];
5d5b99cd 2214 tpaca->kvm_hstate.kvm_vcpu = vcpu;
898b25b2 2215 tpaca->kvm_hstate.ptid = cpu - vc->pcpu;
ec257165 2216 /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
371fefd6 2217 smp_wmb();
898b25b2 2218 tpaca->kvm_hstate.kvm_vcore = vc;
5d5b99cd 2219 if (cpu != smp_processor_id())
66feed61 2220 kvmppc_ipi_thread(cpu);
371fefd6 2221}
de56a948 2222
5d5b99cd 2223static void kvmppc_wait_for_nap(void)
371fefd6 2224{
5d5b99cd
PM
2225 int cpu = smp_processor_id();
2226 int i, loops;
45c940ba 2227 int n_threads = threads_per_vcore();
371fefd6 2228
45c940ba
PM
2229 if (n_threads <= 1)
2230 return;
5d5b99cd
PM
2231 for (loops = 0; loops < 1000000; ++loops) {
2232 /*
2233 * Check if all threads are finished.
b4deba5c 2234 * We set the vcore pointer when starting a thread
5d5b99cd 2235 * and the thread clears it when finished, so we look
b4deba5c 2236 * for any threads that still have a non-NULL vcore ptr.
5d5b99cd 2237 */
45c940ba 2238 for (i = 1; i < n_threads; ++i)
b4deba5c 2239 if (paca[cpu + i].kvm_hstate.kvm_vcore)
5d5b99cd 2240 break;
45c940ba 2241 if (i == n_threads) {
5d5b99cd
PM
2242 HMT_medium();
2243 return;
371fefd6 2244 }
5d5b99cd 2245 HMT_low();
371fefd6
PM
2246 }
2247 HMT_medium();
45c940ba 2248 for (i = 1; i < n_threads; ++i)
b4deba5c 2249 if (paca[cpu + i].kvm_hstate.kvm_vcore)
5d5b99cd 2250 pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
371fefd6
PM
2251}
2252
2253/*
2254 * Check that we are on thread 0 and that any other threads in
7b444c67
PM
2255 * this core are off-line. Then grab the threads so they can't
2256 * enter the kernel.
371fefd6
PM
2257 */
2258static int on_primary_thread(void)
2259{
2260 int cpu = smp_processor_id();
3102f784 2261 int thr;
371fefd6 2262
3102f784
ME
2263 /* Are we on a primary subcore? */
2264 if (cpu_thread_in_subcore(cpu))
371fefd6 2265 return 0;
3102f784
ME
2266
2267 thr = 0;
2268 while (++thr < threads_per_subcore)
371fefd6
PM
2269 if (cpu_online(cpu + thr))
2270 return 0;
7b444c67
PM
2271
2272 /* Grab all hw threads so they can't go into the kernel */
3102f784 2273 for (thr = 1; thr < threads_per_subcore; ++thr) {
7b444c67
PM
2274 if (kvmppc_grab_hwthread(cpu + thr)) {
2275 /* Couldn't grab one; let the others go */
2276 do {
2277 kvmppc_release_hwthread(cpu + thr);
2278 } while (--thr > 0);
2279 return 0;
2280 }
2281 }
371fefd6
PM
2282 return 1;
2283}
2284
ec257165
PM
2285/*
2286 * A list of virtual cores for each physical CPU.
2287 * These are vcores that could run but their runner VCPU tasks are
2288 * (or may be) preempted.
2289 */
2290struct preempted_vcore_list {
2291 struct list_head list;
2292 spinlock_t lock;
2293};
2294
2295static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);
2296
2297static void init_vcore_lists(void)
2298{
2299 int cpu;
2300
2301 for_each_possible_cpu(cpu) {
2302 struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
2303 spin_lock_init(&lp->lock);
2304 INIT_LIST_HEAD(&lp->list);
2305 }
2306}
2307
2308static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
2309{
2310 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2311
2312 vc->vcore_state = VCORE_PREEMPT;
2313 vc->pcpu = smp_processor_id();
45c940ba 2314 if (vc->num_threads < threads_per_vcore()) {
ec257165
PM
2315 spin_lock(&lp->lock);
2316 list_add_tail(&vc->preempt_list, &lp->list);
2317 spin_unlock(&lp->lock);
2318 }
2319
2320 /* Start accumulating stolen time */
2321 kvmppc_core_start_stolen(vc);
2322}
2323
2324static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
2325{
402813fe 2326 struct preempted_vcore_list *lp;
ec257165
PM
2327
2328 kvmppc_core_end_stolen(vc);
2329 if (!list_empty(&vc->preempt_list)) {
402813fe 2330 lp = &per_cpu(preempted_vcores, vc->pcpu);
ec257165
PM
2331 spin_lock(&lp->lock);
2332 list_del_init(&vc->preempt_list);
2333 spin_unlock(&lp->lock);
2334 }
2335 vc->vcore_state = VCORE_INACTIVE;
2336}
2337
b4deba5c
PM
2338/*
2339 * This stores information about the virtual cores currently
2340 * assigned to a physical core.
2341 */
ec257165 2342struct core_info {
b4deba5c
PM
2343 int n_subcores;
2344 int max_subcore_threads;
ec257165 2345 int total_threads;
b4deba5c 2346 int subcore_threads[MAX_SUBCORES];
898b25b2 2347 struct kvmppc_vcore *vc[MAX_SUBCORES];
ec257165
PM
2348};
2349
b4deba5c
PM
2350/*
2351 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
2352 * respectively in 2-way micro-threading (split-core) mode.
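 * In 4-way mode each subcore spans two threads, so subcores 0-3 start
 * at threads 0, 4, 2 and 6 respectively.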
2353 */
2354static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
2355
ec257165
PM
2356static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
2357{
2358 memset(cip, 0, sizeof(*cip));
b4deba5c
PM
2359 cip->n_subcores = 1;
2360 cip->max_subcore_threads = vc->num_threads;
ec257165 2361 cip->total_threads = vc->num_threads;
b4deba5c 2362 cip->subcore_threads[0] = vc->num_threads;
898b25b2 2363 cip->vc[0] = vc;
b4deba5c
PM
2364}
2365
2366static bool subcore_config_ok(int n_subcores, int n_threads)
2367{
2368 /* Can only dynamically split if unsplit to begin with */
2369 if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
2370 return false;
2371 if (n_subcores > MAX_SUBCORES)
2372 return false;
2373 if (n_subcores > 1) {
2374 if (!(dynamic_mt_modes & 2))
2375 n_subcores = 4;
2376 if (n_subcores > 2 && !(dynamic_mt_modes & 4))
2377 return false;
2378 }
2379
2380 return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
ec257165
PM
2381}
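/*
 * Worked example with the default dynamic_mt_modes = 6 (2-way and
 * 4-way allowed) and MAX_SMT_THREADS = 8: (2 subcores, 4 threads)
 * gives 2 * 4 = 8, OK; (3, 2) gives 3 * 2 = 6, OK; (4, 3) gives
 * 4 * roundup_pow_of_two(3) = 16, rejected.  With dynamic_mt_modes
 * = 4, any split is promoted to 4-way, so (2, 4) becomes (4, 4)
 * and is rejected.
 */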
2382
898b25b2 2383static void init_vcore_to_run(struct kvmppc_vcore *vc)
ec257165 2384{
ec257165
PM
2385 vc->entry_exit_map = 0;
2386 vc->in_guest = 0;
2387 vc->napping_threads = 0;
2388 vc->conferring_threads = 0;
2389}
2390
b4deba5c
PM
2391static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
2392{
2393 int n_threads = vc->num_threads;
2394 int sub;
2395
2396 if (!cpu_has_feature(CPU_FTR_ARCH_207S))
2397 return false;
2398
2399 if (n_threads < cip->max_subcore_threads)
2400 n_threads = cip->max_subcore_threads;
b009031f 2401 if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
b4deba5c 2402 return false;
b009031f 2403 cip->max_subcore_threads = n_threads;
b4deba5c
PM
2404
2405 sub = cip->n_subcores;
2406 ++cip->n_subcores;
2407 cip->total_threads += vc->num_threads;
2408 cip->subcore_threads[sub] = vc->num_threads;
898b25b2
PM
2409 cip->vc[sub] = vc;
2410 init_vcore_to_run(vc);
2411 list_del_init(&vc->preempt_list);
b4deba5c
PM
2412
2413 return true;
2414}
2415
b4deba5c
PM
2416/*
2417 * Work out whether it is possible to piggyback the execution of
2418 * vcore *pvc onto the execution of the other vcores described in *cip.
2419 */
2420static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
2421 int target_threads)
2422{
b4deba5c
PM
2423 if (cip->total_threads + pvc->num_threads > target_threads)
2424 return false;
b4deba5c 2425
b009031f 2426 return can_dynamic_split(pvc, cip);
b4deba5c
PM
2427}
2428
d911f0be
PM
2429static void prepare_threads(struct kvmppc_vcore *vc)
2430{
7b5f8272
SJS
2431 int i;
2432 struct kvm_vcpu *vcpu;
d911f0be 2433
7b5f8272 2434 for_each_runnable_thread(i, vcpu, vc) {
d911f0be
PM
2435 if (signal_pending(vcpu->arch.run_task))
2436 vcpu->arch.ret = -EINTR;
2437 else if (vcpu->arch.vpa.update_pending ||
2438 vcpu->arch.slb_shadow.update_pending ||
2439 vcpu->arch.dtl.update_pending)
2440 vcpu->arch.ret = RESUME_GUEST;
2441 else
2442 continue;
2443 kvmppc_remove_runnable(vc, vcpu);
2444 wake_up(&vcpu->arch.cpu_run);
2445 }
2446}
2447
ec257165
PM
2448static void collect_piggybacks(struct core_info *cip, int target_threads)
2449{
2450 struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
2451 struct kvmppc_vcore *pvc, *vcnext;
2452
2453 spin_lock(&lp->lock);
2454 list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
2455 if (!spin_trylock(&pvc->lock))
2456 continue;
2457 prepare_threads(pvc);
2458 if (!pvc->n_runnable) {
2459 list_del_init(&pvc->preempt_list);
2460 if (pvc->runner == NULL) {
2461 pvc->vcore_state = VCORE_INACTIVE;
2462 kvmppc_core_end_stolen(pvc);
2463 }
2464 spin_unlock(&pvc->lock);
2465 continue;
2466 }
2467 if (!can_piggyback(pvc, cip, target_threads)) {
2468 spin_unlock(&pvc->lock);
2469 continue;
2470 }
2471 kvmppc_core_end_stolen(pvc);
2472 pvc->vcore_state = VCORE_PIGGYBACK;
2473 if (cip->total_threads >= target_threads)
2474 break;
2475 }
2476 spin_unlock(&lp->lock);
2477}
2478
8b24e69f
PM
2479static bool recheck_signals(struct core_info *cip)
2480{
2481 int sub, i;
2482 struct kvm_vcpu *vcpu;
2483
2484 for (sub = 0; sub < cip->n_subcores; ++sub)
2485 for_each_runnable_thread(i, vcpu, cip->vc[sub])
2486 if (signal_pending(vcpu->arch.run_task))
2487 return true;
2488 return false;
2489}
2490
ec257165 2491static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
25fedfca 2492{
7b5f8272 2493 int still_running = 0, i;
25fedfca
PM
2494 u64 now;
2495 long ret;
7b5f8272 2496 struct kvm_vcpu *vcpu;
25fedfca 2497
ec257165 2498 spin_lock(&vc->lock);
25fedfca 2499 now = get_tb();
7b5f8272 2500 for_each_runnable_thread(i, vcpu, vc) {
25fedfca
PM
2501 /* cancel pending dec exception if dec is positive */
2502 if (now < vcpu->arch.dec_expires &&
2503 kvmppc_core_pending_dec(vcpu))
2504 kvmppc_core_dequeue_dec(vcpu);
2505
2506 trace_kvm_guest_exit(vcpu);
2507
2508 ret = RESUME_GUEST;
2509 if (vcpu->arch.trap)
2510 ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
2511 vcpu->arch.run_task);
2512
2513 vcpu->arch.ret = ret;
2514 vcpu->arch.trap = 0;
2515
ec257165
PM
2516 if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
2517 if (vcpu->arch.pending_exceptions)
2518 kvmppc_core_prepare_to_enter(vcpu);
2519 if (vcpu->arch.ceded)
25fedfca 2520 kvmppc_set_timer(vcpu);
ec257165
PM
2521 else
2522 ++still_running;
2523 } else {
25fedfca
PM
2524 kvmppc_remove_runnable(vc, vcpu);
2525 wake_up(&vcpu->arch.cpu_run);
2526 }
2527 }
ec257165 2528 if (!is_master) {
563a1e93 2529 if (still_running > 0) {
ec257165 2530 kvmppc_vcore_preempt(vc);
563a1e93
PM
2531 } else if (vc->runner) {
2532 vc->vcore_state = VCORE_PREEMPT;
2533 kvmppc_core_start_stolen(vc);
2534 } else {
2535 vc->vcore_state = VCORE_INACTIVE;
2536 }
ec257165
PM
2537 if (vc->n_runnable > 0 && vc->runner == NULL) {
2538 /* make sure there's a candidate runner awake */
7b5f8272
SJS
2539 i = -1;
2540 vcpu = next_runnable_thread(vc, &i);
ec257165
PM
2541 wake_up(&vcpu->arch.cpu_run);
2542 }
2543 }
2544 spin_unlock(&vc->lock);
25fedfca
PM
2545}
2546
b8e6a87c
SW
2547/*
2548 * Clear core from the list of active host cores as we are about to
2549 * enter the guest. Only do this if it is the primary thread of the
2550 * core (not if a subcore) that is entering the guest.
2551 */
3f7cd919 2552static inline int kvmppc_clear_host_core(unsigned int cpu)
b8e6a87c
SW
2553{
2554 int core;
2555
2556 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3f7cd919 2557 return 0;
b8e6a87c
SW
2558 /*
2559 * Memory barrier can be omitted here as we will do a smp_wmb()
 2560 * later in kvmppc_start_thread and we need to ensure that state is
2561 * visible to other CPUs only after we enter guest.
2562 */
2563 core = cpu >> threads_shift;
2564 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
3f7cd919 2565 return 0;
b8e6a87c
SW
2566}
2567
2568/*
 2569 * Advertise this core as an active host core since we exited the guest.
2570 * Only need to do this if it is the primary thread of the core that is
2571 * exiting.
2572 */
3f7cd919 2573static inline int kvmppc_set_host_core(unsigned int cpu)
b8e6a87c
SW
2574{
2575 int core;
2576
2577 if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
3f7cd919 2578 return 0;
b8e6a87c
SW
2579
2580 /*
2581 * Memory barrier can be omitted here because we do a spin_unlock
2582 * immediately after this which provides the memory barrier.
2583 */
2584 core = cpu >> threads_shift;
2585 kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
3f7cd919 2586 return 0;
b8e6a87c
SW
2587}
2588
8b24e69f
PM
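/*
 * Record which interrupt took us out of the guest, so that the host
 * replays it when interrupts are hard-enabled again after the exit
 * path has finished.
 */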
2589static void set_irq_happened(int trap)
2590{
2591 switch (trap) {
2592 case BOOK3S_INTERRUPT_EXTERNAL:
2593 local_paca->irq_happened |= PACA_IRQ_EE;
2594 break;
2595 case BOOK3S_INTERRUPT_H_DOORBELL:
2596 local_paca->irq_happened |= PACA_IRQ_DBELL;
2597 break;
2598 case BOOK3S_INTERRUPT_HMI:
2599 local_paca->irq_happened |= PACA_IRQ_HMI;
2600 break;
2601 }
2602}
2603
371fefd6
PM
2604/*
2605 * Run a set of guest threads on a physical core.
2606 * Called with vc->lock held.
2607 */
66feed61 2608static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
371fefd6 2609{
7b5f8272 2610 struct kvm_vcpu *vcpu;
d911f0be 2611 int i;
2c9097e4 2612 int srcu_idx;
ec257165 2613 struct core_info core_info;
898b25b2 2614 struct kvmppc_vcore *pvc;
b4deba5c
PM
2615 struct kvm_split_mode split_info, *sip;
2616 int split, subcore_size, active;
2617 int sub;
2618 bool thr0_done;
2619 unsigned long cmd_bit, stat_bit;
ec257165
PM
2620 int pcpu, thr;
2621 int target_threads;
45c940ba 2622 int controlled_threads;
8b24e69f 2623 int trap;
371fefd6 2624
d911f0be
PM
2625 /*
2626 * Remove from the list any threads that have a signal pending
2627 * or need a VPA update done
2628 */
2629 prepare_threads(vc);
2630
2631 /* if the runner is no longer runnable, let the caller pick a new one */
2632 if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
2633 return;
081f323b
PM
2634
2635 /*
d911f0be 2636 * Initialize *vc.
081f323b 2637 */
898b25b2 2638 init_vcore_to_run(vc);
2711e248 2639 vc->preempt_tb = TB_NIL;
081f323b 2640
45c940ba
PM
2641 /*
2642 * Number of threads that we will be controlling: the same as
2643 * the number of threads per subcore, except on POWER9,
2644 * where it's 1 because the threads are (mostly) independent.
2645 */
2646 controlled_threads = threads_per_vcore();
2647
7b444c67 2648 /*
3102f784
ME
2649 * Make sure we are running on primary threads, and that secondary
2650 * threads are offline. Also check if the number of threads in this
 2651 * guest is greater than the current system threads per guest.
7b444c67 2652 */
45c940ba 2653 if ((controlled_threads > 1) &&
3102f784 2654 ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
7b5f8272 2655 for_each_runnable_thread(i, vcpu, vc) {
7b444c67 2656 vcpu->arch.ret = -EBUSY;
25fedfca
PM
2657 kvmppc_remove_runnable(vc, vcpu);
2658 wake_up(&vcpu->arch.cpu_run);
2659 }
7b444c67
PM
2660 goto out;
2661 }
2662
ec257165
PM
2663 /*
2664 * See if we could run any other vcores on the physical core
2665 * along with this one.
2666 */
2667 init_core_info(&core_info, vc);
2668 pcpu = smp_processor_id();
45c940ba 2669 target_threads = controlled_threads;
ec257165
PM
2670 if (target_smt_mode && target_smt_mode < target_threads)
2671 target_threads = target_smt_mode;
2672 if (vc->num_threads < target_threads)
2673 collect_piggybacks(&core_info, target_threads);
3102f784 2674
8b24e69f
PM
2675 /*
2676 * On radix, arrange for TLB flushing if necessary.
2677 * This has to be done before disabling interrupts since
2678 * it uses smp_call_function().
2679 */
2680 pcpu = smp_processor_id();
2681 if (kvm_is_radix(vc->kvm)) {
2682 for (sub = 0; sub < core_info.n_subcores; ++sub)
2683 for_each_runnable_thread(i, vcpu, core_info.vc[sub])
2684 kvmppc_prepare_radix_vcpu(vcpu, pcpu);
2685 }
2686
2687 /*
2688 * Hard-disable interrupts, and check resched flag and signals.
2689 * If we need to reschedule or deliver a signal, clean up
2690 * and return without going into the guest(s).
2691 */
2692 local_irq_disable();
2693 hard_irq_disable();
2694 if (lazy_irq_pending() || need_resched() ||
2695 recheck_signals(&core_info)) {
2696 local_irq_enable();
2697 vc->vcore_state = VCORE_INACTIVE;
2698 /* Unlock all except the primary vcore */
2699 for (sub = 1; sub < core_info.n_subcores; ++sub) {
2700 pvc = core_info.vc[sub];
2701 /* Put back on to the preempted vcores list */
2702 kvmppc_vcore_preempt(pvc);
2703 spin_unlock(&pvc->lock);
2704 }
2705 for (i = 0; i < controlled_threads; ++i)
2706 kvmppc_release_hwthread(pcpu + i);
2707 return;
2708 }
2709
2710 kvmppc_clear_host_core(pcpu);
2711
b4deba5c
PM
2712 /* Decide on micro-threading (split-core) mode */
2713 subcore_size = threads_per_subcore;
2714 cmd_bit = stat_bit = 0;
2715 split = core_info.n_subcores;
2716 sip = NULL;
2717 if (split > 1) {
2718 /* threads_per_subcore must be MAX_SMT_THREADS (8) here */
2719 if (split == 2 && (dynamic_mt_modes & 2)) {
2720 cmd_bit = HID0_POWER8_1TO2LPAR;
2721 stat_bit = HID0_POWER8_2LPARMODE;
2722 } else {
2723 split = 4;
2724 cmd_bit = HID0_POWER8_1TO4LPAR;
2725 stat_bit = HID0_POWER8_4LPARMODE;
2726 }
2727 subcore_size = MAX_SMT_THREADS / split;
2728 sip = &split_info;
2729 memset(&split_info, 0, sizeof(split_info));
2730 split_info.rpr = mfspr(SPRN_RPR);
2731 split_info.pmmar = mfspr(SPRN_PMMAR);
2732 split_info.ldbar = mfspr(SPRN_LDBAR);
2733 split_info.subcore_size = subcore_size;
2734 for (sub = 0; sub < core_info.n_subcores; ++sub)
898b25b2 2735 split_info.vc[sub] = core_info.vc[sub];
b4deba5c
PM
2736 /* order writes to split_info before kvm_split_mode pointer */
2737 smp_wmb();
2738 }
45c940ba 2739 for (thr = 0; thr < controlled_threads; ++thr)
b4deba5c
PM
2740 paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;
2741
2742 /* Initiate micro-threading (split-core) if required */
2743 if (cmd_bit) {
2744 unsigned long hid0 = mfspr(SPRN_HID0);
2745
2746 hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
2747 mb();
2748 mtspr(SPRN_HID0, hid0);
2749 isync();
2750 for (;;) {
2751 hid0 = mfspr(SPRN_HID0);
2752 if (hid0 & stat_bit)
2753 break;
2754 cpu_relax();
ec257165 2755 }
2e25aa5f 2756 }
3102f784 2757
b4deba5c
PM
2758 /* Start all the threads */
2759 active = 0;
2760 for (sub = 0; sub < core_info.n_subcores; ++sub) {
2761 thr = subcore_thread_map[sub];
2762 thr0_done = false;
2763 active |= 1 << thr;
898b25b2
PM
2764 pvc = core_info.vc[sub];
2765 pvc->pcpu = pcpu + thr;
2766 for_each_runnable_thread(i, vcpu, pvc) {
2767 kvmppc_start_thread(vcpu, pvc);
2768 kvmppc_create_dtl_entry(vcpu, pvc);
2769 trace_kvm_guest_enter(vcpu);
2770 if (!vcpu->arch.ptid)
2771 thr0_done = true;
2772 active |= 1 << (thr + vcpu->arch.ptid);
b4deba5c 2773 }
898b25b2
PM
2774 /*
2775 * We need to start the first thread of each subcore
2776 * even if it doesn't have a vcpu.
2777 */
2778 if (!thr0_done)
2779 kvmppc_start_thread(NULL, pvc);
2780 thr += pvc->num_threads;
2e25aa5f 2781 }
371fefd6 2782
7f235328
GS
2783 /*
2784 * Ensure that split_info.do_nap is set after setting
2785 * the vcore pointer in the PACA of the secondaries.
2786 */
2787 smp_mb();
2788 if (cmd_bit)
2789 split_info.do_nap = 1; /* ask secondaries to nap when done */
2790
b4deba5c
PM
2791 /*
2792 * When doing micro-threading, poke the inactive threads as well.
2793 * This gets them to the nap instruction after kvm_do_nap,
2794 * which reduces the time taken to unsplit later.
2795 */
2796 if (split > 1)
2797 for (thr = 1; thr < threads_per_subcore; ++thr)
2798 if (!(active & (1 << thr)))
2799 kvmppc_ipi_thread(pcpu + thr);
e0b7ec05 2800
2f12f034 2801 vc->vcore_state = VCORE_RUNNING;
19ccb76a 2802 preempt_disable();
3c78f78a
SW
2803
2804 trace_kvmppc_run_core(vc, 0);
2805
b4deba5c 2806 for (sub = 0; sub < core_info.n_subcores; ++sub)
898b25b2 2807 spin_unlock(&core_info.vc[sub]->lock);
de56a948 2808
8b24e69f
PM
2809 /*
2810 * Interrupts will be enabled once we get into the guest,
2811 * so tell lockdep that we're about to enable interrupts.
2812 */
2813 trace_hardirqs_on();
de56a948 2814
6edaa530 2815 guest_enter();
2c9097e4 2816
e0b7ec05 2817 srcu_idx = srcu_read_lock(&vc->kvm->srcu);
2c9097e4 2818
8b24e69f 2819 trap = __kvmppc_vcore_entry();
de56a948 2820
ec257165
PM
2821 srcu_read_unlock(&vc->kvm->srcu, srcu_idx);
2822
8b24e69f
PM
2823 guest_exit();
2824
2825 trace_hardirqs_off();
2826 set_irq_happened(trap);
2827
ec257165 2828 spin_lock(&vc->lock);
371fefd6 2829 /* prevent other vcpu threads from doing kvmppc_start_thread() now */
19ccb76a 2830 vc->vcore_state = VCORE_EXITING;
371fefd6 2831
19ccb76a 2832 /* wait for secondary threads to finish writing their state to memory */
5d5b99cd 2833 kvmppc_wait_for_nap();
b4deba5c
PM
2834
2835 /* Return to whole-core mode if we split the core earlier */
2836 if (split > 1) {
2837 unsigned long hid0 = mfspr(SPRN_HID0);
2838 unsigned long loops = 0;
2839
2840 hid0 &= ~HID0_POWER8_DYNLPARDIS;
2841 stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
2842 mb();
2843 mtspr(SPRN_HID0, hid0);
2844 isync();
2845 for (;;) {
2846 hid0 = mfspr(SPRN_HID0);
2847 if (!(hid0 & stat_bit))
2848 break;
2849 cpu_relax();
2850 ++loops;
2851 }
2852 split_info.do_nap = 0;
2853 }
2854
8b24e69f
PM
2855 kvmppc_set_host_core(pcpu);
2856
2857 local_irq_enable();
2858
b4deba5c 2859 /* Let secondaries go back to the offline loop */
45c940ba 2860 for (i = 0; i < controlled_threads; ++i) {
b4deba5c
PM
2861 kvmppc_release_hwthread(pcpu + i);
2862 if (sip && sip->napped[i])
2863 kvmppc_ipi_thread(pcpu + i);
a29ebeaf 2864 cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
b4deba5c
PM
2865 }
2866
371fefd6 2867 spin_unlock(&vc->lock);
2c9097e4 2868
371fefd6
PM
2869 /* make sure updates to secondary vcpu structs are visible now */
2870 smp_mb();
de56a948 2871
898b25b2
PM
2872 for (sub = 0; sub < core_info.n_subcores; ++sub) {
2873 pvc = core_info.vc[sub];
2874 post_guest_process(pvc, pvc == vc);
2875 }
de56a948 2876
913d3ff9 2877 spin_lock(&vc->lock);
ec257165 2878 preempt_enable();
de56a948
PM
2879
2880 out:
19ccb76a 2881 vc->vcore_state = VCORE_INACTIVE;
3c78f78a 2882 trace_kvmppc_run_core(vc, 1);
371fefd6
PM
2883}
2884
19ccb76a
PM
2885/*
2886 * Wait for some other vcpu thread to execute us, and
2887 * wake us up when we need to handle something in the host.
2888 */
ec257165
PM
2889static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
2890 struct kvm_vcpu *vcpu, int wait_state)
371fefd6 2891{
371fefd6
PM
2892 DEFINE_WAIT(wait);
2893
19ccb76a 2894 prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
ec257165
PM
2895 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
2896 spin_unlock(&vc->lock);
19ccb76a 2897 schedule();
ec257165
PM
2898 spin_lock(&vc->lock);
2899 }
19ccb76a
PM
2900 finish_wait(&vcpu->arch.cpu_run, &wait);
2901}
2902
0cda69dd
SJS
2903static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
2904{
2905 /* 10us base */
2906 if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
2907 vc->halt_poll_ns = 10000;
2908 else
2909 vc->halt_poll_ns *= halt_poll_ns_grow;
0cda69dd
SJS
2910}
2911
2912static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
2913{
2914 if (halt_poll_ns_shrink == 0)
2915 vc->halt_poll_ns = 0;
2916 else
2917 vc->halt_poll_ns /= halt_poll_ns_shrink;
2918}
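/*
 * Example: with halt_poll_ns_grow = 2 the polling window moves
 * 0 -> 10000ns -> 20000ns -> 40000ns on successive grows, and with
 * halt_poll_ns_shrink = 2 it halves on each shrink; a shrink factor
 * of 0 turns polling off by resetting the window to 0.
 */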
2919
ee3308a2
PM
2920#ifdef CONFIG_KVM_XICS
2921static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
2922{
2923 if (!xive_enabled())
2924 return false;
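	/*
	 * Pending if the highest-priority pending interrupt (PIPR) is
	 * more favored, i.e. numerically lower, than the current
	 * processor priority (CPPR).
	 */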
2925 return vcpu->arch.xive_saved_state.pipr <
2926 vcpu->arch.xive_saved_state.cppr;
2927}
2928#else
2929static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu)
2930{
2931 return false;
2932}
2933#endif /* CONFIG_KVM_XICS */
2934
1da4e2f4
PM
2935static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu)
2936{
2937 if (vcpu->arch.pending_exceptions || vcpu->arch.prodded ||
ee3308a2 2938 kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu))
1da4e2f4
PM
2939 return true;
2940
2941 return false;
2942}
2943
908a0935
SJS
2944/*
2945 * Check to see if any of the runnable vcpus on the vcore have pending
0cda69dd
SJS
2946 * exceptions or are no longer ceded
2947 */
2948static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
2949{
2950 struct kvm_vcpu *vcpu;
2951 int i;
2952
2953 for_each_runnable_thread(i, vcpu, vc) {
1da4e2f4 2954 if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu))
0cda69dd
SJS
2955 return 1;
2956 }
2957
2958 return 0;
2959}
2960
19ccb76a
PM
2961/*
2962 * All the vcpus in this vcore are idle, so wait for a decrementer
2963 * or external interrupt to one of the vcpus. vc->lock is held.
2964 */
2965static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
2966{
2a27f514 2967 ktime_t cur, start_poll, start_wait;
0cda69dd 2968 int do_sleep = 1;
0cda69dd 2969 u64 block_ns;
8577370f 2970 DECLARE_SWAITQUEUE(wait);
1bc5d59c 2971
0cda69dd 2972 /* Poll for pending exceptions and ceded state */
2a27f514 2973 cur = start_poll = ktime_get();
0cda69dd 2974 if (vc->halt_poll_ns) {
2a27f514
SJS
2975 ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
2976 ++vc->runner->stat.halt_attempted_poll;
1bc5d59c 2977
0cda69dd
SJS
2978 vc->vcore_state = VCORE_POLLING;
2979 spin_unlock(&vc->lock);
2980
2981 do {
2982 if (kvmppc_vcore_check_block(vc)) {
2983 do_sleep = 0;
2984 break;
2985 }
2986 cur = ktime_get();
2987 } while (single_task_running() && ktime_before(cur, stop));
2988
2989 spin_lock(&vc->lock);
2990 vc->vcore_state = VCORE_INACTIVE;
2991
2a27f514
SJS
2992 if (!do_sleep) {
2993 ++vc->runner->stat.halt_successful_poll;
0cda69dd 2994 goto out;
2a27f514 2995 }
1bc5d59c
SW
2996 }
2997
0cda69dd
SJS
2998 prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);
2999
3000 if (kvmppc_vcore_check_block(vc)) {
8577370f 3001 finish_swait(&vc->wq, &wait);
0cda69dd 3002 do_sleep = 0;
2a27f514
SJS
3003 /* If we polled, count this as a successful poll */
3004 if (vc->halt_poll_ns)
3005 ++vc->runner->stat.halt_successful_poll;
0cda69dd 3006 goto out;
1bc5d59c
SW
3007 }
3008
2a27f514
SJS
3009 start_wait = ktime_get();
3010
19ccb76a 3011 vc->vcore_state = VCORE_SLEEPING;
3c78f78a 3012 trace_kvmppc_vcore_blocked(vc, 0);
19ccb76a 3013 spin_unlock(&vc->lock);
913d3ff9 3014 schedule();
8577370f 3015 finish_swait(&vc->wq, &wait);
19ccb76a
PM
3016 spin_lock(&vc->lock);
3017 vc->vcore_state = VCORE_INACTIVE;
3c78f78a 3018 trace_kvmppc_vcore_blocked(vc, 1);
2a27f514 3019 ++vc->runner->stat.halt_successful_wait;
0cda69dd
SJS
3020
3021 cur = ktime_get();
3022
3023out:
2a27f514
SJS
3024 block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);
3025
3026 /* Attribute wait time */
3027 if (do_sleep) {
3028 vc->runner->stat.halt_wait_ns +=
3029 ktime_to_ns(cur) - ktime_to_ns(start_wait);
3030 /* Attribute failed poll time */
3031 if (vc->halt_poll_ns)
3032 vc->runner->stat.halt_poll_fail_ns +=
3033 ktime_to_ns(start_wait) -
3034 ktime_to_ns(start_poll);
3035 } else {
3036 /* Attribute successful poll time */
3037 if (vc->halt_poll_ns)
3038 vc->runner->stat.halt_poll_success_ns +=
3039 ktime_to_ns(cur) -
3040 ktime_to_ns(start_poll);
3041 }
0cda69dd
SJS
3042
3043 /* Adjust poll time */
307d93e4 3044 if (halt_poll_ns) {
0cda69dd
SJS
3045 if (block_ns <= vc->halt_poll_ns)
3046 ;
3047 /* We slept and blocked for longer than the max halt time */
307d93e4 3048 else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
0cda69dd
SJS
3049 shrink_halt_poll_ns(vc);
3050 /* We slept and our poll time is too small */
307d93e4
SJS
3051 else if (vc->halt_poll_ns < halt_poll_ns &&
3052 block_ns < halt_poll_ns)
0cda69dd 3053 grow_halt_poll_ns(vc);
e03f3921
SJS
3054 if (vc->halt_poll_ns > halt_poll_ns)
3055 vc->halt_poll_ns = halt_poll_ns;
0cda69dd
SJS
3056 } else
3057 vc->halt_poll_ns = 0;
3058
3059 trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
19ccb76a 3060}
371fefd6 3061
19ccb76a
PM
3062static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
3063{
7b5f8272 3064 int n_ceded, i;
19ccb76a 3065 struct kvmppc_vcore *vc;
7b5f8272 3066 struct kvm_vcpu *v;
9e368f29 3067
3c78f78a
SW
3068 trace_kvmppc_run_vcpu_enter(vcpu);
3069
371fefd6
PM
3070 kvm_run->exit_reason = 0;
3071 vcpu->arch.ret = RESUME_GUEST;
3072 vcpu->arch.trap = 0;
2f12f034 3073 kvmppc_update_vpas(vcpu);
371fefd6 3074
371fefd6
PM
3075 /*
3076 * Synchronize with other threads in this virtual core
3077 */
3078 vc = vcpu->arch.vcore;
3079 spin_lock(&vc->lock);
19ccb76a 3080 vcpu->arch.ceded = 0;
371fefd6
PM
3081 vcpu->arch.run_task = current;
3082 vcpu->arch.kvm_run = kvm_run;
c7b67670 3083 vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
19ccb76a 3084 vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
c7b67670 3085 vcpu->arch.busy_preempt = TB_NIL;
7b5f8272 3086 WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
371fefd6
PM
3087 ++vc->n_runnable;
3088
19ccb76a
PM
3089 /*
3090 * This happens the first time this is called for a vcpu.
3091 * If the vcore is already running, we may be able to start
3092 * this thread straight away and have it join in.
3093 */
8455d79e 3094 if (!signal_pending(current)) {
ec257165 3095 if (vc->vcore_state == VCORE_PIGGYBACK) {
898b25b2
PM
3096 if (spin_trylock(&vc->lock)) {
3097 if (vc->vcore_state == VCORE_RUNNING &&
3098 !VCORE_IS_EXITING(vc)) {
ec257165 3099 kvmppc_create_dtl_entry(vcpu, vc);
b4deba5c 3100 kvmppc_start_thread(vcpu, vc);
ec257165
PM
3101 trace_kvm_guest_enter(vcpu);
3102 }
898b25b2 3103 spin_unlock(&vc->lock);
ec257165
PM
3104 }
3105 } else if (vc->vcore_state == VCORE_RUNNING &&
3106 !VCORE_IS_EXITING(vc)) {
2f12f034 3107 kvmppc_create_dtl_entry(vcpu, vc);
b4deba5c 3108 kvmppc_start_thread(vcpu, vc);
3c78f78a 3109 trace_kvm_guest_enter(vcpu);
8455d79e 3110 } else if (vc->vcore_state == VCORE_SLEEPING) {
8577370f 3111 swake_up(&vc->wq);
371fefd6
PM
3112 }
3113
8455d79e 3114 }
371fefd6 3115
19ccb76a
PM
3116 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
3117 !signal_pending(current)) {
ec257165
PM
3118 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
3119 kvmppc_vcore_end_preempt(vc);
3120
8455d79e 3121 if (vc->vcore_state != VCORE_INACTIVE) {
ec257165 3122 kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
19ccb76a
PM
3123 continue;
3124 }
7b5f8272 3125 for_each_runnable_thread(i, v, vc) {
7e28e60e 3126 kvmppc_core_prepare_to_enter(v);
19ccb76a
PM
3127 if (signal_pending(v->arch.run_task)) {
3128 kvmppc_remove_runnable(vc, v);
3129 v->stat.signal_exits++;
3130 v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
3131 v->arch.ret = -EINTR;
3132 wake_up(&v->arch.cpu_run);
3133 }
3134 }
8455d79e
PM
3135 if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
3136 break;
8455d79e 3137 n_ceded = 0;
7b5f8272 3138 for_each_runnable_thread(i, v, vc) {
1da4e2f4 3139 if (!kvmppc_vcpu_woken(v))
8455d79e 3140 n_ceded += v->arch.ceded;
4619ac88
PM
3141 else
3142 v->arch.ceded = 0;
3143 }
25fedfca
PM
3144 vc->runner = vcpu;
3145 if (n_ceded == vc->n_runnable) {
8455d79e 3146 kvmppc_vcore_blocked(vc);
c56dadf3 3147 } else if (need_resched()) {
ec257165 3148 kvmppc_vcore_preempt(vc);
25fedfca
PM
3149 /* Let something else run */
3150 cond_resched_lock(&vc->lock);
ec257165
PM
3151 if (vc->vcore_state == VCORE_PREEMPT)
3152 kvmppc_vcore_end_preempt(vc);
25fedfca 3153 } else {
8455d79e 3154 kvmppc_run_core(vc);
25fedfca 3155 }
0456ec4f 3156 vc->runner = NULL;
19ccb76a 3157 }
371fefd6 3158
8455d79e
PM
3159 while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
3160 (vc->vcore_state == VCORE_RUNNING ||
5fc3e64f
PM
3161 vc->vcore_state == VCORE_EXITING ||
3162 vc->vcore_state == VCORE_PIGGYBACK))
ec257165 3163 kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);
8455d79e 3164
5fc3e64f
PM
3165 if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
3166 kvmppc_vcore_end_preempt(vc);
3167
8455d79e
PM
3168 if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
3169 kvmppc_remove_runnable(vc, vcpu);
3170 vcpu->stat.signal_exits++;
3171 kvm_run->exit_reason = KVM_EXIT_INTR;
3172 vcpu->arch.ret = -EINTR;
3173 }
3174
3175 if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
3176 /* Wake up some vcpu to run the core */
7b5f8272
SJS
3177 i = -1;
3178 v = next_runnable_thread(vc, &i);
8455d79e 3179 wake_up(&v->arch.cpu_run);
371fefd6
PM
3180 }
3181
3c78f78a 3182 trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
371fefd6 3183 spin_unlock(&vc->lock);
371fefd6 3184 return vcpu->arch.ret;
de56a948
PM
3185}
3186
3a167bea 3187static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
a8606e20
PM
3188{
3189 int r;
913d3ff9 3190 int srcu_idx;
ca8efa1d 3191 unsigned long ebb_regs[3] = {}; /* shut up GCC */
4c3bb4cc
PM
3192 unsigned long user_tar = 0;
3193 unsigned int user_vrsave;
a8606e20 3194
af8f38b3
AG
3195 if (!vcpu->arch.sane) {
3196 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3197 return -EINVAL;
3198 }
3199
46a704f8
PM
3200 /*
3201 * Don't allow entry with a suspended transaction, because
3202 * the guest entry/exit code will lose it.
 3203 * If the guest has TM enabled, save away its TM-related SPRs
3204 * (they will get restored by the TM unavailable interrupt).
3205 */
3206#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3207 if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs &&
3208 (current->thread.regs->msr & MSR_TM)) {
3209 if (MSR_TM_ACTIVE(current->thread.regs->msr)) {
3210 run->exit_reason = KVM_EXIT_FAIL_ENTRY;
3211 run->fail_entry.hardware_entry_failure_reason = 0;
3212 return -EINVAL;
3213 }
e4705715
PM
3214 /* Enable TM so we can read the TM SPRs */
3215 mtmsr(mfmsr() | MSR_TM);
46a704f8
PM
3216 current->thread.tm_tfhar = mfspr(SPRN_TFHAR);
3217 current->thread.tm_tfiar = mfspr(SPRN_TFIAR);
3218 current->thread.tm_texasr = mfspr(SPRN_TEXASR);
3219 current->thread.regs->msr &= ~MSR_TM;
3220 }
3221#endif
3222
25051b5a
SW
3223 kvmppc_core_prepare_to_enter(vcpu);
3224
19ccb76a
PM
3225 /* No need to go into the guest when all we'll do is come back out */
3226 if (signal_pending(current)) {
3227 run->exit_reason = KVM_EXIT_INTR;
3228 return -EINTR;
3229 }
3230
32fad281 3231 atomic_inc(&vcpu->kvm->arch.vcpus_running);
31037eca 3232 /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
32fad281
PM
3233 smp_mb();
3234
c17b98cf 3235 /* On the first time here, set up HTAB and VRMA */
8cf4ecc0 3236 if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
32fad281 3237 r = kvmppc_hv_setup_htab_rma(vcpu);
c77162de 3238 if (r)
32fad281 3239 goto out;
c77162de 3240 }
19ccb76a 3241
579e633e
AB
3242 flush_all_to_thread(current);
3243
4c3bb4cc 3244 /* Save userspace EBB and other register values */
ca8efa1d
PM
3245 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
3246 ebb_regs[0] = mfspr(SPRN_EBBHR);
3247 ebb_regs[1] = mfspr(SPRN_EBBRR);
3248 ebb_regs[2] = mfspr(SPRN_BESCR);
4c3bb4cc 3249 user_tar = mfspr(SPRN_TAR);
ca8efa1d 3250 }
4c3bb4cc 3251 user_vrsave = mfspr(SPRN_VRSAVE);
ca8efa1d 3252
19ccb76a 3253 vcpu->arch.wqp = &vcpu->arch.vcore->wq;
342d3db7 3254 vcpu->arch.pgdir = current->mm->pgd;
c7b67670 3255 vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
19ccb76a 3256
a8606e20
PM
3257 do {
3258 r = kvmppc_run_vcpu(run, vcpu);
3259
3260 if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
3261 !(vcpu->arch.shregs.msr & MSR_PR)) {
3c78f78a 3262 trace_kvm_hcall_enter(vcpu);
a8606e20 3263 r = kvmppc_pseries_do_hcall(vcpu);
3c78f78a 3264 trace_kvm_hcall_exit(vcpu, r);
7e28e60e 3265 kvmppc_core_prepare_to_enter(vcpu);
913d3ff9
PM
3266 } else if (r == RESUME_PAGE_FAULT) {
3267 srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
3268 r = kvmppc_book3s_hv_page_fault(run, vcpu,
3269 vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
3270 srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
5af50993
BH
3271 } else if (r == RESUME_PASSTHROUGH) {
3272 if (WARN_ON(xive_enabled()))
3273 r = H_SUCCESS;
3274 else
3275 r = kvmppc_xics_rm_complete(vcpu, 0);
3276 }
e59d24e6 3277 } while (is_kvmppc_resume_guest(r));
32fad281 3278
4c3bb4cc 3279 /* Restore userspace EBB and other register values */
ca8efa1d
PM
3280 if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
3281 mtspr(SPRN_EBBHR, ebb_regs[0]);
3282 mtspr(SPRN_EBBRR, ebb_regs[1]);
3283 mtspr(SPRN_BESCR, ebb_regs[2]);
4c3bb4cc
PM
3284 mtspr(SPRN_TAR, user_tar);
3285 mtspr(SPRN_FSCR, current->thread.fscr);
ca8efa1d 3286 }
4c3bb4cc 3287 mtspr(SPRN_VRSAVE, user_vrsave);
ca8efa1d 3288
32fad281 3289 out:
c7b67670 3290 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
32fad281 3291 atomic_dec(&vcpu->kvm->arch.vcpus_running);
a8606e20
PM
3292 return r;
3293}
3294
5b74716e
BH
3295static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
3296 int linux_psize)
3297{
3298 struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];
3299
3300 if (!def->shift)
3301 return;
3302 (*sps)->page_shift = def->shift;
3303 (*sps)->slb_enc = def->sllp;
3304 (*sps)->enc[0].page_shift = def->shift;
b1022fbd 3305 (*sps)->enc[0].pte_enc = def->penc[linux_psize];
1f365bb0
AK
3306 /*
3307 * Add 16MB MPSS support if host supports it
3308 */
3309 if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
3310 (*sps)->enc[1].page_shift = 24;
3311 (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
3312 }
5b74716e
BH
3313 (*sps)++;
3314}
3315
3a167bea
AK
3316static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
3317 struct kvm_ppc_smmu_info *info)
5b74716e
BH
3318{
3319 struct kvm_ppc_one_seg_page_size *sps;
3320
8cf4ecc0
PM
3321 /*
3322 * Since we don't yet support HPT guests on a radix host,
3323 * return an error if the host uses radix.
3324 */
3325 if (radix_enabled())
3326 return -EINVAL;
3327
5b74716e
BH
3328 info->flags = KVM_PPC_PAGE_SIZES_REAL;
3329 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
3330 info->flags |= KVM_PPC_1T_SEGMENTS;
3331 info->slb_size = mmu_slb_size;
3332
 3333 /* We only support these sizes for now, and no multi-size segments */
3334 sps = &info->sps[0];
3335 kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
3336 kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
3337 kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);
3338
3339 return 0;
3340}
3341
82ed3616
PM
3342/*
3343 * Get (and clear) the dirty memory log for a memory slot.
3344 */
3a167bea
AK
3345static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
3346 struct kvm_dirty_log *log)
82ed3616 3347{
9f6b8029 3348 struct kvm_memslots *slots;
82ed3616 3349 struct kvm_memory_slot *memslot;
8f7b79b8 3350 int i, r;
82ed3616 3351 unsigned long n;
8f7b79b8
PM
3352 unsigned long *buf;
3353 struct kvm_vcpu *vcpu;
82ed3616
PM
3354
3355 mutex_lock(&kvm->slots_lock);
3356
3357 r = -EINVAL;
bbacc0c1 3358 if (log->slot >= KVM_USER_MEM_SLOTS)
82ed3616
PM
3359 goto out;
3360
9f6b8029
PB
3361 slots = kvm_memslots(kvm);
3362 memslot = id_to_memslot(slots, log->slot);
82ed3616
PM
3363 r = -ENOENT;
3364 if (!memslot->dirty_bitmap)
3365 goto out;
3366
8f7b79b8
PM
3367 /*
3368 * Use second half of bitmap area because radix accumulates
3369 * bits in the first half.
3370 */
82ed3616 3371 n = kvm_dirty_bitmap_bytes(memslot);
8f7b79b8
PM
3372 buf = memslot->dirty_bitmap + n / sizeof(long);
3373 memset(buf, 0, n);
82ed3616 3374
8f7b79b8
PM
3375 if (kvm_is_radix(kvm))
3376 r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
3377 else
3378 r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
82ed3616
PM
3379 if (r)
3380 goto out;
3381
8f7b79b8
PM
3382 /* Harvest dirty bits from VPA and DTL updates */
3383 /* Note: we never modify the SLB shadow buffer areas */
3384 kvm_for_each_vcpu(i, vcpu, kvm) {
3385 spin_lock(&vcpu->arch.vpa_update_lock);
3386 kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
3387 kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
3388 spin_unlock(&vcpu->arch.vpa_update_lock);
3389 }
3390
82ed3616 3391 r = -EFAULT;
8f7b79b8 3392 if (copy_to_user(log->dirty_bitmap, buf, n))
82ed3616
PM
3393 goto out;
3394
3395 r = 0;
3396out:
3397 mutex_unlock(&kvm->slots_lock);
3398 return r;
3399}
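/*
 * Userspace fetches this with the standard KVM_GET_DIRTY_LOG ioctl on
 * the VM fd; a sketch, assuming vm_fd and a suitably sized bitmap:
 *
 *	struct kvm_dirty_log log = { .slot = 0, .dirty_bitmap = bitmap };
 *	if (ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		err(1, "KVM_GET_DIRTY_LOG");
 */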
3400
3a167bea
AK
3401static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
3402 struct kvm_memory_slot *dont)
a66b48c3
PM
3403{
3404 if (!dont || free->arch.rmap != dont->arch.rmap) {
3405 vfree(free->arch.rmap);
3406 free->arch.rmap = NULL;
b2b2f165 3407 }
a66b48c3
PM
3408}
3409
3a167bea
AK
3410static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
3411 unsigned long npages)
a66b48c3 3412{
8cf4ecc0
PM
3413 /*
3414 * For now, if radix_enabled() then we only support radix guests,
3415 * and in that case we don't need the rmap array.
3416 */
3417 if (radix_enabled()) {
3418 slot->arch.rmap = NULL;
3419 return 0;
3420 }
3421
a66b48c3
PM
3422 slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
3423 if (!slot->arch.rmap)
3424 return -ENOMEM;
aa04b4cc 3425
c77162de
PM
3426 return 0;
3427}
aa04b4cc 3428
3a167bea
AK
3429static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
3430 struct kvm_memory_slot *memslot,
09170a49 3431 const struct kvm_userspace_memory_region *mem)
c77162de 3432{
a66b48c3 3433 return 0;
c77162de
PM
3434}
3435
3a167bea 3436static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
09170a49 3437 const struct kvm_userspace_memory_region *mem,
f36f3f28
PB
3438 const struct kvm_memory_slot *old,
3439 const struct kvm_memory_slot *new)
c77162de 3440{
dfe49dbd 3441 unsigned long npages = mem->memory_size >> PAGE_SHIFT;
9f6b8029 3442 struct kvm_memslots *slots;
dfe49dbd
PM
3443 struct kvm_memory_slot *memslot;
3444
a56ee9f8
YX
3445 /*
3446 * If we are making a new memslot, it might make
3447 * some address that was previously cached as emulated
3448 * MMIO be no longer emulated MMIO, so invalidate
3449 * all the caches of emulated MMIO translations.
3450 */
3451 if (npages)
3452 atomic64_inc(&kvm->arch.mmio_update);
3453
8f7b79b8 3454 if (npages && old->npages && !kvm_is_radix(kvm)) {
dfe49dbd
PM
3455 /*
3456 * If modifying a memslot, reset all the rmap dirty bits.
3457 * If this is a new memslot, we don't need to do anything
3458 * since the rmap array starts out as all zeroes,
3459 * i.e. no pages are dirty.
3460 */
9f6b8029
PB
3461 slots = kvm_memslots(kvm);
3462 memslot = id_to_memslot(slots, mem->slot);
8f7b79b8 3463 kvmppc_hv_get_dirty_log_hpt(kvm, memslot, NULL);
dfe49dbd 3464 }
c77162de
PM
3465}
3466
a0144e2a
PM
3467/*
3468 * Update LPCR values in kvm->arch and in vcores.
3469 * Caller must hold kvm->lock.
3470 */
3471void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
3472{
3473 long int i;
3474 u32 cores_done = 0;
3475
3476 if ((kvm->arch.lpcr & mask) == lpcr)
3477 return;
3478
3479 kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;
3480
3481 for (i = 0; i < KVM_MAX_VCORES; ++i) {
3482 struct kvmppc_vcore *vc = kvm->arch.vcores[i];
3483 if (!vc)
3484 continue;
3485 spin_lock(&vc->lock);
3486 vc->lpcr = (vc->lpcr & ~mask) | lpcr;
3487 spin_unlock(&vc->lock);
3488 if (++cores_done >= kvm->arch.online_vcores)
3489 break;
3490 }
3491}

static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
{
	return;
}

static void kvmppc_setup_partition_table(struct kvm *kvm)
{
	unsigned long dw0, dw1;

	if (!kvm_is_radix(kvm)) {
		/* PS field - page size for VRMA */
		dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
			((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
		/* HTABSIZE and HTABORG fields */
		dw0 |= kvm->arch.sdr1;

		/* Second dword as set by userspace */
		dw1 = kvm->arch.process_table;
	} else {
		dw0 = PATB_HR | radix__get_tree_size() |
			__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
		dw1 = PATB_GR | kvm->arch.process_table;
	}

	mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
}
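
/*
 * Illustrative note (not from the original source): each partition-table
 * entry is two doublewords.  For a hash guest, dw0 carries the VRMA page
 * size plus the HTABORG/HTABSIZE fields taken from kvm->arch.sdr1; for a
 * radix guest, dw0 carries PATB_HR, the radix tree size and the physical
 * address of the guest's top-level page table.  In both cases dw1 is the
 * process-table base that userspace registered, with PATB_GR set for
 * radix.
 */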

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr = 0, senc;
	unsigned long psize, porder;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.hpte_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt.virt) {
		int order = KVM_DEFAULT_HPT_ORDER;
		struct kvm_hpt_info info;

		err = kvmppc_allocate_hpt(&info, order);
		/* If we get here, it means userspace didn't specify a
		 * size explicitly.  So, try successively smaller
		 * sizes if the default failed. */
		while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER)
			err = kvmppc_allocate_hpt(&info, order);

		if (err < 0) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}

		kvmppc_set_hpt(kvm, &info);
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	up_read(&current->mm->mmap_sem);

	/* We can handle 4k, 64k or 16M pages in the VRMA */
	err = -EINVAL;
	if (!(psize == 0x1000 || psize == 0x10000 ||
	      psize == 0x1000000))
		goto out_srcu;

	senc = slb_pgsize_encoding(psize);
	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
		(VRMA_VSID << SLB_VSID_SHIFT_1T);
	/* Create HPTEs in the hash page table for the VRMA */
	kvmppc_map_vrma(vcpu, memslot, porder);

	/* Update VRMASD field in the LPCR */
	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* the -4 is to account for senc values starting at 0x10 */
		lpcr = senc << (LPCR_VRMASD_SH - 4);
		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
	} else {
		kvmppc_setup_partition_table(kvm);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
	smp_wmb();
	kvm->arch.hpte_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}
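
/*
 * Illustrative note (not from the original source), assuming the usual
 * constant values (KVM_DEFAULT_HPT_ORDER = 24, PPC_MIN_HPT_ORDER = 18):
 * the fallback loop above first asks for a 2^24 = 16 MiB hash page
 * table, then retries at 8 MiB, 4 MiB, ... down to 2^18 = 256 KiB
 * before giving up with -ENOMEM.
 */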

#ifdef CONFIG_KVM_XICS
/*
 * Allocate a per-core structure for managing state about which cores are
 * running in the host versus the guest and for exchanging data between
 * real-mode KVM and CPUs running in the host.
 * This is only done for the first VM.
 * The allocated structure stays even if all VMs have stopped.
 * It is only freed when the kvm-hv module is unloaded.
 * It's OK for this routine to fail; we just won't support host
 * core operations like redirecting H_IPI wakeups.
 */
void kvmppc_alloc_host_rm_ops(void)
{
	struct kvmppc_host_rm_ops *ops;
	unsigned long l_ops;
	int cpu, core;
	int size;

	/* Not the first time here? */
	if (kvmppc_host_rm_ops_hv != NULL)
		return;

	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
	if (!ops)
		return;

	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
	ops->rm_core = kzalloc(size, GFP_KERNEL);

	if (!ops->rm_core) {
		kfree(ops);
		return;
	}

	cpus_read_lock();

	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
		if (!cpu_online(cpu))
			continue;

		core = cpu >> threads_shift;
		ops->rm_core[core].rm_state.in_host = 1;
	}

	ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;

	/*
	 * Make the contents of the kvmppc_host_rm_ops structure visible
	 * to other CPUs before we assign it to the global variable.
	 * Do an atomic assignment (no locks used here), but if someone
	 * beats us to it, just free our copy and return.
	 */
	smp_wmb();
	l_ops = (unsigned long) ops;

	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
		cpus_read_unlock();
		kfree(ops->rm_core);
		kfree(ops);
		return;
	}

	cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE,
					     "ppc/kvm_book3s:prepare",
					     kvmppc_set_host_core,
					     kvmppc_clear_host_core);
	cpus_read_unlock();
}
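
/*
 * Illustrative sketch (not from the original source, hypothetical names)
 * of the lock-free publish pattern used above: fully initialise the
 * object, smp_wmb(), then publish with a compare-and-swap so that only
 * one racing caller installs its copy:
 *
 *	smp_wmb();				// order init before publish
 *	if (cmpxchg64(&global_ptr, 0, (unsigned long)obj))
 *		free_obj(obj);			// somebody else won the race
 *
 * Readers that load the pointer and dereference it then see the fully
 * initialised structure.
 */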

void kvmppc_free_host_rm_ops(void)
{
	if (kvmppc_host_rm_ops_hv) {
		cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
		kfree(kvmppc_host_rm_ops_hv->rm_core);
		kfree(kvmppc_host_rm_ops_hv);
		kvmppc_host_rm_ops_hv = NULL;
	}
}
#endif

static int kvmppc_core_init_vm_hv(struct kvm *kvm)
{
	unsigned long lpcr, lpid;
	char buf[32];
	int ret;

	/* Allocate the guest's logical partition ID */

	lpid = kvmppc_alloc_lpid();
	if ((long)lpid < 0)
		return -ENOMEM;
	kvm->arch.lpid = lpid;

	kvmppc_alloc_host_rm_ops();

	/*
	 * Since we don't flush the TLB when tearing down a VM,
	 * and this lpid might have previously been used,
	 * make sure we flush on each core before running the new VM.
	 * On POWER9, the tlbie in mmu_partition_table_set_entry()
	 * does this flush for us.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		cpumask_setall(&kvm->arch.need_tlb_flush);

	/* Start out with the default set of hcalls enabled */
	memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
	       sizeof(kvm->arch.enabled_hcalls));

	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);

	/* Init LPCR for virtual RMA mode */
	kvm->arch.host_lpid = mfspr(SPRN_LPID);
	kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
	lpcr &= LPCR_PECE | LPCR_LPES;
	lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
		LPCR_VPM0 | LPCR_VPM1;
	kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
		(VRMA_VSID << SLB_VSID_SHIFT_1T);
	/* On POWER8 turn on online bit to enable PURR/SPURR */
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		lpcr |= LPCR_ONL;
	/*
	 * On POWER9, the VPM0 bit is reserved (VPM0=1 behaviour is assumed).
	 * Set the HVICE bit to enable hypervisor virtualization interrupts.
	 * Set HEIC to prevent OS interrupts from going to the hypervisor
	 * (this should be unnecessary, but better safe than sorry in case
	 * we re-enable EE in HV mode with this LPCR still set).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		lpcr &= ~LPCR_VPM0;
		lpcr |= LPCR_HVICE | LPCR_HEIC;

		/*
		 * If xive is enabled, we route 0x500 interrupts directly
		 * to the guest.
		 */
		if (xive_enabled())
			lpcr |= LPCR_LPES;
	}

	/*
	 * For now, if the host uses radix, the guest must be radix.
	 */
	if (radix_enabled()) {
		kvm->arch.radix = 1;
		lpcr &= ~LPCR_VPM1;
		lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
		ret = kvmppc_init_vm_radix(kvm);
		if (ret) {
			kvmppc_free_lpid(kvm->arch.lpid);
			return ret;
		}
		kvmppc_setup_partition_table(kvm);
	}

	kvm->arch.lpcr = lpcr;

	/* Initialization for future HPT resizes */
	kvm->arch.resize_hpt = NULL;

	/*
	 * Work out how many sets the TLB has, for the use of
	 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
	 */
	if (kvm_is_radix(kvm))
		kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX;	/* 128 */
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH;	/* 256 */
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		kvm->arch.tlb_sets = POWER8_TLB_SETS;		/* 512 */
	else
		kvm->arch.tlb_sets = POWER7_TLB_SETS;		/* 128 */

	/*
	 * Track that we now have a HV mode VM active. This blocks secondary
	 * CPU threads from coming online.
	 * On POWER9, we only need to do this for HPT guests on a radix
	 * host, which is not yet supported.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		kvm_hv_vm_activated();

	/*
	 * Initialize smt_mode depending on processor.
	 * POWER8 and earlier have to use "strict" threading, where
	 * all vCPUs in a vcore have to run on the same (sub)core,
	 * whereas on POWER9 the threads can each run a different
	 * guest.
	 */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		kvm->arch.smt_mode = threads_per_subcore;
	else
		kvm->arch.smt_mode = 1;
	kvm->arch.emul_smt_mode = 1;

	/*
	 * Create a debugfs directory for the VM
	 */
	snprintf(buf, sizeof(buf), "vm%d", current->pid);
	kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
	if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
		kvmppc_mmu_debugfs_init(kvm);

	return 0;
}
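
/*
 * Illustrative sketch (not from the original source): this init hook runs
 * when userspace creates a VM.  A minimal userspace sequence that reaches
 * it on a HV-capable host looks roughly like:
 *
 *	int kvm = open("/dev/kvm", O_RDWR);
 *	int vm  = ioctl(kvm, KVM_CREATE_VM, KVM_VM_PPC_HV);
 *
 * where the KVM_VM_PPC_HV machine type explicitly requests the
 * hypervisor-mode implementation rather than PR KVM.
 */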

static void kvmppc_free_vcores(struct kvm *kvm)
{
	long int i;

	for (i = 0; i < KVM_MAX_VCORES; ++i)
		kfree(kvm->arch.vcores[i]);
	kvm->arch.online_vcores = 0;
}

static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
{
	debugfs_remove_recursive(kvm->arch.debugfs_dir);

	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		kvm_hv_vm_deactivated();

	kvmppc_free_vcores(kvm);

	kvmppc_free_lpid(kvm->arch.lpid);

	if (kvm_is_radix(kvm))
		kvmppc_free_radix(kvm);
	else
		kvmppc_free_hpt(&kvm->arch.hpt);

	kvmppc_free_pimap(kvm);
}

/* We don't need to emulate any privileged instructions or dcbz */
static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				     unsigned int inst, int *advance)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
					ulong *spr_val)
{
	return EMULATE_FAIL;
}

static int kvmppc_core_check_processor_compat_hv(void)
{
	if (!cpu_has_feature(CPU_FTR_HVMODE) ||
	    !cpu_has_feature(CPU_FTR_ARCH_206))
		return -EIO;

	return 0;
}

#ifdef CONFIG_KVM_XICS

void kvmppc_free_pimap(struct kvm *kvm)
{
	kfree(kvm->arch.pimap);
}

static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
{
	return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
}

static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
	struct irq_desc *desc;
	struct kvmppc_irq_map *irq_map;
	struct kvmppc_passthru_irqmap *pimap;
	struct irq_chip *chip;
	int i, rc = 0;

	if (!kvm_irq_bypass)
		return 1;

	desc = irq_to_desc(host_irq);
	if (!desc)
		return -EIO;

	mutex_lock(&kvm->lock);

	pimap = kvm->arch.pimap;
	if (pimap == NULL) {
		/* First call, allocate structure to hold IRQ map */
		pimap = kvmppc_alloc_pimap();
		if (pimap == NULL) {
			mutex_unlock(&kvm->lock);
			return -ENOMEM;
		}
		kvm->arch.pimap = pimap;
	}

	/*
	 * For now, we only support XIVE interrupts and interrupts for
	 * which the EOI operation is an OPAL call followed by a write
	 * to XIRR, since that's what our real-mode EOI code does.
	 */
	chip = irq_data_get_irq_chip(&desc->irq_data);
	if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) {
		pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n",
			host_irq, guest_gsi);
		mutex_unlock(&kvm->lock);
		return -ENOENT;
	}

	/*
	 * See if we already have an entry for this guest IRQ number.
	 * If it's mapped to a hardware IRQ number, that's an error,
	 * otherwise re-use this entry.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (guest_gsi == pimap->mapped[i].v_hwirq) {
			if (pimap->mapped[i].r_hwirq) {
				mutex_unlock(&kvm->lock);
				return -EINVAL;
			}
			break;
		}
	}

	if (i == KVMPPC_PIRQ_MAPPED) {
		mutex_unlock(&kvm->lock);
		return -EAGAIN;		/* table is full */
	}

	irq_map = &pimap->mapped[i];

	irq_map->v_hwirq = guest_gsi;
	irq_map->desc = desc;

	/*
	 * Order the above two stores before the next to serialize with
	 * the KVM real mode handler.
	 */
	smp_wmb();
	irq_map->r_hwirq = desc->irq_data.hwirq;

	if (i == pimap->n_mapped)
		pimap->n_mapped++;

	if (xive_enabled())
		rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc);
	else
		kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
	if (rc)
		irq_map->r_hwirq = 0;

	mutex_unlock(&kvm->lock);

	return 0;
}

static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
{
	struct irq_desc *desc;
	struct kvmppc_passthru_irqmap *pimap;
	int i, rc = 0;

	if (!kvm_irq_bypass)
		return 0;

	desc = irq_to_desc(host_irq);
	if (!desc)
		return -EIO;

	mutex_lock(&kvm->lock);
	if (!kvm->arch.pimap)
		goto unlock;

	pimap = kvm->arch.pimap;

	for (i = 0; i < pimap->n_mapped; i++) {
		if (guest_gsi == pimap->mapped[i].v_hwirq)
			break;
	}

	if (i == pimap->n_mapped) {
		mutex_unlock(&kvm->lock);
		return -ENODEV;
	}

	if (xive_enabled())
		rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc);
	else
		kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);

	/* invalidate the entry (what to do on error from the above?) */
	pimap->mapped[i].r_hwirq = 0;

	/*
	 * We don't free this structure even when the count goes to
	 * zero. The structure is freed when we destroy the VM.
	 */
 unlock:
	mutex_unlock(&kvm->lock);
	return rc;
}

static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
					     struct irq_bypass_producer *prod)
{
	int ret = 0;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = prod;

	ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
	if (ret)
		pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
			prod->irq, irqfd->gsi, ret);

	return ret;
}

static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
					      struct irq_bypass_producer *prod)
{
	int ret;
	struct kvm_kernel_irqfd *irqfd =
		container_of(cons, struct kvm_kernel_irqfd, consumer);

	irqfd->producer = NULL;

	/*
	 * When the producer for a consumer is unregistered, we change
	 * back to the default external interrupt handling mode - KVM
	 * real mode will switch back to the host.
	 */
	ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
	if (ret)
		pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
			prod->irq, irqfd->gsi, ret);
}
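
/*
 * Illustrative note (not from the original source): these two hooks are
 * invoked by the generic irqbypass manager.  When a producer (e.g. a
 * VFIO device interrupt) and a consumer (a KVM irqfd) register matching
 * tokens, the core calls ->add_producer(), which wires the host IRQ to
 * the guest GSI via kvmppc_set_passthru_irq(); unregistering either
 * side triggers ->del_producer(), which tears the mapping down again.
 */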
#endif

static long kvm_arch_vm_ioctl_hv(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm __maybe_unused = filp->private_data;
	void __user *argp = (void __user *)arg;
	long r;

	switch (ioctl) {

	case KVM_PPC_ALLOCATE_HTAB: {
		u32 htab_order;

		r = -EFAULT;
		if (get_user(htab_order, (u32 __user *)argp))
			break;
		r = kvmppc_alloc_reset_hpt(kvm, htab_order);
		if (r)
			break;
		r = 0;
		break;
	}

	case KVM_PPC_GET_HTAB_FD: {
		struct kvm_get_htab_fd ghf;

		r = -EFAULT;
		if (copy_from_user(&ghf, argp, sizeof(ghf)))
			break;
		r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
		break;
	}

	case KVM_PPC_RESIZE_HPT_PREPARE: {
		struct kvm_ppc_resize_hpt rhpt;

		r = -EFAULT;
		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
			break;

		r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt);
		break;
	}

	case KVM_PPC_RESIZE_HPT_COMMIT: {
		struct kvm_ppc_resize_hpt rhpt;

		r = -EFAULT;
		if (copy_from_user(&rhpt, argp, sizeof(rhpt)))
			break;

		r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt);
		break;
	}

	default:
		r = -ENOTTY;
	}

	return r;
}
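
/*
 * Illustrative sketch (not from the original source): from userspace,
 * KVM_PPC_ALLOCATE_HTAB takes a pointer to a u32 HPT order, e.g.
 *
 *	u32 order = 24;		// request a 2^24 = 16 MiB HPT
 *	ioctl(vmfd, KVM_PPC_ALLOCATE_HTAB, &order);
 *
 * so userspace (e.g. QEMU) can size the guest's hash page table before
 * starting vcpus.
 */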

/*
 * List of hcall numbers to enable by default.
 * For compatibility with old userspace, we enable by default
 * all hcalls that were implemented before the hcall-enabling
 * facility was added.  Note this list should not include H_RTAS.
 */
static unsigned int default_hcall_list[] = {
	H_REMOVE,
	H_ENTER,
	H_READ,
	H_PROTECT,
	H_BULK_REMOVE,
	H_GET_TCE,
	H_PUT_TCE,
	H_SET_DABR,
	H_SET_XDABR,
	H_CEDE,
	H_PROD,
	H_CONFER,
	H_REGISTER_VPA,
#ifdef CONFIG_KVM_XICS
	H_EOI,
	H_CPPR,
	H_IPI,
	H_IPOLL,
	H_XIRR,
	H_XIRR_X,
#endif
	0
};

static void init_default_hcalls(void)
{
	int i;
	unsigned int hcall;

	for (i = 0; default_hcall_list[i]; ++i) {
		hcall = default_hcall_list[i];
		WARN_ON(!kvmppc_hcall_impl_hv(hcall));
		__set_bit(hcall / 4, default_enabled_hcalls);
	}
}
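
/*
 * Illustrative note (not from the original source): hcall numbers are
 * multiples of 4, which is why the bitmap is indexed by hcall / 4.  For
 * example H_REMOVE (0x04) lands in bit 1 and H_ENTER (0x08) in bit 2,
 * so the bitmap only needs MAX_HCALL_OPCODE / 4 + 1 bits in total.
 */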

static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	unsigned long lpcr;
	int radix;

	/* If not on a POWER9, reject it */
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;

	/* If any unknown flags set, reject it */
	if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
		return -EINVAL;

	/* We can't change a guest to/from radix yet */
	radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
	if (radix != kvm_is_radix(kvm))
		return -EINVAL;

	/* GR (guest radix) bit in process_table field must match */
	if (!!(cfg->process_table & PATB_GR) != radix)
		return -EINVAL;

	/* Process table size field must be reasonable, i.e. <= 24 */
	if ((cfg->process_table & PRTS_MASK) > 24)
		return -EINVAL;

	kvm->arch.process_table = cfg->process_table;
	kvmppc_setup_partition_table(kvm);

	lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
	kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);

	return 0;
}
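
/*
 * Illustrative note (not from the original source), assuming the ISA 3.0
 * encoding: the PRTS field gives the process table size as
 * 2^(12 + PRTS) bytes, so the "<= 24" check above caps the table at
 * 2^36 bytes, and PRTS = 0 means the 4 KiB minimum.
 */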

static struct kvmppc_ops kvm_ops_hv = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
	.get_one_reg = kvmppc_get_one_reg_hv,
	.set_one_reg = kvmppc_set_one_reg_hv,
	.vcpu_load = kvmppc_core_vcpu_load_hv,
	.vcpu_put = kvmppc_core_vcpu_put_hv,
	.set_msr = kvmppc_set_msr_hv,
	.vcpu_run = kvmppc_vcpu_run_hv,
	.vcpu_create = kvmppc_core_vcpu_create_hv,
	.vcpu_free = kvmppc_core_vcpu_free_hv,
	.check_requests = kvmppc_core_check_requests_hv,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
	.flush_memslot = kvmppc_core_flush_memslot_hv,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
	.commit_memory_region = kvmppc_core_commit_memory_region_hv,
	.unmap_hva = kvm_unmap_hva_hv,
	.unmap_hva_range = kvm_unmap_hva_range_hv,
	.age_hva = kvm_age_hva_hv,
	.test_age_hva = kvm_test_age_hva_hv,
	.set_spte_hva = kvm_set_spte_hva_hv,
	.mmu_destroy = kvmppc_mmu_destroy_hv,
	.free_memslot = kvmppc_core_free_memslot_hv,
	.create_memslot = kvmppc_core_create_memslot_hv,
	.init_vm = kvmppc_core_init_vm_hv,
	.destroy_vm = kvmppc_core_destroy_vm_hv,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
	.emulate_op = kvmppc_core_emulate_op_hv,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
	.fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
	.hcall_implemented = kvmppc_hcall_impl_hv,
#ifdef CONFIG_KVM_XICS
	.irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
	.irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
#endif
	.configure_mmu = kvmhv_configure_mmu,
	.get_rmmu_info = kvmhv_get_rmmu_info,
	.set_smt_mode = kvmhv_set_smt_mode,
};

static int kvm_init_subcore_bitmap(void)
{
	int i, j;
	int nr_cores = cpu_nr_cores();
	struct sibling_subcore_state *sibling_subcore_state;

	for (i = 0; i < nr_cores; i++) {
		int first_cpu = i * threads_per_core;
		int node = cpu_to_node(first_cpu);

		/* Ignore if it is already allocated. */
		if (paca[first_cpu].sibling_subcore_state)
			continue;

		sibling_subcore_state =
			kmalloc_node(sizeof(struct sibling_subcore_state),
				     GFP_KERNEL, node);
		if (!sibling_subcore_state)
			return -ENOMEM;

		memset(sibling_subcore_state, 0,
		       sizeof(struct sibling_subcore_state));

		for (j = 0; j < threads_per_core; j++) {
			int cpu = first_cpu + j;

			paca[cpu].sibling_subcore_state = sibling_subcore_state;
		}
	}
	return 0;
}
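
/*
 * Illustrative note (not from the original source): the kmalloc_node()
 * followed by memset() above is equivalent to a single
 * kzalloc_node(sizeof(struct sibling_subcore_state), GFP_KERNEL, node).
 * The allocation is made NUMA-local to the core's first CPU so the
 * shared subcore state is cheap for all sibling threads to touch.
 */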

static int kvmppc_radix_possible(void)
{
	return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
}

static int kvmppc_book3s_init_hv(void)
{
	int r;
	/*
	 * FIXME!! Do we need to check on all cpus?
	 */
	r = kvmppc_core_check_processor_compat_hv();
	if (r < 0)
		return -ENODEV;

	r = kvm_init_subcore_bitmap();
	if (r)
		return r;

	/*
	 * We need a way of accessing the XICS interrupt controller,
	 * either directly, via paca[cpu].kvm_hstate.xics_phys, or
	 * indirectly, via OPAL.
	 */
#ifdef CONFIG_SMP
	if (!xive_enabled() && !local_paca->kvm_hstate.xics_phys) {
		struct device_node *np;

		np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
		if (!np) {
			pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
			return -ENODEV;
		}
	}
#endif

	kvm_ops_hv.owner = THIS_MODULE;
	kvmppc_hv_ops = &kvm_ops_hv;

	init_default_hcalls();

	init_vcore_lists();

	r = kvmppc_mmu_hv_init();
	if (r)
		return r;

	if (kvmppc_radix_possible())
		r = kvmppc_radix_init();
	return r;
}

static void kvmppc_book3s_exit_hv(void)
{
	kvmppc_free_host_rm_ops();
	if (kvmppc_radix_possible())
		kvmppc_radix_exit();
	kvmppc_hv_ops = NULL;
}

module_init(kvmppc_book3s_init_hv);
module_exit(kvmppc_book3s_exit_hv);
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");