/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass,
							S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
							S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

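/*
 * A typical traversal (a minimal sketch; handle_thread() is a
 * hypothetical callback, not a function in this file):
 *
 *	int i;
 *	struct kvm_vcpu *v;
 *
 *	for_each_runnable_thread(i, v, vc)
 *		handle_thread(v);
 *
 * Seeding i with -1 makes next_runnable_thread() start its scan at
 * index 0; the loop ends when no later slot holds a vcpu.
 */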
static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca[cpu].kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

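/*
 * Kick a vcpu out of its wait state: first wake its task if it is
 * sleeping on the vcpu wait queue, then try a direct IPI to the
 * hardware thread the vcpu last ran on, and finally fall back to a
 * reschedule IPI to the vcpu's current cpu.
 */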
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct swait_queue_head *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swait_active(wqp)) {
		swake_up(wqp);
		++vcpu->stat.halt_wakeup;
	}

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by vcore->stoltb_lock.
 * The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

/* Dummy value used in computing PCR value below */
#define PCR_ARCH_300	(PCR_ARCH_207 << 1)

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/* Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit */
	vc->pcr = host_pcr_bit - guest_pcr_bit;
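	/*
	 * The subtraction works because the PCR_ARCH_* values are an
	 * ascending sequence of single bits (with PCR_ARCH_300 as the
	 * dummy PCR_ARCH_207 << 1 defined above), so host - guest sets
	 * exactly the bits in that range.  E.g., assuming PCR_ARCH_207
	 * is 0x8, a POWER9 host running a POWER8-compat guest computes
	 * 0x10 - 0x8 = 0x8, i.e. just the v2.07 compatibility bit.
	 */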
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	struct kvm_vcpu *ret;

	mutex_lock(&kvm->lock);
	ret = kvm_get_vcpu_by_id(kvm, id);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

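/*
 * Note that set_vpa() only latches the request in next_gpa/len and
 * sets update_pending; the new area is actually pinned and made
 * current by kvmppc_update_vpas() below, before the vcpu next enters
 * the guest.
 */
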
/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

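/*
 * Append an entry to the guest's dispatch trace log (DTL), a ring
 * buffer shared with the guest, and account the stolen time since
 * the last dispatch.  The smp_wmb() below ensures the guest never
 * sees dtl_idx advance past an entry that is still being written.
 */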
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	default:
		return H_TOO_HARD;
	}
}

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

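/*
 * Handle hypercalls that have made it out of real mode.  Returns
 * RESUME_GUEST with the result in GPR3 when the call was handled
 * here, or RESUME_HOST to punt the hypercall to userspace.
 */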
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded)
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5),
				       kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6),
					 kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

static int kvmppc_emulate_debug_inst(struct kvm_run *run,
				     struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
	    EMULATE_DONE) {
		/*
		 * Fetch failed, so return to guest and
		 * try executing it again.
		 */
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}

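/*
 * Main guest-exit dispatcher.  The return value selects what happens
 * next: RESUME_GUEST re-enters the guest, RESUME_HOST exits to
 * userspace, and the RESUME_PAGE_FAULT / RESUME_PASSTHROUGH variants
 * defined above tell the caller to run the page-fault or passthrough
 * interrupt handling paths first.
 */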
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		printk(KERN_EMERG "KVM trap in HV mode!\n");
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		return RESUME_HOST;
	}
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/* hypercall with MSR_PR has already been handled in rmode,
		 * and never reaches here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If guest debug is disabled, generate a program interrupt
	 * to the guest.  If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
	 * Accordingly return to guest or host.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.emul_inst) :
				vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
			r = kvmppc_emulate_debug_inst(run, vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;
	/*
	 * This occurs if the guest (kernel or userspace) does something that
	 * is prohibited by HFSCR.  We just generate a program interrupt to
	 * the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
			    bool preserve_top32)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	mutex_lock(&kvm->lock);
	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}

	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 * On POWER8 and POWER9 userspace can also modify AIL (alt.
	 * interrupt location).
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		mask |= LPCR_AIL;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
	mutex_unlock(&kvm->lock);
}

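/*
 * Back end for the KVM_GET_ONE_REG ioctl (and, symmetrically,
 * kvmppc_set_one_reg_hv() below for KVM_SET_ONE_REG): copy a single
 * HV-specific guest register, identified by its KVM_REG_PPC_* id,
 * out of (or into) the vcpu state.
 */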
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DABRX:
		*val = get_reg_val(id, vcpu->arch.dabrx);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		*val = get_reg_val(id, vcpu->arch.spmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
	case KVM_REG_PPC_SIER:
		*val = get_reg_val(id, vcpu->arch.sier);
		break;
	case KVM_REG_PPC_IAMR:
		*val = get_reg_val(id, vcpu->arch.iamr);
		break;
	case KVM_REG_PPC_PSPB:
		*val = get_reg_val(id, vcpu->arch.pspb);
		break;
	case KVM_REG_PPC_DPDES:
		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
		break;
	case KVM_REG_PPC_DAWR:
		*val = get_reg_val(id, vcpu->arch.dawr);
		break;
	case KVM_REG_PPC_DAWRX:
		*val = get_reg_val(id, vcpu->arch.dawrx);
		break;
	case KVM_REG_PPC_CIABR:
		*val = get_reg_val(id, vcpu->arch.ciabr);
		break;
	case KVM_REG_PPC_CSIGR:
		*val = get_reg_val(id, vcpu->arch.csigr);
		break;
	case KVM_REG_PPC_TACR:
		*val = get_reg_val(id, vcpu->arch.tacr);
		break;
	case KVM_REG_PPC_TCSCR:
		*val = get_reg_val(id, vcpu->arch.tcscr);
		break;
	case KVM_REG_PPC_PID:
		*val = get_reg_val(id, vcpu->arch.pid);
		break;
	case KVM_REG_PPC_ACOP:
		*val = get_reg_val(id, vcpu->arch.acop);
		break;
	case KVM_REG_PPC_WORT:
		*val = get_reg_val(id, vcpu->arch.wort);
		break;
	case KVM_REG_PPC_TIDR:
		*val = get_reg_val(id, vcpu->arch.tid);
		break;
	case KVM_REG_PPC_PSSCR:
		*val = get_reg_val(id, vcpu->arch.psscr);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;
	unsigned long addr, len;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		/* Only allow this to be set to zero */
		if (set_reg_val(id, *val))
			r = -EINVAL;
		break;
	case KVM_REG_PPC_DABR:
		vcpu->arch.dabr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DABRX:
		vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP;
		break;
	case KVM_REG_PPC_DSCR:
		vcpu->arch.dscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PURR:
		vcpu->arch.purr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPURR:
		vcpu->arch.spurr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_AMR:
		vcpu->arch.amr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_UAMOR:
		vcpu->arch.uamor = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		vcpu->arch.mmcr[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		vcpu->arch.pmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		vcpu->arch.spmc[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIAR:
		vcpu->arch.siar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SDAR:
		vcpu->arch.sdar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_SIER:
		vcpu->arch.sier = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAMR:
		vcpu->arch.iamr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PSPB:
		vcpu->arch.pspb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DPDES:
		vcpu->arch.vcore->dpdes = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_VTB:
		vcpu->arch.vcore->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWR:
		vcpu->arch.dawr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAWRX:
		vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP;
		break;
	case KVM_REG_PPC_CIABR:
		vcpu->arch.ciabr = set_reg_val(id, *val);
		/* Don't allow setting breakpoints in hypervisor code */
		if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER)
			vcpu->arch.ciabr &= ~CIABR_PRIV;	/* disable */
		break;
	case KVM_REG_PPC_CSIGR:
		vcpu->arch.csigr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TACR:
		vcpu->arch.tacr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TCSCR:
		vcpu->arch.tcscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PID:
		vcpu->arch.pid = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_ACOP:
		vcpu->arch.acop = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_WORT:
		vcpu->arch.wort = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TIDR:
		vcpu->arch.tid = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_PSSCR:
		vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS;
		break;
	case KVM_REG_PPC_VPA_ADDR:
		addr = set_reg_val(id, *val);
		r = -EINVAL;
		if (!addr && (vcpu->arch.slb_shadow.next_gpa ||
			      vcpu->arch.dtl.next_gpa))
			break;
		r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca));
		break;
	case KVM_REG_PPC_VPA_SLB:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && !vcpu->arch.vpa.next_gpa)
			break;
		r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len);
		break;
	case KVM_REG_PPC_VPA_DTL:
		addr = val->vpaval.addr;
		len = val->vpaval.length;
		r = -EINVAL;
		if (addr && (len < sizeof(struct dtl_entry) ||
			     !vcpu->arch.vpa.next_gpa))
			break;
		len -= len % sizeof(struct dtl_entry);
		r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		/* round up to multiple of 2^24 */
		vcpu->arch.vcore->tb_offset =
			ALIGN(set_reg_val(id, *val), 1UL << 24);
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true);
		break;
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false);
		break;
	case KVM_REG_PPC_PPR:
		vcpu->arch.ppr = set_reg_val(id, *val);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		vcpu->arch.gpr_tm[i] = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

/*
 * On POWER9, threads are independent and can be in different partitions.
 * Therefore we consider each thread to be a subcore.
 * There is a restriction that all threads have to be in the same
 * MMU mode (radix or HPT), unfortunately, but since we only support
 * HPT guests on a HPT host so far, that isn't an impediment yet.
 */
static int threads_per_vcore(void)
{
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return 1;
	return threads_per_subcore;
}

static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core)
{
	struct kvmppc_vcore *vcore;

	vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL);

	if (vcore == NULL)
		return NULL;

	spin_lock_init(&vcore->lock);
	spin_lock_init(&vcore->stoltb_lock);
	init_swait_queue_head(&vcore->wq);
	vcore->preempt_tb = TB_NIL;
	vcore->lpcr = kvm->arch.lpcr;
	vcore->first_vcpuid = core * threads_per_vcore();
	vcore->kvm = kvm;
	INIT_LIST_HEAD(&vcore->preempt_list);

	return vcore;
}

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
static struct debugfs_timings_element {
	const char *name;
	size_t offset;
} timings[] = {
	{"rm_entry",	offsetof(struct kvm_vcpu, arch.rm_entry)},
	{"rm_intr",	offsetof(struct kvm_vcpu, arch.rm_intr)},
	{"rm_exit",	offsetof(struct kvm_vcpu, arch.rm_exit)},
	{"guest",	offsetof(struct kvm_vcpu, arch.guest_time)},
	{"cede",	offsetof(struct kvm_vcpu, arch.cede_time)},
};

#define N_TIMINGS	(sizeof(timings) / sizeof(timings[0]))

struct debugfs_timings_state {
	struct kvm_vcpu	*vcpu;
	unsigned int	buflen;
	char		buf[N_TIMINGS * 100];
};

static int debugfs_timings_open(struct inode *inode, struct file *file)
{
	struct kvm_vcpu *vcpu = inode->i_private;
	struct debugfs_timings_state *p;

	p = kzalloc(sizeof(*p), GFP_KERNEL);
	if (!p)
		return -ENOMEM;

	kvm_get_kvm(vcpu->kvm);
	p->vcpu = vcpu;
	file->private_data = p;

	return nonseekable_open(inode, file);
}

static int debugfs_timings_release(struct inode *inode, struct file *file)
{
	struct debugfs_timings_state *p = file->private_data;

	kvm_put_kvm(p->vcpu->kvm);
	kfree(p);
	return 0;
}

static ssize_t debugfs_timings_read(struct file *file, char __user *buf,
				    size_t len, loff_t *ppos)
{
	struct debugfs_timings_state *p = file->private_data;
	struct kvm_vcpu *vcpu = p->vcpu;
	char *s, *buf_end;
	struct kvmhv_tb_accumulator tb;
	u64 count;
	loff_t pos;
	ssize_t n;
	int i, loops;
	bool ok;

	if (!p->buflen) {
		s = p->buf;
		buf_end = s + sizeof(p->buf);
		for (i = 0; i < N_TIMINGS; ++i) {
			struct kvmhv_tb_accumulator *acc;

			acc = (struct kvmhv_tb_accumulator *)
				((unsigned long)vcpu + timings[i].offset);
			ok = false;
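			/*
			 * The accumulator is updated without locking;
			 * an even seqcount means no update is in flight,
			 * and an unchanged seqcount after the copy means
			 * the snapshot is consistent.  Otherwise retry,
			 * giving up after a bounded number of attempts.
			 */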
			for (loops = 0; loops < 1000; ++loops) {
				count = acc->seqcount;
				if (!(count & 1)) {
					smp_rmb();
					tb = *acc;
					smp_rmb();
					if (count == acc->seqcount) {
						ok = true;
						break;
					}
				}
				udelay(1);
			}
			if (!ok)
				snprintf(s, buf_end - s, "%s: stuck\n",
					 timings[i].name);
			else
				snprintf(s, buf_end - s,
					 "%s: %llu %llu %llu %llu\n",
					 timings[i].name, count / 2,
					 tb_to_ns(tb.tb_total),
					 tb_to_ns(tb.tb_min),
					 tb_to_ns(tb.tb_max));
			s += strlen(s);
		}
		p->buflen = s - p->buf;
	}

	pos = *ppos;
	if (pos >= p->buflen)
		return 0;
	if (len > p->buflen - pos)
		len = p->buflen - pos;
	n = copy_to_user(buf, p->buf + pos, len);
	if (n) {
		if (n == len)
			return -EFAULT;
		len -= n;
	}
	*ppos = pos + len;
	return len;
}

static ssize_t debugfs_timings_write(struct file *file, const char __user *buf,
				     size_t len, loff_t *ppos)
{
	return -EACCES;
}

static const struct file_operations debugfs_timings_ops = {
	.owner	 = THIS_MODULE,
	.open	 = debugfs_timings_open,
	.release = debugfs_timings_release,
	.read	 = debugfs_timings_read,
	.write	 = debugfs_timings_write,
	.llseek	 = generic_file_llseek,
};

/* Create a debugfs directory for the vcpu */
static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
{
	char buf[16];
	struct kvm *kvm = vcpu->kvm;

	snprintf(buf, sizeof(buf), "vcpu%u", id);
	if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
		return;
	vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir);
	if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir))
		return;
	vcpu->arch.debugfs_timings =
		debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir,
				    vcpu, &debugfs_timings_ops);
}

#else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */
static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id)
{
}
#endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */

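/*
 * Allocate and initialize a new vcpu.  vcpus are grouped into virtual
 * cores by vcpu id: with threads_per_vcore() == T, ids 0..T-1 share
 * vcore 0, ids T..2T-1 share vcore 1, and so on.  The first vcpu to
 * land on a core allocates the vcore itself.
 */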
3a167bea
AK
1755static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm,
1756 unsigned int id)
de56a948
PM
1757{
1758 struct kvm_vcpu *vcpu;
371fefd6
PM
1759 int err = -EINVAL;
1760 int core;
1761 struct kvmppc_vcore *vcore;
de56a948 1762
45c940ba 1763 core = id / threads_per_vcore();
371fefd6
PM
1764 if (core >= KVM_MAX_VCORES)
1765 goto out;
1766
1767 err = -ENOMEM;
6b75e6bf 1768 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
de56a948
PM
1769 if (!vcpu)
1770 goto out;
1771
1772 err = kvm_vcpu_init(vcpu, kvm, id);
1773 if (err)
1774 goto free_vcpu;
1775
1776 vcpu->arch.shared = &vcpu->arch.shregs;
5deb8e7a
AG
1777#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1778 /*
1779 * The shared struct is never shared on HV,
1780 * so we can always use host endianness
1781 */
1782#ifdef __BIG_ENDIAN__
1783 vcpu->arch.shared_big_endian = true;
1784#else
1785 vcpu->arch.shared_big_endian = false;
1786#endif
1787#endif
de56a948
PM
1788 vcpu->arch.mmcr[0] = MMCR0_FC;
1789 vcpu->arch.ctrl = CTRL_RUNLATCH;
1790 /* default to host PVR, since we can't spoof it */
3a167bea 1791 kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR));
2e25aa5f 1792 spin_lock_init(&vcpu->arch.vpa_update_lock);
c7b67670
PM
1793 spin_lock_init(&vcpu->arch.tbacct_lock);
1794 vcpu->arch.busy_preempt = TB_NIL;
d682916a 1795 vcpu->arch.intr_msr = MSR_SF | MSR_ME;
de56a948 1796
de56a948
PM
1797 kvmppc_mmu_book3s_hv_init(vcpu);
1798
8455d79e 1799 vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
371fefd6
PM
1800
1801 init_waitqueue_head(&vcpu->arch.cpu_run);
1802
1803 mutex_lock(&kvm->lock);
1804 vcore = kvm->arch.vcores[core];
1805 if (!vcore) {
de9bdd1a 1806 vcore = kvmppc_vcore_create(kvm, core);
371fefd6 1807 kvm->arch.vcores[core] = vcore;
1b400ba0 1808 kvm->arch.online_vcores++;
371fefd6
PM
1809 }
1810 mutex_unlock(&kvm->lock);
1811
1812 if (!vcore)
1813 goto free_vcpu;
1814
1815 spin_lock(&vcore->lock);
1816 ++vcore->num_threads;
371fefd6
PM
1817 spin_unlock(&vcore->lock);
1818 vcpu->arch.vcore = vcore;
e0b7ec05 1819 vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid;
ec257165 1820 vcpu->arch.thread_cpu = -1;
a29ebeaf 1821 vcpu->arch.prev_cpu = -1;
371fefd6 1822
af8f38b3
AG
1823 vcpu->arch.cpu_type = KVM_CPU_3S_64;
1824 kvmppc_sanity_check(vcpu);
1825
b6c295df
PM
1826 debugfs_vcpu_init(vcpu, id);
1827
de56a948
PM
1828 return vcpu;
1829
1830free_vcpu:
6b75e6bf 1831 kmem_cache_free(kvm_vcpu_cache, vcpu);
de56a948
PM
1832out:
1833 return ERR_PTR(err);
1834}
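
/*
 * Worked example for the vcore assignment above: with an SMT-8 virtual
 * core (threads_per_vcore() == 8), vcpu ids 0-7 map to virtual core 0
 * and ids 8-15 to virtual core 1.  Each vcpu's ptid is its offset from
 * the vcore's first_vcpuid, i.e. its thread slot within that virtual
 * core.
 */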

static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa)
{
	if (vpa->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa,
					vpa->dirty);
}

static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu)
{
	spin_lock(&vcpu->arch.vpa_update_lock);
	unpin_vpa(vcpu->kvm, &vcpu->arch.dtl);
	unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow);
	unpin_vpa(vcpu->kvm, &vcpu->arch.vpa);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu)
{
	/* Indicate we want to get back into the guest */
	return 1;
}

static void kvmppc_set_timer(struct kvm_vcpu *vcpu)
{
	unsigned long dec_nsec, now;

	now = get_tb();
	if (now > vcpu->arch.dec_expires) {
		/* decrementer has already gone negative */
		kvmppc_core_queue_dec(vcpu);
		kvmppc_core_prepare_to_enter(vcpu);
		return;
	}
	dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC
		   / tb_ticks_per_sec;
	hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL);
	vcpu->arch.timer_running = 1;
}
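
/*
 * The conversion in kvmppc_set_timer() above turns a timebase-tick
 * delta into nanoseconds: ns = ticks * NSEC_PER_SEC / tb_ticks_per_sec.
 * For example, assuming the usual 512 MHz POWER timebase, a delta of
 * 512 ticks programs the hrtimer for roughly one microsecond.
 */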

static void kvmppc_end_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.ceded = 0;
	if (vcpu->arch.timer_running) {
		hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
		vcpu->arch.timer_running = 0;
	}
}

extern void __kvmppc_vcore_entry(void);

static void kvmppc_remove_runnable(struct kvmppc_vcore *vc,
				   struct kvm_vcpu *vcpu)
{
	u64 now;

	if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	now = mftb();
	vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) -
		vcpu->arch.stolen_logged;
	vcpu->arch.busy_preempt = now;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	--vc->n_runnable;
	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL);
}

static int kvmppc_grab_hwthread(int cpu)
{
	struct paca_struct *tpaca;
	long timeout = 10000;

	tpaca = &paca[cpu];

	/* Ensure the thread won't go into the kernel if it wakes */
	tpaca->kvm_hstate.kvm_vcpu = NULL;
	tpaca->kvm_hstate.kvm_vcore = NULL;
	tpaca->kvm_hstate.napping = 0;
	smp_wmb();
	tpaca->kvm_hstate.hwthread_req = 1;

	/*
	 * If the thread is already executing in the kernel (e.g. handling
	 * a stray interrupt), wait for it to get back to nap mode.
	 * The smp_mb() is to ensure that our setting of hwthread_req
	 * is visible before we look at hwthread_state, so if this
	 * races with the code at system_reset_pSeries and the thread
	 * misses our setting of hwthread_req, we are sure to see its
	 * setting of hwthread_state, and vice versa.
	 */
	smp_mb();
	while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) {
		if (--timeout <= 0) {
			pr_err("KVM: couldn't grab cpu %d\n", cpu);
			return -EBUSY;
		}
		udelay(1);
	}
	return 0;
}

static void kvmppc_release_hwthread(int cpu)
{
	struct paca_struct *tpaca;

	tpaca = &paca[cpu];
	tpaca->kvm_hstate.hwthread_req = 0;
	tpaca->kvm_hstate.kvm_vcpu = NULL;
	tpaca->kvm_hstate.kvm_vcore = NULL;
	tpaca->kvm_hstate.kvm_split_mode = NULL;
}

static void do_nothing(void *x)
{
}

static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu)
{
	int i;

	cpu = cpu_first_thread_sibling(cpu);
	cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush);
	/*
	 * Make sure setting of bit in need_tlb_flush precedes
	 * testing of cpu_in_guest bits.  The matching barrier on
	 * the other side is the first smp_mb() in kvmppc_run_core().
	 */
	smp_mb();
	for (i = 0; i < threads_per_core; ++i)
		if (cpumask_test_cpu(cpu + i, &kvm->arch.cpu_in_guest))
			smp_call_function_single(cpu + i, do_nothing, NULL, 1);
}
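
/*
 * The empty do_nothing() IPI above is not wasted work: delivering an
 * interrupt to a thread that is currently in the guest forces it out
 * to the hypervisor, and on its next guest entry it will observe the
 * bit just set in need_tlb_flush and flush the TLB before resuming.
 */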

static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc)
{
	int cpu;
	struct paca_struct *tpaca;
	struct kvmppc_vcore *mvc = vc->master_vcore;
	struct kvm *kvm = vc->kvm;

	cpu = vc->pcpu;
	if (vcpu) {
		if (vcpu->arch.timer_running) {
			hrtimer_try_to_cancel(&vcpu->arch.dec_timer);
			vcpu->arch.timer_running = 0;
		}
		cpu += vcpu->arch.ptid;
		vcpu->cpu = mvc->pcpu;
		vcpu->arch.thread_cpu = cpu;

		/*
		 * With radix, the guest can do TLB invalidations itself,
		 * and it could choose to use the local form (tlbiel) if
		 * it is invalidating a translation that has only ever been
		 * used on one vcpu.  However, that doesn't mean it has
		 * only ever been used on one physical cpu, since vcpus
		 * can move around between pcpus.  To cope with this, when
		 * a vcpu moves from one pcpu to another, we need to tell
		 * any vcpus running on the same core as this vcpu previously
		 * ran to flush the TLB.  The TLB is shared between threads,
		 * so we use a single bit in .need_tlb_flush for all 4 threads.
		 */
		if (kvm_is_radix(kvm) && vcpu->arch.prev_cpu != cpu) {
			if (vcpu->arch.prev_cpu >= 0 &&
			    cpu_first_thread_sibling(vcpu->arch.prev_cpu) !=
			    cpu_first_thread_sibling(cpu))
				radix_flush_cpu(kvm, vcpu->arch.prev_cpu, vcpu);
			vcpu->arch.prev_cpu = cpu;
		}
		cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest);
	}
	tpaca = &paca[cpu];
	tpaca->kvm_hstate.kvm_vcpu = vcpu;
	tpaca->kvm_hstate.ptid = cpu - mvc->pcpu;
	/* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */
	smp_wmb();
	tpaca->kvm_hstate.kvm_vcore = mvc;
	if (cpu != smp_processor_id())
		kvmppc_ipi_thread(cpu);
}
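
/*
 * Note the publication order in kvmppc_start_thread() above: kvm_vcore
 * in the target thread's PACA is written last, after kvm_vcpu and ptid,
 * so a secondary thread that wakes up and sees a non-NULL kvm_vcore
 * pointer is guaranteed to see the rest of its hstate already filled
 * in.  kvmppc_wait_for_nap() below relies on the converse: a thread
 * clears its kvm_vcore pointer only once it has finished.
 */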

static void kvmppc_wait_for_nap(void)
{
	int cpu = smp_processor_id();
	int i, loops;
	int n_threads = threads_per_vcore();

	if (n_threads <= 1)
		return;
	for (loops = 0; loops < 1000000; ++loops) {
		/*
		 * Check if all threads are finished.
		 * We set the vcore pointer when starting a thread
		 * and the thread clears it when finished, so we look
		 * for any threads that still have a non-NULL vcore ptr.
		 */
		for (i = 1; i < n_threads; ++i)
			if (paca[cpu + i].kvm_hstate.kvm_vcore)
				break;
		if (i == n_threads) {
			HMT_medium();
			return;
		}
		HMT_low();
	}
	HMT_medium();
	for (i = 1; i < n_threads; ++i)
		if (paca[cpu + i].kvm_hstate.kvm_vcore)
			pr_err("KVM: CPU %d seems to be stuck\n", cpu + i);
}

/*
 * Check that we are on thread 0 and that any other threads in
 * this core are off-line.  Then grab the threads so they can't
 * enter the kernel.
 */
static int on_primary_thread(void)
{
	int cpu = smp_processor_id();
	int thr;

	/* Are we on a primary subcore? */
	if (cpu_thread_in_subcore(cpu))
		return 0;

	thr = 0;
	while (++thr < threads_per_subcore)
		if (cpu_online(cpu + thr))
			return 0;

	/* Grab all hw threads so they can't go into the kernel */
	for (thr = 1; thr < threads_per_subcore; ++thr) {
		if (kvmppc_grab_hwthread(cpu + thr)) {
			/* Couldn't grab one; let the others go */
			do {
				kvmppc_release_hwthread(cpu + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}

/*
 * A list of virtual cores for each physical CPU.
 * These are vcores that could run but their runner VCPU tasks are
 * (or may be) preempted.
 */
struct preempted_vcore_list {
	struct list_head	list;
	spinlock_t		lock;
};

static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores);

static void init_vcore_lists(void)
{
	int cpu;

	for_each_possible_cpu(cpu) {
		struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu);
		spin_lock_init(&lp->lock);
		INIT_LIST_HEAD(&lp->list);
	}
}

static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc)
{
	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);

	vc->vcore_state = VCORE_PREEMPT;
	vc->pcpu = smp_processor_id();
	if (vc->num_threads < threads_per_vcore()) {
		spin_lock(&lp->lock);
		list_add_tail(&vc->preempt_list, &lp->list);
		spin_unlock(&lp->lock);
	}

	/* Start accumulating stolen time */
	kvmppc_core_start_stolen(vc);
}

static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc)
{
	struct preempted_vcore_list *lp;

	kvmppc_core_end_stolen(vc);
	if (!list_empty(&vc->preempt_list)) {
		lp = &per_cpu(preempted_vcores, vc->pcpu);
		spin_lock(&lp->lock);
		list_del_init(&vc->preempt_list);
		spin_unlock(&lp->lock);
	}
	vc->vcore_state = VCORE_INACTIVE;
}

/*
 * This stores information about the virtual cores currently
 * assigned to a physical core.
 */
struct core_info {
	int		n_subcores;
	int		max_subcore_threads;
	int		total_threads;
	int		subcore_threads[MAX_SUBCORES];
	struct kvm	*subcore_vm[MAX_SUBCORES];
	struct list_head vcs[MAX_SUBCORES];
};

/*
 * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7
 * respectively in 2-way micro-threading (split-core) mode.
 */
static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 };
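
/*
 * The same map also works in 4-way mode: each subcore then gets
 * MAX_SMT_THREADS / 4 = 2 threads, so subcores 0-3 start at threads
 * 0, 4, 2 and 6 respectively.
 */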

static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc)
{
	int sub;

	memset(cip, 0, sizeof(*cip));
	cip->n_subcores = 1;
	cip->max_subcore_threads = vc->num_threads;
	cip->total_threads = vc->num_threads;
	cip->subcore_threads[0] = vc->num_threads;
	cip->subcore_vm[0] = vc->kvm;
	for (sub = 0; sub < MAX_SUBCORES; ++sub)
		INIT_LIST_HEAD(&cip->vcs[sub]);
	list_add_tail(&vc->preempt_list, &cip->vcs[0]);
}

static bool subcore_config_ok(int n_subcores, int n_threads)
{
	/* Can only dynamically split if unsplit to begin with */
	if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS)
		return false;
	if (n_subcores > MAX_SUBCORES)
		return false;
	if (n_subcores > 1) {
		if (!(dynamic_mt_modes & 2))
			n_subcores = 4;
		if (n_subcores > 2 && !(dynamic_mt_modes & 4))
			return false;
	}

	return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS;
}
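
/*
 * Worked example for subcore_config_ok(): 2 subcores whose largest
 * vcore has 3 threads gives 2 * roundup_pow_of_two(3) = 2 * 4 = 8,
 * which fits an 8-thread core, so the split is allowed (provided 2-way
 * mode is enabled in dynamic_mt_modes).  Three such subcores would
 * need 3 * 4 = 12 thread slots and are rejected.
 */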

static void init_master_vcore(struct kvmppc_vcore *vc)
{
	vc->master_vcore = vc;
	vc->entry_exit_map = 0;
	vc->in_guest = 0;
	vc->napping_threads = 0;
	vc->conferring_threads = 0;
}

static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip)
{
	int n_threads = vc->num_threads;
	int sub;

	if (!cpu_has_feature(CPU_FTR_ARCH_207S))
		return false;

	if (n_threads < cip->max_subcore_threads)
		n_threads = cip->max_subcore_threads;
	if (!subcore_config_ok(cip->n_subcores + 1, n_threads))
		return false;
	cip->max_subcore_threads = n_threads;

	sub = cip->n_subcores;
	++cip->n_subcores;
	cip->total_threads += vc->num_threads;
	cip->subcore_threads[sub] = vc->num_threads;
	cip->subcore_vm[sub] = vc->kvm;
	init_master_vcore(vc);
	list_move_tail(&vc->preempt_list, &cip->vcs[sub]);

	return true;
}

/*
 * Work out whether it is possible to piggyback the execution of
 * vcore *pvc onto the execution of the other vcores described in *cip.
 */
static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip,
			  int target_threads)
{
	if (cip->total_threads + pvc->num_threads > target_threads)
		return false;

	return can_dynamic_split(pvc, cip);
}

static void prepare_threads(struct kvmppc_vcore *vc)
{
	int i;
	struct kvm_vcpu *vcpu;

	for_each_runnable_thread(i, vcpu, vc) {
		if (signal_pending(vcpu->arch.run_task))
			vcpu->arch.ret = -EINTR;
		else if (vcpu->arch.vpa.update_pending ||
			 vcpu->arch.slb_shadow.update_pending ||
			 vcpu->arch.dtl.update_pending)
			vcpu->arch.ret = RESUME_GUEST;
		else
			continue;
		kvmppc_remove_runnable(vc, vcpu);
		wake_up(&vcpu->arch.cpu_run);
	}
}

static void collect_piggybacks(struct core_info *cip, int target_threads)
{
	struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores);
	struct kvmppc_vcore *pvc, *vcnext;

	spin_lock(&lp->lock);
	list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) {
		if (!spin_trylock(&pvc->lock))
			continue;
		prepare_threads(pvc);
		if (!pvc->n_runnable) {
			list_del_init(&pvc->preempt_list);
			if (pvc->runner == NULL) {
				pvc->vcore_state = VCORE_INACTIVE;
				kvmppc_core_end_stolen(pvc);
			}
			spin_unlock(&pvc->lock);
			continue;
		}
		if (!can_piggyback(pvc, cip, target_threads)) {
			spin_unlock(&pvc->lock);
			continue;
		}
		kvmppc_core_end_stolen(pvc);
		pvc->vcore_state = VCORE_PIGGYBACK;
		if (cip->total_threads >= target_threads)
			break;
	}
	spin_unlock(&lp->lock);
}

static void post_guest_process(struct kvmppc_vcore *vc, bool is_master)
{
	int still_running = 0, i;
	u64 now;
	long ret;
	struct kvm_vcpu *vcpu;

	spin_lock(&vc->lock);
	now = get_tb();
	for_each_runnable_thread(i, vcpu, vc) {
		/* cancel pending dec exception if dec is positive */
		if (now < vcpu->arch.dec_expires &&
		    kvmppc_core_pending_dec(vcpu))
			kvmppc_core_dequeue_dec(vcpu);

		trace_kvm_guest_exit(vcpu);

		ret = RESUME_GUEST;
		if (vcpu->arch.trap)
			ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu,
						    vcpu->arch.run_task);

		vcpu->arch.ret = ret;
		vcpu->arch.trap = 0;

		if (is_kvmppc_resume_guest(vcpu->arch.ret)) {
			if (vcpu->arch.pending_exceptions)
				kvmppc_core_prepare_to_enter(vcpu);
			if (vcpu->arch.ceded)
				kvmppc_set_timer(vcpu);
			else
				++still_running;
		} else {
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
	list_del_init(&vc->preempt_list);
	if (!is_master) {
		if (still_running > 0) {
			kvmppc_vcore_preempt(vc);
		} else if (vc->runner) {
			vc->vcore_state = VCORE_PREEMPT;
			kvmppc_core_start_stolen(vc);
		} else {
			vc->vcore_state = VCORE_INACTIVE;
		}
		if (vc->n_runnable > 0 && vc->runner == NULL) {
			/* make sure there's a candidate runner awake */
			i = -1;
			vcpu = next_runnable_thread(vc, &i);
			wake_up(&vcpu->arch.cpu_run);
		}
	}
	spin_unlock(&vc->lock);
}
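
/*
 * After a core run, post_guest_process() above leaves a non-master
 * vcore in one of three states: back on the preempted list if it still
 * has runnable, non-ceded threads; VCORE_PREEMPT (accumulating stolen
 * time) if it has a runner task but nothing to run right now; or
 * VCORE_INACTIVE otherwise.
 */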

/*
 * Clear core from the list of active host cores as we are about to
 * enter the guest.  Only do this if it is the primary thread of the
 * core (not if a subcore) that is entering the guest.
 */
static inline int kvmppc_clear_host_core(unsigned int cpu)
{
	int core;

	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
		return 0;
	/*
	 * Memory barrier can be omitted here as we will do a smp_wmb()
	 * later in kvmppc_start_thread and we need to ensure that state
	 * is visible to other CPUs only after we enter guest.
	 */
	core = cpu >> threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0;
	return 0;
}

/*
 * Advertise this core as an active host core since we exited the guest.
 * Only need to do this if it is the primary thread of the core that is
 * exiting.
 */
static inline int kvmppc_set_host_core(unsigned int cpu)
{
	int core;

	if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu))
		return 0;

	/*
	 * Memory barrier can be omitted here because we do a spin_unlock
	 * immediately after this which provides the memory barrier.
	 */
	core = cpu >> threads_shift;
	kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1;
	return 0;
}

/*
 * Run a set of guest threads on a physical core.
 * Called with vc->lock held.
 */
static noinline void kvmppc_run_core(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu;
	int i;
	int srcu_idx;
	struct core_info core_info;
	struct kvmppc_vcore *pvc, *vcnext;
	struct kvm_split_mode split_info, *sip;
	int split, subcore_size, active;
	int sub;
	bool thr0_done;
	unsigned long cmd_bit, stat_bit;
	int pcpu, thr;
	int target_threads;
	int controlled_threads;

	/*
	 * Remove from the list any threads that have a signal pending
	 * or need a VPA update done
	 */
	prepare_threads(vc);

	/* if the runner is no longer runnable, let the caller pick a new one */
	if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE)
		return;

	/*
	 * Initialize *vc.
	 */
	init_master_vcore(vc);
	vc->preempt_tb = TB_NIL;

	/*
	 * Number of threads that we will be controlling: the same as
	 * the number of threads per subcore, except on POWER9,
	 * where it's 1 because the threads are (mostly) independent.
	 */
	controlled_threads = threads_per_vcore();

	/*
	 * Make sure we are running on primary threads, and that secondary
	 * threads are offline.  Also check if the number of threads in this
	 * guest is greater than the current system threads per guest.
	 */
	if ((controlled_threads > 1) &&
	    ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) {
		for_each_runnable_thread(i, vcpu, vc) {
			vcpu->arch.ret = -EBUSY;
			kvmppc_remove_runnable(vc, vcpu);
			wake_up(&vcpu->arch.cpu_run);
		}
		goto out;
	}

	/*
	 * See if we could run any other vcores on the physical core
	 * along with this one.
	 */
	init_core_info(&core_info, vc);
	pcpu = smp_processor_id();
	target_threads = controlled_threads;
	if (target_smt_mode && target_smt_mode < target_threads)
		target_threads = target_smt_mode;
	if (vc->num_threads < target_threads)
		collect_piggybacks(&core_info, target_threads);

	/* Decide on micro-threading (split-core) mode */
	subcore_size = threads_per_subcore;
	cmd_bit = stat_bit = 0;
	split = core_info.n_subcores;
	sip = NULL;
	if (split > 1) {
		/* threads_per_subcore must be MAX_SMT_THREADS (8) here */
		if (split == 2 && (dynamic_mt_modes & 2)) {
			cmd_bit = HID0_POWER8_1TO2LPAR;
			stat_bit = HID0_POWER8_2LPARMODE;
		} else {
			split = 4;
			cmd_bit = HID0_POWER8_1TO4LPAR;
			stat_bit = HID0_POWER8_4LPARMODE;
		}
		subcore_size = MAX_SMT_THREADS / split;
		sip = &split_info;
		memset(&split_info, 0, sizeof(split_info));
		split_info.rpr = mfspr(SPRN_RPR);
		split_info.pmmar = mfspr(SPRN_PMMAR);
		split_info.ldbar = mfspr(SPRN_LDBAR);
		split_info.subcore_size = subcore_size;
		for (sub = 0; sub < core_info.n_subcores; ++sub)
			split_info.master_vcs[sub] =
				list_first_entry(&core_info.vcs[sub],
					struct kvmppc_vcore, preempt_list);
		/* order writes to split_info before kvm_split_mode pointer */
		smp_wmb();
	}
	pcpu = smp_processor_id();
	for (thr = 0; thr < controlled_threads; ++thr)
		paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip;

	/* Initiate micro-threading (split-core) if required */
	if (cmd_bit) {
		unsigned long hid0 = mfspr(SPRN_HID0);

		hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
		mb();
		mtspr(SPRN_HID0, hid0);
		isync();
		for (;;) {
			hid0 = mfspr(SPRN_HID0);
			if (hid0 & stat_bit)
				break;
			cpu_relax();
		}
	}

	kvmppc_clear_host_core(pcpu);

	/* Start all the threads */
	active = 0;
	for (sub = 0; sub < core_info.n_subcores; ++sub) {
		thr = subcore_thread_map[sub];
		thr0_done = false;
		active |= 1 << thr;
		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) {
			pvc->pcpu = pcpu + thr;
			for_each_runnable_thread(i, vcpu, pvc) {
				kvmppc_start_thread(vcpu, pvc);
				kvmppc_create_dtl_entry(vcpu, pvc);
				trace_kvm_guest_enter(vcpu);
				if (!vcpu->arch.ptid)
					thr0_done = true;
				active |= 1 << (thr + vcpu->arch.ptid);
			}
			/*
			 * We need to start the first thread of each subcore
			 * even if it doesn't have a vcpu.
			 */
			if (pvc->master_vcore == pvc && !thr0_done)
				kvmppc_start_thread(NULL, pvc);
			thr += pvc->num_threads;
		}
	}

	/*
	 * Ensure that split_info.do_nap is set after setting
	 * the vcore pointer in the PACA of the secondaries.
	 */
	smp_mb();
	if (cmd_bit)
		split_info.do_nap = 1;	/* ask secondaries to nap when done */

	/*
	 * When doing micro-threading, poke the inactive threads as well.
	 * This gets them to the nap instruction after kvm_do_nap,
	 * which reduces the time taken to unsplit later.
	 */
	if (split > 1)
		for (thr = 1; thr < threads_per_subcore; ++thr)
			if (!(active & (1 << thr)))
				kvmppc_ipi_thread(pcpu + thr);

	vc->vcore_state = VCORE_RUNNING;
	preempt_disable();

	trace_kvmppc_run_core(vc, 0);

	for (sub = 0; sub < core_info.n_subcores; ++sub)
		list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list)
			spin_unlock(&pvc->lock);

	guest_enter();

	srcu_idx = srcu_read_lock(&vc->kvm->srcu);

	__kvmppc_vcore_entry();

	srcu_read_unlock(&vc->kvm->srcu, srcu_idx);

	spin_lock(&vc->lock);
	/* prevent other vcpu threads from doing kvmppc_start_thread() now */
	vc->vcore_state = VCORE_EXITING;

	/* wait for secondary threads to finish writing their state to memory */
	kvmppc_wait_for_nap();

	/* Return to whole-core mode if we split the core earlier */
	if (split > 1) {
		unsigned long hid0 = mfspr(SPRN_HID0);
		unsigned long loops = 0;

		hid0 &= ~HID0_POWER8_DYNLPARDIS;
		stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE;
		mb();
		mtspr(SPRN_HID0, hid0);
		isync();
		for (;;) {
			hid0 = mfspr(SPRN_HID0);
			if (!(hid0 & stat_bit))
				break;
			cpu_relax();
			++loops;
		}
		split_info.do_nap = 0;
	}

	/* Let secondaries go back to the offline loop */
	for (i = 0; i < controlled_threads; ++i) {
		kvmppc_release_hwthread(pcpu + i);
		if (sip && sip->napped[i])
			kvmppc_ipi_thread(pcpu + i);
		cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest);
	}

	kvmppc_set_host_core(pcpu);

	spin_unlock(&vc->lock);

	/* make sure updates to secondary vcpu structs are visible now */
	smp_mb();
	guest_exit();

	for (sub = 0; sub < core_info.n_subcores; ++sub)
		list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub],
					 preempt_list)
			post_guest_process(pvc, pvc == vc);

	spin_lock(&vc->lock);
	preempt_enable();

 out:
	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_run_core(vc, 1);
}
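
/*
 * Shape of kvmppc_run_core() above, in order: prune unrunnable threads,
 * collect preempted vcores to piggyback on this core, pick a split-core
 * mode and reprogram HID0 if needed, start every thread via the PACA
 * handoff, enter the guest, then on exit wait for the secondaries to
 * nap, unsplit the core, release the hardware threads, and process each
 * vcore's exit results.
 */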

/*
 * Wait for some other vcpu thread to execute us, and
 * wake us up when we need to handle something in the host.
 */
static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc,
				 struct kvm_vcpu *vcpu, int wait_state)
{
	DEFINE_WAIT(wait);

	prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state);
	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		spin_unlock(&vc->lock);
		schedule();
		spin_lock(&vc->lock);
	}
	finish_wait(&vcpu->arch.cpu_run, &wait);
}

static void grow_halt_poll_ns(struct kvmppc_vcore *vc)
{
	/* 10us base */
	if (vc->halt_poll_ns == 0 && halt_poll_ns_grow)
		vc->halt_poll_ns = 10000;
	else
		vc->halt_poll_ns *= halt_poll_ns_grow;
}

static void shrink_halt_poll_ns(struct kvmppc_vcore *vc)
{
	if (halt_poll_ns_shrink == 0)
		vc->halt_poll_ns = 0;
	else
		vc->halt_poll_ns /= halt_poll_ns_shrink;
}
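
/*
 * Example of the adaptive polling above, assuming the generic KVM
 * module-parameter defaults of halt_poll_ns_grow = 2 and
 * halt_poll_ns_shrink = 0: the poll window grows 0 -> 10us -> 20us ->
 * 40us ... while wakeups keep arriving within the window, and drops
 * straight back to 0 the first time the vcore sleeps past the global
 * halt_poll_ns limit.
 */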

/*
 * Check to see if any of the runnable vcpus on the vcore have pending
 * exceptions or are no longer ceded
 */
static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc)
{
	struct kvm_vcpu *vcpu;
	int i;

	for_each_runnable_thread(i, vcpu, vc) {
		if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded ||
		    vcpu->arch.prodded)
			return 1;
	}

	return 0;
}

/*
 * All the vcpus in this vcore are idle, so wait for a decrementer
 * or external interrupt to one of the vcpus.  vc->lock is held.
 */
static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc)
{
	ktime_t cur, start_poll, start_wait;
	int do_sleep = 1;
	u64 block_ns;
	DECLARE_SWAITQUEUE(wait);

	/* Poll for pending exceptions and ceded state */
	cur = start_poll = ktime_get();
	if (vc->halt_poll_ns) {
		ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns);
		++vc->runner->stat.halt_attempted_poll;

		vc->vcore_state = VCORE_POLLING;
		spin_unlock(&vc->lock);

		do {
			if (kvmppc_vcore_check_block(vc)) {
				do_sleep = 0;
				break;
			}
			cur = ktime_get();
		} while (single_task_running() && ktime_before(cur, stop));

		spin_lock(&vc->lock);
		vc->vcore_state = VCORE_INACTIVE;

		if (!do_sleep) {
			++vc->runner->stat.halt_successful_poll;
			goto out;
		}
	}

	prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE);

	if (kvmppc_vcore_check_block(vc)) {
		finish_swait(&vc->wq, &wait);
		do_sleep = 0;
		/* If we polled, count this as a successful poll */
		if (vc->halt_poll_ns)
			++vc->runner->stat.halt_successful_poll;
		goto out;
	}

	start_wait = ktime_get();

	vc->vcore_state = VCORE_SLEEPING;
	trace_kvmppc_vcore_blocked(vc, 0);
	spin_unlock(&vc->lock);
	schedule();
	finish_swait(&vc->wq, &wait);
	spin_lock(&vc->lock);
	vc->vcore_state = VCORE_INACTIVE;
	trace_kvmppc_vcore_blocked(vc, 1);
	++vc->runner->stat.halt_successful_wait;

	cur = ktime_get();

out:
	block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll);

	/* Attribute wait time */
	if (do_sleep) {
		vc->runner->stat.halt_wait_ns +=
			ktime_to_ns(cur) - ktime_to_ns(start_wait);
		/* Attribute failed poll time */
		if (vc->halt_poll_ns)
			vc->runner->stat.halt_poll_fail_ns +=
				ktime_to_ns(start_wait) -
				ktime_to_ns(start_poll);
	} else {
		/* Attribute successful poll time */
		if (vc->halt_poll_ns)
			vc->runner->stat.halt_poll_success_ns +=
				ktime_to_ns(cur) -
				ktime_to_ns(start_poll);
	}

	/* Adjust poll time */
	if (halt_poll_ns) {
		if (block_ns <= vc->halt_poll_ns)
			;
		/* We slept and blocked for longer than the max halt time */
		else if (vc->halt_poll_ns && block_ns > halt_poll_ns)
			shrink_halt_poll_ns(vc);
		/* We slept and our poll time is too small */
		else if (vc->halt_poll_ns < halt_poll_ns &&
			 block_ns < halt_poll_ns)
			grow_halt_poll_ns(vc);
		if (vc->halt_poll_ns > halt_poll_ns)
			vc->halt_poll_ns = halt_poll_ns;
	} else
		vc->halt_poll_ns = 0;

	trace_kvmppc_vcore_wakeup(do_sleep, block_ns);
}
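
/*
 * Timekeeping in kvmppc_vcore_blocked() above: block_ns always spans
 * from the start of polling to the final wakeup.  If the vcore went to
 * sleep, the poll window counts as failed poll time and the remainder
 * as wait time; if a wakeup condition appeared during the window, the
 * whole interval counts as successful poll time.
 */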

static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int n_ceded, i;
	struct kvmppc_vcore *vc;
	struct kvm_vcpu *v;

	trace_kvmppc_run_vcpu_enter(vcpu);

	kvm_run->exit_reason = 0;
	vcpu->arch.ret = RESUME_GUEST;
	vcpu->arch.trap = 0;
	kvmppc_update_vpas(vcpu);

	/*
	 * Synchronize with other threads in this virtual core
	 */
	vc = vcpu->arch.vcore;
	spin_lock(&vc->lock);
	vcpu->arch.ceded = 0;
	vcpu->arch.run_task = current;
	vcpu->arch.kvm_run = kvm_run;
	vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb());
	vcpu->arch.state = KVMPPC_VCPU_RUNNABLE;
	vcpu->arch.busy_preempt = TB_NIL;
	WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu);
	++vc->n_runnable;

	/*
	 * This happens the first time this is called for a vcpu.
	 * If the vcore is already running, we may be able to start
	 * this thread straight away and have it join in.
	 */
	if (!signal_pending(current)) {
		if (vc->vcore_state == VCORE_PIGGYBACK) {
			struct kvmppc_vcore *mvc = vc->master_vcore;
			if (spin_trylock(&mvc->lock)) {
				if (mvc->vcore_state == VCORE_RUNNING &&
				    !VCORE_IS_EXITING(mvc)) {
					kvmppc_create_dtl_entry(vcpu, vc);
					kvmppc_start_thread(vcpu, vc);
					trace_kvm_guest_enter(vcpu);
				}
				spin_unlock(&mvc->lock);
			}
		} else if (vc->vcore_state == VCORE_RUNNING &&
			   !VCORE_IS_EXITING(vc)) {
			kvmppc_create_dtl_entry(vcpu, vc);
			kvmppc_start_thread(vcpu, vc);
			trace_kvm_guest_enter(vcpu);
		} else if (vc->vcore_state == VCORE_SLEEPING) {
			swake_up(&vc->wq);
		}

	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       !signal_pending(current)) {
		if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
			kvmppc_vcore_end_preempt(vc);

		if (vc->vcore_state != VCORE_INACTIVE) {
			kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE);
			continue;
		}
		for_each_runnable_thread(i, v, vc) {
			kvmppc_core_prepare_to_enter(v);
			if (signal_pending(v->arch.run_task)) {
				kvmppc_remove_runnable(vc, v);
				v->stat.signal_exits++;
				v->arch.kvm_run->exit_reason = KVM_EXIT_INTR;
				v->arch.ret = -EINTR;
				wake_up(&v->arch.cpu_run);
			}
		}
		if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE)
			break;
		n_ceded = 0;
		for_each_runnable_thread(i, v, vc) {
			if (!v->arch.pending_exceptions && !v->arch.prodded)
				n_ceded += v->arch.ceded;
			else
				v->arch.ceded = 0;
		}
		vc->runner = vcpu;
		if (n_ceded == vc->n_runnable) {
			kvmppc_vcore_blocked(vc);
		} else if (need_resched()) {
			kvmppc_vcore_preempt(vc);
			/* Let something else run */
			cond_resched_lock(&vc->lock);
			if (vc->vcore_state == VCORE_PREEMPT)
				kvmppc_vcore_end_preempt(vc);
		} else {
			kvmppc_run_core(vc);
		}
		vc->runner = NULL;
	}

	while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE &&
	       (vc->vcore_state == VCORE_RUNNING ||
		vc->vcore_state == VCORE_EXITING ||
		vc->vcore_state == VCORE_PIGGYBACK))
		kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE);

	if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL)
		kvmppc_vcore_end_preempt(vc);

	if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) {
		kvmppc_remove_runnable(vc, vcpu);
		vcpu->stat.signal_exits++;
		kvm_run->exit_reason = KVM_EXIT_INTR;
		vcpu->arch.ret = -EINTR;
	}

	if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) {
		/* Wake up some vcpu to run the core */
		i = -1;
		v = next_runnable_thread(vc, &i);
		wake_up(&v->arch.cpu_run);
	}

	trace_kvmppc_run_vcpu_exit(vcpu, kvm_run);
	spin_unlock(&vc->lock);
	return vcpu->arch.ret;
}

static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int r;
	int srcu_idx;

	if (!vcpu->arch.sane) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	kvmppc_core_prepare_to_enter(vcpu);

	/* No need to go into the guest when all we'll do is come back out */
	if (signal_pending(current)) {
		run->exit_reason = KVM_EXIT_INTR;
		return -EINTR;
	}

	atomic_inc(&vcpu->kvm->arch.vcpus_running);
	/* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */
	smp_mb();

	/* On the first time here, set up HTAB and VRMA */
	if (!kvm_is_radix(vcpu->kvm) && !vcpu->kvm->arch.hpte_setup_done) {
		r = kvmppc_hv_setup_htab_rma(vcpu);
		if (r)
			goto out;
	}

	flush_all_to_thread(current);

	vcpu->arch.wqp = &vcpu->arch.vcore->wq;
	vcpu->arch.pgdir = current->mm->pgd;
	vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST;

	do {
		r = kvmppc_run_vcpu(run, vcpu);

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL &&
		    !(vcpu->arch.shregs.msr & MSR_PR)) {
			trace_kvm_hcall_enter(vcpu);
			r = kvmppc_pseries_do_hcall(vcpu);
			trace_kvm_hcall_exit(vcpu, r);
			kvmppc_core_prepare_to_enter(vcpu);
		} else if (r == RESUME_PAGE_FAULT) {
			srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_book3s_hv_page_fault(run, vcpu,
				vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
			srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		} else if (r == RESUME_PASSTHROUGH)
			r = kvmppc_xics_rm_complete(vcpu, 0);
	} while (is_kvmppc_resume_guest(r));

 out:
	vcpu->arch.state = KVMPPC_VCPU_NOTREADY;
	atomic_dec(&vcpu->kvm->arch.vcpus_running);
	return r;
}

static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps,
				     int linux_psize)
{
	struct mmu_psize_def *def = &mmu_psize_defs[linux_psize];

	if (!def->shift)
		return;
	(*sps)->page_shift = def->shift;
	(*sps)->slb_enc = def->sllp;
	(*sps)->enc[0].page_shift = def->shift;
	(*sps)->enc[0].pte_enc = def->penc[linux_psize];
	/*
	 * Add 16MB MPSS support if host supports it
	 */
	if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) {
		(*sps)->enc[1].page_shift = 24;
		(*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M];
	}
	(*sps)++;
}

static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	struct kvm_ppc_one_seg_page_size *sps;

	/*
	 * Since we don't yet support HPT guests on a radix host,
	 * return an error if the host uses radix.
	 */
	if (radix_enabled())
		return -EINVAL;

	info->flags = KVM_PPC_PAGE_SIZES_REAL;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		info->flags |= KVM_PPC_1T_SEGMENTS;
	info->slb_size = mmu_slb_size;

	/* We only support these sizes for now, and no multi-size segments */
	sps = &info->sps[0];
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K);
	kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M);

	return 0;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int i, r;
	unsigned long n;
	unsigned long *buf;
	struct kvm_vcpu *vcpu;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	slots = kvm_memslots(kvm);
	memslot = id_to_memslot(slots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	/*
	 * Use second half of bitmap area because radix accumulates
	 * bits in the first half.
	 */
	n = kvm_dirty_bitmap_bytes(memslot);
	buf = memslot->dirty_bitmap + n / sizeof(long);
	memset(buf, 0, n);

	if (kvm_is_radix(kvm))
		r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf);
	else
		r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf);
	if (r)
		goto out;

	/* Harvest dirty bits from VPA and DTL updates */
	/* Note: we never modify the SLB shadow buffer areas */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf);
		kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}

	r = -EFAULT;
	if (copy_to_user(log->dirty_bitmap, buf, n))
		goto out;

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	if (!dont || free->arch.rmap != dont->arch.rmap) {
		vfree(free->arch.rmap);
		free->arch.rmap = NULL;
	}
}

static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	/*
	 * For now, if radix_enabled() then we only support radix guests,
	 * and in that case we don't need the rmap array.
	 */
	if (radix_enabled()) {
		slot->arch.rmap = NULL;
		return 0;
	}

	slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap));
	if (!slot->arch.rmap)
		return -ENOMEM;

	return 0;
}

static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	unsigned long npages = mem->memory_size >> PAGE_SHIFT;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	/*
	 * If we are making a new memslot, it might make
	 * some address that was previously cached as emulated
	 * MMIO be no longer emulated MMIO, so invalidate
	 * all the caches of emulated MMIO translations.
	 */
	if (npages)
		atomic64_inc(&kvm->arch.mmio_update);

	if (npages && old->npages && !kvm_is_radix(kvm)) {
		/*
		 * If modifying a memslot, reset all the rmap dirty bits.
		 * If this is a new memslot, we don't need to do anything
		 * since the rmap array starts out as all zeroes,
		 * i.e. no pages are dirty.
		 */
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, mem->slot);
		kvmppc_hv_get_dirty_log_hpt(kvm, memslot, NULL);
	}
}

/*
 * Update LPCR values in kvm->arch and in vcores.
 * Caller must hold kvm->lock.
 */
void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask)
{
	long int i;
	u32 cores_done = 0;

	if ((kvm->arch.lpcr & mask) == lpcr)
		return;

	kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr;

	for (i = 0; i < KVM_MAX_VCORES; ++i) {
		struct kvmppc_vcore *vc = kvm->arch.vcores[i];
		if (!vc)
			continue;
		spin_lock(&vc->lock);
		vc->lpcr = (vc->lpcr & ~mask) | lpcr;
		spin_unlock(&vc->lock);
		if (++cores_done >= kvm->arch.online_vcores)
			break;
	}
}
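
/*
 * kvmppc_update_lpcr() above is a read-modify-write under a mask: only
 * the bits set in 'mask' are replaced with the corresponding bits of
 * 'lpcr'.  For instance, the VRMA setup later in this file passes
 * LPCR_VRMASD as the mask so that only the VRMASD field changes.
 */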

static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu)
{
	return;
}

static void kvmppc_setup_partition_table(struct kvm *kvm)
{
	unsigned long dw0, dw1;

	if (!kvm_is_radix(kvm)) {
		/* PS field - page size for VRMA */
		dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) |
			((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1);
		/* HTABSIZE and HTABORG fields */
		dw0 |= kvm->arch.sdr1;

		/* Second dword as set by userspace */
		dw1 = kvm->arch.process_table;
	} else {
		dw0 = PATB_HR | radix__get_tree_size() |
			__pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE;
		dw1 = PATB_GR | kvm->arch.process_table;
	}

	mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1);
}

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu)
{
	int err = 0;
	struct kvm *kvm = vcpu->kvm;
	unsigned long hva;
	struct kvm_memory_slot *memslot;
	struct vm_area_struct *vma;
	unsigned long lpcr = 0, senc;
	unsigned long psize, porder;
	int srcu_idx;

	mutex_lock(&kvm->lock);
	if (kvm->arch.hpte_setup_done)
		goto out;	/* another vcpu beat us to it */

	/* Allocate hashed page table (if not done already) and reset it */
	if (!kvm->arch.hpt_virt) {
		err = kvmppc_alloc_hpt(kvm, NULL);
		if (err) {
			pr_err("KVM: Couldn't alloc HPT\n");
			goto out;
		}
	}

	/* Look up the memslot for guest physical address 0 */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, 0);

	/* We must have some memory at 0 by now */
	err = -EINVAL;
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto out_srcu;

	/* Look up the VMA for the start of this memory slot */
	hva = memslot->userspace_addr;
	down_read(&current->mm->mmap_sem);
	vma = find_vma(current->mm, hva);
	if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO))
		goto up_out;

	psize = vma_kernel_pagesize(vma);
	porder = __ilog2(psize);

	up_read(&current->mm->mmap_sem);

	/* We can handle 4k, 64k or 16M pages in the VRMA */
	err = -EINVAL;
	if (!(psize == 0x1000 || psize == 0x10000 ||
	      psize == 0x1000000))
		goto out_srcu;

	senc = slb_pgsize_encoding(psize);
	kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
		(VRMA_VSID << SLB_VSID_SHIFT_1T);
	/* Create HPTEs in the hash page table for the VRMA */
	kvmppc_map_vrma(vcpu, memslot, porder);

	/* Update VRMASD field in the LPCR */
	if (!cpu_has_feature(CPU_FTR_ARCH_300)) {
		/* the -4 is to account for senc values starting at 0x10 */
		lpcr = senc << (LPCR_VRMASD_SH - 4);
		kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
	} else {
		kvmppc_setup_partition_table(kvm);
	}

	/* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */
	smp_wmb();
	kvm->arch.hpte_setup_done = 1;
	err = 0;
 out_srcu:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
 out:
	mutex_unlock(&kvm->lock);
	return err;

 up_out:
	up_read(&current->mm->mmap_sem);
	goto out_srcu;
}

#ifdef CONFIG_KVM_XICS
/*
 * Allocate a per-core structure for managing state about which cores are
 * running in the host versus the guest and for exchanging data between
 * real mode KVM and CPU running in the host.
 * This is only done for the first VM.
 * The allocated structure stays even if all VMs have stopped.
 * It is only freed when the kvm-hv module is unloaded.
 * It's OK for this routine to fail, we just don't support host
 * core operations like redirecting H_IPI wakeups.
 */
void kvmppc_alloc_host_rm_ops(void)
{
	struct kvmppc_host_rm_ops *ops;
	unsigned long l_ops;
	int cpu, core;
	int size;

	/* Not the first time here ? */
	if (kvmppc_host_rm_ops_hv != NULL)
		return;

	ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL);
	if (!ops)
		return;

	size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core);
	ops->rm_core = kzalloc(size, GFP_KERNEL);

	if (!ops->rm_core) {
		kfree(ops);
		return;
	}

	get_online_cpus();

	for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) {
		if (!cpu_online(cpu))
			continue;

		core = cpu >> threads_shift;
		ops->rm_core[core].rm_state.in_host = 1;
	}

	ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv;

	/*
	 * Make the contents of the kvmppc_host_rm_ops structure visible
	 * to other CPUs before we assign it to the global variable.
	 * Do an atomic assignment (no locks used here), but if someone
	 * beats us to it, just free our copy and return.
	 */
	smp_wmb();
	l_ops = (unsigned long) ops;

	if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) {
		put_online_cpus();
		kfree(ops->rm_core);
		kfree(ops);
		return;
	}

	cpuhp_setup_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE,
				  "ppc/kvm_book3s:prepare",
				  kvmppc_set_host_core,
				  kvmppc_clear_host_core);
	put_online_cpus();
}

void kvmppc_free_host_rm_ops(void)
{
	if (kvmppc_host_rm_ops_hv) {
		cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE);
		kfree(kvmppc_host_rm_ops_hv->rm_core);
		kfree(kvmppc_host_rm_ops_hv);
		kvmppc_host_rm_ops_hv = NULL;
	}
}
#endif
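
/*
 * The cmpxchg64() publication in kvmppc_alloc_host_rm_ops() above is a
 * lock-free once-only initialization: the smp_wmb() makes the fully
 * initialized structure visible before the global pointer is set, and
 * whichever caller loses the compare-and-swap race simply frees its
 * copy, so concurrent first-VM creation is safe without a lock.
 */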
3344
3a167bea 3345static int kvmppc_core_init_vm_hv(struct kvm *kvm)
de56a948 3346{
32fad281 3347 unsigned long lpcr, lpid;
e23a808b 3348 char buf[32];
8cf4ecc0 3349 int ret;
de56a948 3350
32fad281
PM
3351 /* Allocate the guest's logical partition ID */
3352
3353 lpid = kvmppc_alloc_lpid();
5d226ae5 3354 if ((long)lpid < 0)
32fad281
PM
3355 return -ENOMEM;
3356 kvm->arch.lpid = lpid;
de56a948 3357
79b6c247
SW
3358 kvmppc_alloc_host_rm_ops();
3359
1b400ba0
PM
3360 /*
3361 * Since we don't flush the TLB when tearing down a VM,
3362 * and this lpid might have previously been used,
3363 * make sure we flush on each core before running the new VM.
7c5b06ca
PM
3364 * On POWER9, the tlbie in mmu_partition_table_set_entry()
3365 * does this flush for us.
1b400ba0 3366 */
7c5b06ca
PM
3367 if (!cpu_has_feature(CPU_FTR_ARCH_300))
3368 cpumask_setall(&kvm->arch.need_tlb_flush);
1b400ba0 3369
699a0ea0
PM
3370 /* Start out with the default set of hcalls enabled */
3371 memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls,
3372 sizeof(kvm->arch.enabled_hcalls));
3373
7a84084c
PM
3374 if (!cpu_has_feature(CPU_FTR_ARCH_300))
3375 kvm->arch.host_sdr1 = mfspr(SPRN_SDR1);
aa04b4cc 3376
c17b98cf
PM
3377 /* Init LPCR for virtual RMA mode */
3378 kvm->arch.host_lpid = mfspr(SPRN_LPID);
3379 kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR);
3380 lpcr &= LPCR_PECE | LPCR_LPES;
3381 lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE |
3382 LPCR_VPM0 | LPCR_VPM1;
3383 kvm->arch.vrma_slb_v = SLB_VSID_B_1T |
3384 (VRMA_VSID << SLB_VSID_SHIFT_1T);
3385 /* On POWER8 turn on online bit to enable PURR/SPURR */
3386 if (cpu_has_feature(CPU_FTR_ARCH_207S))
3387 lpcr |= LPCR_ONL;
84f7139c
PM
3388 /*
3389 * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed)
3390 * Set HVICE bit to enable hypervisor virtualization interrupts.
3391 */
3392 if (cpu_has_feature(CPU_FTR_ARCH_300)) {
7a84084c 3393 lpcr &= ~LPCR_VPM0;
84f7139c
PM
3394 lpcr |= LPCR_HVICE;
3395 }
3396
8cf4ecc0
PM
3397 /*
3398 * For now, if the host uses radix, the guest must be radix.
3399 */
3400 if (radix_enabled()) {
3401 kvm->arch.radix = 1;
3402 lpcr &= ~LPCR_VPM1;
3403 lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR;
3404 ret = kvmppc_init_vm_radix(kvm);
3405 if (ret) {
3406 kvmppc_free_lpid(kvm->arch.lpid);
3407 return ret;
3408 }
3409 kvmppc_setup_partition_table(kvm);
3410 }
3411
9e368f29 3412 kvm->arch.lpcr = lpcr;
aa04b4cc 3413
7c5b06ca
PM
3414 /*
3415 * Work out how many sets the TLB has, for the use of
3416 * the TLB invalidation loop in book3s_hv_rmhandlers.S.
3417 */
8cf4ecc0
PM
3418 if (kvm_is_radix(kvm))
3419 kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */
3420 else if (cpu_has_feature(CPU_FTR_ARCH_300))
7c5b06ca
PM
3421 kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */
3422 else if (cpu_has_feature(CPU_FTR_ARCH_207S))
3423 kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */
3424 else
3425 kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */
3426
512691d4 3427 /*
441c19c8
ME
3428 * Track that we now have a HV mode VM active. This blocks secondary
3429 * CPU threads from coming online.
8cf4ecc0
PM
3430 * On POWER9, we only need to do this for HPT guests on a radix
3431 * host, which is not yet supported.
512691d4 3432 */
8cf4ecc0
PM
3433 if (!cpu_has_feature(CPU_FTR_ARCH_300))
3434 kvm_hv_vm_activated();
512691d4 3435
e23a808b
PM
3436 /*
3437 * Create a debugfs directory for the VM
3438 */
3439 snprintf(buf, sizeof(buf), "vm%d", current->pid);
3440 kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir);
3441 if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir))
3442 kvmppc_mmu_debugfs_init(kvm);
3443
54738c09 3444 return 0;
de56a948
PM
3445}
3446
f1378b1c
PM
3447static void kvmppc_free_vcores(struct kvm *kvm)
3448{
3449 long int i;
3450
23316316 3451 for (i = 0; i < KVM_MAX_VCORES; ++i)
f1378b1c
PM
3452 kfree(kvm->arch.vcores[i]);
3453 kvm->arch.online_vcores = 0;
3454}
3455
3a167bea 3456static void kvmppc_core_destroy_vm_hv(struct kvm *kvm)
de56a948 3457{
e23a808b
PM
3458 debugfs_remove_recursive(kvm->arch.debugfs_dir);
3459
8cf4ecc0
PM
3460 if (!cpu_has_feature(CPU_FTR_ARCH_300))
3461 kvm_hv_vm_deactivated();
512691d4 3462
f1378b1c 3463 kvmppc_free_vcores(kvm);
aa04b4cc 3464
8cf4ecc0
PM
3465 kvmppc_free_lpid(kvm->arch.lpid);
3466
5a319350
PM
3467 if (kvm_is_radix(kvm))
3468 kvmppc_free_radix(kvm);
3469 else
3470 kvmppc_free_hpt(kvm);
c57875f5
SW
3471
3472 kvmppc_free_pimap(kvm);
de56a948
PM
3473}
3474
3a167bea
AK
3475/* We don't need to emulate any privileged instructions or dcbz */
3476static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
3477 unsigned int inst, int *advance)
de56a948 3478{
3a167bea 3479 return EMULATE_FAIL;
de56a948
PM
3480}
3481
3a167bea
AK
3482static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn,
3483 ulong spr_val)
de56a948
PM
3484{
3485 return EMULATE_FAIL;
3486}
3487
3a167bea
AK
3488static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn,
3489 ulong *spr_val)
de56a948
PM
3490{
3491 return EMULATE_FAIL;
3492}
3493
3a167bea 3494static int kvmppc_core_check_processor_compat_hv(void)
de56a948 3495{
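	/* HV KVM requires hypervisor mode on an ISA v2.06 (POWER7) or later CPU */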
c17b98cf
PM
3496 if (!cpu_has_feature(CPU_FTR_HVMODE) ||
3497 !cpu_has_feature(CPU_FTR_ARCH_206))
3a167bea 3498 return -EIO;
50de596d 3499
3a167bea 3500 return 0;
de56a948
PM
3501}
3502
8daaafc8
SW
3503#ifdef CONFIG_KVM_XICS
3504
3505void kvmppc_free_pimap(struct kvm *kvm)
3506{
3507 kfree(kvm->arch.pimap);
3508}
3509
c57875f5 3510static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void)
8daaafc8
SW
3511{
3512 return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL);
3513}
c57875f5
SW
3514
3515static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
3516{
3517 struct irq_desc *desc;
3518 struct kvmppc_irq_map *irq_map;
3519 struct kvmppc_passthru_irqmap *pimap;
3520 struct irq_chip *chip;
3521 int i;
3522
644abbb2
SW
3523 if (!kvm_irq_bypass)
3524 return 1;
3525
c57875f5
SW
3526 desc = irq_to_desc(host_irq);
3527 if (!desc)
3528 return -EIO;
3529
3530 mutex_lock(&kvm->lock);
3531
3532 pimap = kvm->arch.pimap;
3533 if (pimap == NULL) {
3534 /* First call, allocate structure to hold IRQ map */
3535 pimap = kvmppc_alloc_pimap();
3536 if (pimap == NULL) {
3537 mutex_unlock(&kvm->lock);
3538 return -ENOMEM;
3539 }
3540 kvm->arch.pimap = pimap;
3541 }
3542
3543 /*
3544 * For now, we only support interrupts for which the EOI operation
3545 * is an OPAL call followed by a write to XIRR, since that's
3546 * what our real-mode EOI code does.
3547 */
3548 chip = irq_data_get_irq_chip(&desc->irq_data);
3549 if (!chip || !is_pnv_opal_msi(chip)) {
3550 pr_warn("kvmppc_set_passthru_irq: Could not assign IRQ map for (%d,%d)\n",
3551 host_irq, guest_gsi);
3552 mutex_unlock(&kvm->lock);
3553 return -ENOENT;
3554 }
3555
3556 /*
3557 * See if we already have an entry for this guest IRQ number.
3558 * If it's already mapped to a hardware IRQ number, that's an error;
3559 * otherwise re-use this entry.
3560 */
3561 for (i = 0; i < pimap->n_mapped; i++) {
3562 if (guest_gsi == pimap->mapped[i].v_hwirq) {
3563 if (pimap->mapped[i].r_hwirq) {
3564 mutex_unlock(&kvm->lock);
3565 return -EINVAL;
3566 }
3567 break;
3568 }
3569 }
3570
3571 if (i == KVMPPC_PIRQ_MAPPED) {
3572 mutex_unlock(&kvm->lock);
3573 return -EAGAIN; /* table is full */
3574 }
3575
3576 irq_map = &pimap->mapped[i];
3577
3578 irq_map->v_hwirq = guest_gsi;
c57875f5
SW
3579 irq_map->desc = desc;
3580
e3c13e56
SW
3581 /*
3582 * Order the above two stores before the next to serialize with
3583 * the KVM real mode handler.
3584 */
3585 smp_wmb();
3586 irq_map->r_hwirq = desc->irq_data.hwirq;
3587
c57875f5
SW
3588 if (i == pimap->n_mapped)
3589 pimap->n_mapped++;
3590
5d375199
PM
3591 kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq);
3592
c57875f5
SW
3593 mutex_unlock(&kvm->lock);
3594
3595 return 0;
3596}
3597
3598static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi)
3599{
3600 struct irq_desc *desc;
3601 struct kvmppc_passthru_irqmap *pimap;
3602 int i;
3603
644abbb2
SW
3604 if (!kvm_irq_bypass)
3605 return 0;
3606
c57875f5
SW
3607 desc = irq_to_desc(host_irq);
3608 if (!desc)
3609 return -EIO;
3610
3611 mutex_lock(&kvm->lock);
3612
3613 if (kvm->arch.pimap == NULL) {
3614 mutex_unlock(&kvm->lock);
3615 return 0;
3616 }
3617 pimap = kvm->arch.pimap;
3618
3619 for (i = 0; i < pimap->n_mapped; i++) {
3620 if (guest_gsi == pimap->mapped[i].v_hwirq)
3621 break;
3622 }
3623
3624 if (i == pimap->n_mapped) {
3625 mutex_unlock(&kvm->lock);
3626 return -ENODEV;
3627 }
3628
5d375199
PM
3629 kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq);
3630
c57875f5
SW
3631 /* invalidate the entry */
3632 pimap->mapped[i].r_hwirq = 0;
3633
3634 /*
3635 * We don't free this structure even when the number of mapped
3636 * IRQs drops to zero. The structure is freed when we destroy the VM.
3637 */
3638
3639 mutex_unlock(&kvm->lock);
3640 return 0;
3641}
3642
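/*
 * These two callbacks plug into the generic irqbypass framework: when
 * a producer (typically a VFIO-backed device interrupt) is matched
 * with this VM's irqfd consumer, the framework invokes the add
 * callback and we try to map the host IRQ straight through to the
 * guest; the del callback tears the mapping down again and reverts to
 * the default host-mediated interrupt delivery.
 */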
3643static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons,
3644 struct irq_bypass_producer *prod)
3645{
3646 int ret = 0;
3647 struct kvm_kernel_irqfd *irqfd =
3648 container_of(cons, struct kvm_kernel_irqfd, consumer);
3649
3650 irqfd->producer = prod;
3651
3652 ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
3653 if (ret)
3654 pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n",
3655 prod->irq, irqfd->gsi, ret);
3656
3657 return ret;
3658}
3659
3660static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons,
3661 struct irq_bypass_producer *prod)
3662{
3663 int ret;
3664 struct kvm_kernel_irqfd *irqfd =
3665 container_of(cons, struct kvm_kernel_irqfd, consumer);
3666
3667 irqfd->producer = NULL;
3668
3669 /*
3670 * When the producer for a consumer is unregistered, we change back
3671 * to the default external interrupt handling mode: KVM real mode
3672 * will switch interrupts back to the host.
3673 */
3674 ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi);
3675 if (ret)
3676 pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n",
3677 prod->irq, irqfd->gsi, ret);
3678}
8daaafc8
SW
3679#endif
3680
3a167bea
AK
3681static long kvm_arch_vm_ioctl_hv(struct file *filp,
3682 unsigned int ioctl, unsigned long arg)
3683{
3684 struct kvm *kvm __maybe_unused = filp->private_data;
3685 void __user *argp = (void __user *)arg;
3686 long r;
3687
3688 switch (ioctl) {
3689
3a167bea
AK
3690 case KVM_PPC_ALLOCATE_HTAB: {
3691 u32 htab_order;
3692
3693 r = -EFAULT;
3694 if (get_user(htab_order, (u32 __user *)argp))
3695 break;
3696 r = kvmppc_alloc_reset_hpt(kvm, &htab_order);
3697 if (r)
3698 break;
3699 r = -EFAULT;
3700 if (put_user(htab_order, (u32 __user *)argp))
3701 break;
3702 r = 0;
3703 break;
3704 }
3705
3706 case KVM_PPC_GET_HTAB_FD: {
3707 struct kvm_get_htab_fd ghf;
3708
3709 r = -EFAULT;
3710 if (copy_from_user(&ghf, argp, sizeof(ghf)))
3711 break;
3712 r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf);
3713 break;
3714 }
3715
3716 default:
3717 r = -ENOTTY;
3718 }
3719
3720 return r;
3721}
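/*
 * A minimal sketch of the matching userspace call for the
 * KVM_PPC_ALLOCATE_HTAB case above; vm_fd and the requested order are
 * illustrative:
 *
 *	__u32 order = 18;	... ask for a 2^18-byte HPT
 *	if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) == 0)
 *		... 'order' now holds the order actually allocated
 */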
3722
699a0ea0
PM
3723/*
3724 * List of hcall numbers to enable by default.
3725 * For compatibility with old userspace, we enable by default
3726 * all hcalls that were implemented before the hcall-enabling
3727 * facility was added. Note this list should not include H_RTAS.
3728 */
3729static unsigned int default_hcall_list[] = {
3730 H_REMOVE,
3731 H_ENTER,
3732 H_READ,
3733 H_PROTECT,
3734 H_BULK_REMOVE,
3735 H_GET_TCE,
3736 H_PUT_TCE,
3737 H_SET_DABR,
3738 H_SET_XDABR,
3739 H_CEDE,
3740 H_PROD,
3741 H_CONFER,
3742 H_REGISTER_VPA,
3743#ifdef CONFIG_KVM_XICS
3744 H_EOI,
3745 H_CPPR,
3746 H_IPI,
3747 H_IPOLL,
3748 H_XIRR,
3749 H_XIRR_X,
3750#endif
3751 0
3752};
3753
3754static void init_default_hcalls(void)
3755{
3756 int i;
ae2113a4 3757 unsigned int hcall;
699a0ea0 3758
ae2113a4
PM
3759 for (i = 0; default_hcall_list[i]; ++i) {
3760 hcall = default_hcall_list[i];
3761 WARN_ON(!kvmppc_hcall_impl_hv(hcall));
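		/*
		 * hcall numbers are multiples of 4, so hcall / 4 gives a
		 * dense index into the enabled-hcalls bitmap.
		 */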
3762 __set_bit(hcall / 4, default_enabled_hcalls);
3763 }
699a0ea0
PM
3764}
3765
c9270132
PM
3766static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
3767{
468808bd 3768 unsigned long lpcr;
8cf4ecc0 3769 int radix;
468808bd
PM
3770
3771 /* If not on a POWER9, reject it */
3772 if (!cpu_has_feature(CPU_FTR_ARCH_300))
3773 return -ENODEV;
3774
3775 /* If any unknown flags are set, reject it */
3776 if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE))
3777 return -EINVAL;
3778
8cf4ecc0
PM
3779 /* We can't change a guest to/from radix yet */
3780 radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX);
3781 if (radix != kvm_is_radix(kvm))
468808bd
PM
3782 return -EINVAL;
3783
3784 /* GR (guest radix) bit in process_table field must match */
8cf4ecc0 3785 if (!!(cfg->process_table & PATB_GR) != radix)
468808bd
PM
3786 return -EINVAL;
3787
3788 /* Process table size field must be reasonable, i.e. <= 24 */
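	/* (PRTS encodes log2(table size) - 12, so 24 caps the table at 2^36 bytes) */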
3789 if ((cfg->process_table & PRTS_MASK) > 24)
3790 return -EINVAL;
3791
3792 kvm->arch.process_table = cfg->process_table;
3793 kvmppc_setup_partition_table(kvm);
3794
3795 lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0;
3796 kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE);
3797
3798 return 0;
c9270132
PM
3799}
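/*
 * Sketch of the userspace side: KVM_PPC_CONFIGURE_V3_MMU is the VM
 * ioctl that reaches the handler above; the field values here are
 * illustrative:
 *
 *	struct kvm_ppc_mmuv3_cfg cfg = {
 *		.flags = KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE,
 *		.process_table = table_base | PATB_GR | prts_field,
 *	};
 *	ioctl(vm_fd, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
 */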
3800
cbbc58d4 3801static struct kvmppc_ops kvm_ops_hv = {
3a167bea
AK
3802 .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv,
3803 .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv,
3804 .get_one_reg = kvmppc_get_one_reg_hv,
3805 .set_one_reg = kvmppc_set_one_reg_hv,
3806 .vcpu_load = kvmppc_core_vcpu_load_hv,
3807 .vcpu_put = kvmppc_core_vcpu_put_hv,
3808 .set_msr = kvmppc_set_msr_hv,
3809 .vcpu_run = kvmppc_vcpu_run_hv,
3810 .vcpu_create = kvmppc_core_vcpu_create_hv,
3811 .vcpu_free = kvmppc_core_vcpu_free_hv,
3812 .check_requests = kvmppc_core_check_requests_hv,
3813 .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv,
3814 .flush_memslot = kvmppc_core_flush_memslot_hv,
3815 .prepare_memory_region = kvmppc_core_prepare_memory_region_hv,
3816 .commit_memory_region = kvmppc_core_commit_memory_region_hv,
3817 .unmap_hva = kvm_unmap_hva_hv,
3818 .unmap_hva_range = kvm_unmap_hva_range_hv,
3819 .age_hva = kvm_age_hva_hv,
3820 .test_age_hva = kvm_test_age_hva_hv,
3821 .set_spte_hva = kvm_set_spte_hva_hv,
3822 .mmu_destroy = kvmppc_mmu_destroy_hv,
3823 .free_memslot = kvmppc_core_free_memslot_hv,
3824 .create_memslot = kvmppc_core_create_memslot_hv,
3825 .init_vm = kvmppc_core_init_vm_hv,
3826 .destroy_vm = kvmppc_core_destroy_vm_hv,
3a167bea
AK
3827 .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv,
3828 .emulate_op = kvmppc_core_emulate_op_hv,
3829 .emulate_mtspr = kvmppc_core_emulate_mtspr_hv,
3830 .emulate_mfspr = kvmppc_core_emulate_mfspr_hv,
3831 .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv,
3832 .arch_vm_ioctl = kvm_arch_vm_ioctl_hv,
ae2113a4 3833 .hcall_implemented = kvmppc_hcall_impl_hv,
c57875f5
SW
3834#ifdef CONFIG_KVM_XICS
3835 .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv,
3836 .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv,
3837#endif
c9270132
PM
3838 .configure_mmu = kvmhv_configure_mmu,
3839 .get_rmmu_info = kvmhv_get_rmmu_info,
3a167bea
AK
3840};
3841
fd7bacbc
MS
3842static int kvm_init_subcore_bitmap(void)
3843{
3844 int i, j;
3845 int nr_cores = cpu_nr_cores();
3846 struct sibling_subcore_state *sibling_subcore_state;
3847
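	/*
	 * Each core gets one sibling_subcore_state, shared via the paca by
	 * all of the core's threads; the HMI handler uses it to coordinate
	 * the threads of a split core.
	 */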
3848 for (i = 0; i < nr_cores; i++) {
3849 int first_cpu = i * threads_per_core;
3850 int node = cpu_to_node(first_cpu);
3851
3852 /* Ignore if it is already allocated. */
3853 if (paca[first_cpu].sibling_subcore_state)
3854 continue;
3855
3856 sibling_subcore_state =
3857 kmalloc_node(sizeof(struct sibling_subcore_state),
3858 GFP_KERNEL, node);
3859 if (!sibling_subcore_state)
3860 return -ENOMEM;
3861
3862 memset(sibling_subcore_state, 0,
3863 sizeof(struct sibling_subcore_state));
3864
3865 for (j = 0; j < threads_per_core; j++) {
3866 int cpu = first_cpu + j;
3867
3868 paca[cpu].sibling_subcore_state = sibling_subcore_state;
3869 }
3870 }
3871 return 0;
3872}
3873
5a319350
PM
3874static int kvmppc_radix_possible(void)
3875{
3876 return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled();
3877}
3878
3a167bea 3879static int kvmppc_book3s_init_hv(void)
de56a948
PM
3880{
3881 int r;
cbbc58d4
AK
3882 /*
3883 * FIXME: do we need to run this check on all CPUs?
3884 */
3885 r = kvmppc_core_check_processor_compat_hv();
3886 if (r < 0)
739e2425 3887 return -ENODEV;
de56a948 3888
fd7bacbc
MS
3889 r = kvm_init_subcore_bitmap();
3890 if (r)
3891 return r;
3892
f725758b
PM
3893 /*
3894 * We need a way of accessing the XICS interrupt controller,
3895 * either directly, via paca[cpu].kvm_hstate.xics_phys, or
3896 * indirectly, via OPAL.
3897 */
3898#ifdef CONFIG_SMP
3899 if (!get_paca()->kvm_hstate.xics_phys) {
3900 struct device_node *np;
3901
3902 np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc");
3903 if (!np) {
3904 pr_err("KVM-HV: Cannot determine method for accessing XICS\n");
3905 return -ENODEV;
3906 }
3907 }
3908#endif
3909
cbbc58d4
AK
3910 kvm_ops_hv.owner = THIS_MODULE;
3911 kvmppc_hv_ops = &kvm_ops_hv;
de56a948 3912
699a0ea0
PM
3913 init_default_hcalls();
3914
ec257165
PM
3915 init_vcore_lists();
3916
cbbc58d4 3917 r = kvmppc_mmu_hv_init();
5a319350
PM
3918 if (r)
3919 return r;
3920
3921 if (kvmppc_radix_possible())
3922 r = kvmppc_radix_init();
de56a948
PM
3923 return r;
3924}
3925
3a167bea 3926static void kvmppc_book3s_exit_hv(void)
de56a948 3927{
79b6c247 3928 kvmppc_free_host_rm_ops();
5a319350
PM
3929 if (kvmppc_radix_possible())
3930 kvmppc_radix_exit();
cbbc58d4 3931 kvmppc_hv_ops = NULL;
de56a948
PM
3932}
3933
3a167bea
AK
3934module_init(kvmppc_book3s_init_hv);
3935module_exit(kvmppc_book3s_exit_hv);
2ba9f0d8 3936MODULE_LICENSE("GPL");
398a76c6
AG
3937MODULE_ALIAS_MISCDEV(KVM_MINOR);
3938MODULE_ALIAS("devname:kvm");
7c5b06ca 3939