// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 */

#include <linux/kvm_host.h>
#include <linux/kernel.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched/signal.h>
#include <linux/sched/stat.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include <asm/ftrace.h>
#include <asm/reg.h>
#include <asm/ppc-opcode.h>
#include <asm/asm-prototypes.h>
#include <asm/archrandom.h>
#include <asm/debug.h>
#include <asm/disassemble.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <linux/uaccess.h>
#include <asm/interrupt.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/hw_breakpoint.h>
#include <asm/kvm_book3s_uvmem.h>
#include <asm/ultravisor.h>
#include <asm/dtl.h>
#include <asm/plpar_wrappers.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, 0644);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, 0644);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

static bool one_vm_per_core;
module_param(one_vm_per_core, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(one_vm_per_core, "Only run vCPUs from the same VM on a core (requires POWER8 or older)");

#ifdef CONFIG_KVM_XICS
static const struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass, 0644);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect, 0644);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

/* If set, guests are allowed to create and control nested guests */
static bool nested = true;
module_param(nested, bool, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(nested, "Enable nested virtualization (only on POWER9)");

static inline bool nesting_enabled(struct kvm *kvm)
{
	return kvm->arch.nested_enable && kvm_is_radix(kvm);
}

static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

/*
 * RWMR values for POWER8.  These control the rate at which PURR
 * and SPURR count and should be set according to the number of
 * online threads in the vcore being run.
 */
#define RWMR_RPA_P8_1THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_2THREAD	0x7FFF2908450D8DA9UL
#define RWMR_RPA_P8_3THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_4THREAD	0x199A421245058DA9UL
#define RWMR_RPA_P8_5THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_6THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_7THREAD	0x164520C62609AECAUL
#define RWMR_RPA_P8_8THREAD	0x164520C62609AECAUL

static unsigned long p8_rwmr_values[MAX_SMT_THREADS + 1] = {
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_1THREAD,
	RWMR_RPA_P8_2THREAD,
	RWMR_RPA_P8_3THREAD,
	RWMR_RPA_P8_4THREAD,
	RWMR_RPA_P8_5THREAD,
	RWMR_RPA_P8_6THREAD,
	RWMR_RPA_P8_7THREAD,
	RWMR_RPA_P8_8THREAD,
};

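/*
 * Return the next runnable thread in the vcore after index *ip,
 * advancing *ip to the index found, or NULL if there are no more.
 */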
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

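/*
 * Try to interrupt the given physical CPU thread directly, using
 * msgsnd where the hardware supports it or an XICS/OPAL IPI
 * otherwise.  Returns false if no direct method is available and
 * the caller must fall back to smp_send_reschedule().
 */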
static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* If we're a nested hypervisor, fall back to ordinary IPIs for now */
	if (kvmhv_on_pseries())
		return false;

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca_ptrs[cpu]->kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

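/*
 * Wake the vcpu if it is sleeping in the host, and kick the physical
 * CPU thread it is running on so that it exits the guest promptly.
 */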
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct rcuwait *waitp;

	waitp = kvm_arch_vcpu_get_wait(vcpu);
	if (rcuwait_wake_up(waitp))
		++vcpu->stat.generic.halt_wakeup;

	cpu = READ_ONCE(vcpu->arch.thread_cpu);
	if (cpu >= 0 && kvmppc_ipi_thread(cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
 * lock.  The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

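/*
 * kvmppc_core_start_stolen() records the point at which the vcore was
 * preempted; kvmppc_core_end_stolen() adds the elapsed interval to the
 * vcore's accumulated stolen time.
 */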
static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

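/*
 * Set the guest's architecture compatibility level from a PVR-style
 * value, computing the PCR bits needed to restrict the vcore to that
 * level.  Fails if the requested level exceeds the host's capability.
 */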
/* Dummy value used in computing PCR value below */
#define PCR_ARCH_31    (PCR_ARCH_300 << 1)

static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long host_pcr_bit = 0, guest_pcr_bit = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	/* We can (emulate) our own architecture version and anything older */
	if (cpu_has_feature(CPU_FTR_ARCH_31))
		host_pcr_bit = PCR_ARCH_31;
	else if (cpu_has_feature(CPU_FTR_ARCH_300))
		host_pcr_bit = PCR_ARCH_300;
	else if (cpu_has_feature(CPU_FTR_ARCH_207S))
		host_pcr_bit = PCR_ARCH_207;
	else if (cpu_has_feature(CPU_FTR_ARCH_206))
		host_pcr_bit = PCR_ARCH_206;
	else
		host_pcr_bit = PCR_ARCH_205;

	/* Determine lowest PCR bit needed to run guest in given PVR level */
	guest_pcr_bit = host_pcr_bit;
	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			guest_pcr_bit = PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			guest_pcr_bit = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			guest_pcr_bit = PCR_ARCH_207;
			break;
		case PVR_ARCH_300:
			guest_pcr_bit = PCR_ARCH_300;
			break;
		case PVR_ARCH_31:
			guest_pcr_bit = PCR_ARCH_31;
			break;
		default:
			return -EINVAL;
		}
	}

	/* Check requested PCR bits don't exceed our capabilities */
	if (guest_pcr_bit > host_pcr_bit)
		return -EINVAL;

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	/*
	 * Set all PCR bits for which guest_pcr_bit <= bit < host_pcr_bit
	 * Also set all reserved PCR bits
	 */
	vc->pcr = (host_pcr_bit - guest_pcr_bit) | PCR_MASK;
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.regs.nip, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.regs.ctr, vcpu->arch.regs.link);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8lx  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.regs.ccr, vcpu->arch.regs.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	return kvm_get_vcpu_by_id(kvm, id);
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

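/*
 * Record a request to change a vcpu's VPA-style area; the actual
 * pinning of the new area is deferred to kvmppc_update_vpas().
 */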
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

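/*
 * Handle the H_REGISTER_VPA hcall: validate the request and record
 * the pending (de)registration of the target vcpu's VPA, dispatch
 * trace log or SLB shadow buffer.  The area itself is pinned later,
 * when the vcpu's pending updates are processed.
 */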
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		/*
		 * The size of our lppaca is 1kB because of the way we align
		 * it for the guest to avoid crossing a 4kB boundary. We only
		 * use 640 bytes of the structure though, so we should accept
		 * clients that set a size of 640.
		 */
		BUILD_BUG_ON(sizeof(struct lppaca) != 640);
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

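/*
 * Pin the guest page backing a newly registered area and release the
 * previously pinned one.  Called with the vpa_update_lock held; the
 * lock is dropped and retaken around the pinning itself.
 */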
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

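/*
 * Write an entry into the guest's dispatch trace log, if it has one,
 * recording the timebase and the stolen time accumulated since this
 * vcpu was last dispatched.
 */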
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;
	unsigned long flags;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}

/* See if there is a doorbell interrupt pending for a vcpu */
static bool kvmppc_doorbell_pending(struct kvm_vcpu *vcpu)
{
	int thr;
	struct kvmppc_vcore *vc;

	if (vcpu->arch.doorbell_request)
		return true;
	/*
	 * Ensure that the read of vcore->dpdes comes after the read
	 * of vcpu->doorbell_request.  This barrier matches the
	 * smp_wmb() in kvmppc_guest_entry_inject().
	 */
	smp_rmb();
	vc = vcpu->arch.vcore;
	thr = vcpu->vcpu_id - vc->first_vcpuid;
	return !!(vc->dpdes & (1 << thr));
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

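/*
 * Handle the H_SET_MODE hcall for the resources we emulate in the
 * kernel: CIABR, DAWR0/DAWR1 and the address translation mode.
 * Anything else returns H_TOO_HARD so the hcall is passed up.
 */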
static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR0:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (!ppc_breakpoint_available())
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr0 = value1;
		vcpu->arch.dawrx0 = value2;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR1:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (!ppc_breakpoint_available())
			return H_P2;
		if (!cpu_has_feature(CPU_FTR_DAWR1))
			return H_P2;
		if (!vcpu->kvm->arch.dawr1_enabled)
			return H_FUNCTION;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr1 = value1;
		vcpu->arch.dawrx1 = value2;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_ADDR_TRANS_MODE:
		/*
		 * KVM does not support mflags=2 (AIL=2) and AIL=1 is reserved.
		 * Keep this in synch with kvmppc_filter_guest_lpcr_hv.
		 */
		if (cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG) &&
				kvmhv_vcpu_is_radix(vcpu) && mflags == 3)
			return H_UNSUPPORTED_FLAG_START;
		return H_TOO_HARD;
	default:
		return H_TOO_HARD;
	}
}

/* Copy guest memory in place - must reside within a single memslot */
static int kvmppc_copy_guest(struct kvm *kvm, gpa_t to, gpa_t from,
			     unsigned long len)
{
	struct kvm_memory_slot *to_memslot = NULL;
	struct kvm_memory_slot *from_memslot = NULL;
	unsigned long to_addr, from_addr;
	int r;

	/* Get HPA for from address */
	from_memslot = gfn_to_memslot(kvm, from >> PAGE_SHIFT);
	if (!from_memslot)
		return -EFAULT;
	if ((from + len) >= ((from_memslot->base_gfn + from_memslot->npages)
			     << PAGE_SHIFT))
		return -EINVAL;
	from_addr = gfn_to_hva_memslot(from_memslot, from >> PAGE_SHIFT);
	if (kvm_is_error_hva(from_addr))
		return -EFAULT;
	from_addr |= (from & (PAGE_SIZE - 1));

	/* Get HPA for to address */
	to_memslot = gfn_to_memslot(kvm, to >> PAGE_SHIFT);
	if (!to_memslot)
		return -EFAULT;
	if ((to + len) >= ((to_memslot->base_gfn + to_memslot->npages)
			   << PAGE_SHIFT))
		return -EINVAL;
	to_addr = gfn_to_hva_memslot(to_memslot, to >> PAGE_SHIFT);
	if (kvm_is_error_hva(to_addr))
		return -EFAULT;
	to_addr |= (to & (PAGE_SIZE - 1));

	/* Perform copy */
	r = raw_copy_in_user((void __user *)to_addr, (void __user *)from_addr,
			     len);
	if (r)
		return -EFAULT;
	mark_page_dirty(kvm, to >> PAGE_SHIFT);
	return 0;
}

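/*
 * Handle the H_PAGE_INIT hcall: zero and/or copy a single 4kB guest
 * page according to the flags.  The remaining (icache/CMO) flags are
 * accepted but ignored.
 */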
static long kvmppc_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			       unsigned long dest, unsigned long src)
{
	u64 pg_sz = SZ_4K;		/* 4K page size */
	u64 pg_mask = SZ_4K - 1;
	int ret;

	/* Check for invalid flags (H_PAGE_SET_LOANED covers all CMO flags) */
	if (flags & ~(H_ICACHE_INVALIDATE | H_ICACHE_SYNCHRONIZE |
		      H_ZERO_PAGE | H_COPY_PAGE | H_PAGE_SET_LOANED))
		return H_PARAMETER;

	/* dest (and src if copy_page flag set) must be page aligned */
	if ((dest & pg_mask) || ((flags & H_COPY_PAGE) && (src & pg_mask)))
		return H_PARAMETER;

	/* zero and/or copy the page as determined by the flags */
	if (flags & H_COPY_PAGE) {
		ret = kvmppc_copy_guest(vcpu->kvm, dest, src, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	} else if (flags & H_ZERO_PAGE) {
		ret = kvm_clear_guest(vcpu->kvm, dest, pg_sz);
		if (ret < 0)
			return H_PARAMETER;
	}

	/* We can ignore the remaining flags */

	return H_SUCCESS;
}

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 *
	 * In the case of the P9 single vcpu per vcore case, the real
	 * mode handler is not called but no other threads are in the
	 * source vcore.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

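/*
 * Read the target vcpu's current yield count from its pinned VPA;
 * returns 0 if no VPA is currently registered.
 */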
static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

/*
 * H_RPT_INVALIDATE hcall handler for nested guests.
 *
 * Handles only nested process-scoped invalidation requests in L0.
 */
static int kvmppc_nested_h_rpt_invalidate(struct kvm_vcpu *vcpu)
{
	unsigned long type = kvmppc_get_gpr(vcpu, 6);
	unsigned long pid, pg_sizes, start, end;

	/*
	 * The partition-scoped invalidations aren't handled here in L0.
	 */
	if (type & H_RPTI_TYPE_NESTED)
		return RESUME_HOST;

	pid = kvmppc_get_gpr(vcpu, 4);
	pg_sizes = kvmppc_get_gpr(vcpu, 7);
	start = kvmppc_get_gpr(vcpu, 8);
	end = kvmppc_get_gpr(vcpu, 9);

	do_h_rpt_invalidate_prt(pid, vcpu->arch.nested->shadow_lpid,
				type, pg_sizes, start, end);

	kvmppc_set_gpr(vcpu, 3, H_SUCCESS);
	return RESUME_GUEST;
}

static long kvmppc_h_rpt_invalidate(struct kvm_vcpu *vcpu,
				    unsigned long id, unsigned long target,
				    unsigned long type, unsigned long pg_sizes,
				    unsigned long start, unsigned long end)
{
	if (!kvm_is_radix(vcpu->kvm))
		return H_UNSUPPORTED;

	if (end < start)
		return H_P5;

	/*
	 * Partition-scoped invalidation for nested guests.
	 */
	if (type & H_RPTI_TYPE_NESTED) {
		if (!nesting_enabled(vcpu->kvm))
			return H_FUNCTION;

		/* Support only cores as target */
		if (target != H_RPTI_TARGET_CMMU)
			return H_P2;

		return do_h_rpt_invalidate_pat(vcpu, id, type, pg_sizes,
					       start, end);
	}

	/*
	 * Process-scoped invalidation for L1 guests.
	 */
	do_h_rpt_invalidate_prt(id, vcpu->kvm->arch.lpid,
				type, pg_sizes, start, end);
	return H_SUCCESS;
}

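/*
 * Main virtual-mode hypercall handler, called for hcalls that the
 * real-mode handlers could not complete (or returned H_TOO_HARD for).
 * Returns RESUME_GUEST on completion, RESUME_HOST to pass the hcall
 * up to userspace, or a negative errno.
 */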
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_REMOVE:
		ret = kvmppc_h_remove(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_ENTER:
		ret = kvmppc_h_enter(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_READ:
		ret = kvmppc_h_read(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_CLEAR_MOD:
		ret = kvmppc_h_clear_mod(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_CLEAR_REF:
		ret = kvmppc_h_clear_ref(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PROTECT:
		ret = kvmppc_h_protect(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_BULK_REMOVE:
		ret = kvmppc_h_bulk_remove(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;

	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (tvcpu->arch.ceded)
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			if (xics_on_xive()) {
				ret = H_NOT_AVAILABLE;
				return RESUME_GUEST;
			}
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_SET_DABR:
		ret = kvmppc_h_set_dabr(vcpu, kvmppc_get_gpr(vcpu, 4));
		break;
	case H_SET_XDABR:
		ret = kvmppc_h_set_xdabr(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5));
		break;
#ifdef CONFIG_SPAPR_TCE_IOMMU
	case H_GET_TCE:
		ret = kvmppc_h_get_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
#endif
	case H_RANDOM:
		if (!arch_get_random_seed_long(&vcpu->arch.regs.gpr[4]))
			ret = H_HARDWARE;
		break;
	case H_RPT_INVALIDATE:
		ret = kvmppc_h_rpt_invalidate(vcpu, kvmppc_get_gpr(vcpu, 4),
					      kvmppc_get_gpr(vcpu, 5),
					      kvmppc_get_gpr(vcpu, 6),
					      kvmppc_get_gpr(vcpu, 7),
					      kvmppc_get_gpr(vcpu, 8),
					      kvmppc_get_gpr(vcpu, 9));
		break;

	case H_SET_PARTITION_TABLE:
		ret = H_FUNCTION;
		if (nesting_enabled(kvm))
			ret = kvmhv_set_partition_table(vcpu);
		break;
	case H_ENTER_NESTED:
		ret = H_FUNCTION;
		if (!nesting_enabled(kvm))
			break;
		ret = kvmhv_enter_nested_guest(vcpu);
		if (ret == H_INTERRUPT) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return -EINTR;
		} else if (ret == H_TOO_HARD) {
			kvmppc_set_gpr(vcpu, 3, 0);
			vcpu->arch.hcall_needed = 0;
			return RESUME_HOST;
		}
		break;
	case H_TLB_INVALIDATE:
		ret = H_FUNCTION;
		if (nesting_enabled(kvm))
			ret = kvmhv_do_nested_tlbie(vcpu);
		break;
	case H_COPY_TOFROM_GUEST:
		ret = H_FUNCTION;
		if (nesting_enabled(kvm))
			ret = kvmhv_copy_tofrom_guest_nested(vcpu);
		break;
	case H_PAGE_INIT:
		ret = kvmppc_h_page_init(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_IN:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_in(kvm,
						   kvmppc_get_gpr(vcpu, 4),
						   kvmppc_get_gpr(vcpu, 5),
						   kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_PAGE_OUT:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_page_out(kvm,
						    kvmppc_get_gpr(vcpu, 4),
						    kvmppc_get_gpr(vcpu, 5),
						    kvmppc_get_gpr(vcpu, 6));
		break;
	case H_SVM_INIT_START:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_start(kvm);
		break;
	case H_SVM_INIT_DONE:
		ret = H_UNSUPPORTED;
		if (kvmppc_get_srr1(vcpu) & MSR_S)
			ret = kvmppc_h_svm_init_done(kvm);
		break;
	case H_SVM_INIT_ABORT:
		/*
		 * Even if that call is made by the Ultravisor, the SRR1 value
		 * is the guest context one, with the secure bit clear as it has
		 * not yet been secured. So we can't check it here.
		 * Instead the kvm->arch.secure_guest flag is checked inside
		 * kvmppc_h_svm_init_abort().
		 */
		ret = kvmppc_h_svm_init_abort(kvm);
		break;

	default:
		return RESUME_HOST;
	}
	WARN_ON_ONCE(ret == H_TOO_HARD);
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

/*
 * Handle H_CEDE in the P9 path where we don't call the real-mode hcall
 * handlers in book3s_hv_rmhandlers.S.
 *
 * This has to be done early, not in kvmppc_pseries_do_hcall(), so
 * that the cede logic in kvmppc_run_single_vcpu() works properly.
 */
static void kvmppc_cede(struct kvm_vcpu *vcpu)
{
	vcpu->arch.shregs.msr |= MSR_EE;
	vcpu->arch.ceded = 1;
	smp_mb();
	if (vcpu->arch.prodded) {
		vcpu->arch.prodded = 0;
		smp_mb();
		vcpu->arch.ceded = 0;
	}
}

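/*
 * Return 1 if the given hcall is implemented by HV KVM, either here
 * in virtual mode or in the real-mode handlers.
 */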
static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
	case H_PAGE_INIT:
	case H_RPT_INVALIDATE:
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

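/*
 * Check whether the instruction that caused an emulation-assist exit
 * is the KVM software breakpoint instruction: if so, exit to userspace
 * for debugging, otherwise send a program interrupt to the guest.
 */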
static int kvmppc_emulate_debug_inst(struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
					EMULATE_DONE) {
		/*
		 * Fetch failed, so return to guest and
		 * try executing it again.
		 */
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		vcpu->run->exit_reason = KVM_EXIT_DEBUG;
		vcpu->run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}

static void do_nothing(void *x)
{
}

static unsigned long kvmppc_read_dpdes(struct kvm_vcpu *vcpu)
{
	int thr, cpu, pcpu, nthreads;
	struct kvm_vcpu *v;
	unsigned long dpdes;

	nthreads = vcpu->kvm->arch.emul_smt_mode;
	dpdes = 0;
	cpu = vcpu->vcpu_id & ~(nthreads - 1);
	for (thr = 0; thr < nthreads; ++thr, ++cpu) {
		v = kvmppc_find_vcpu(vcpu->kvm, cpu);
		if (!v)
			continue;
		/*
		 * If the vcpu is currently running on a physical cpu thread,
		 * interrupt it in order to pull it out of the guest briefly,
		 * which will update its vcore->dpdes value.
		 */
		pcpu = READ_ONCE(v->cpu);
		if (pcpu >= 0)
			smp_call_function_single(pcpu, do_nothing, NULL, 1);
		if (kvmppc_doorbell_pending(v))
			dpdes |= 1 << thr;
	}
	return dpdes;
}

/*
 * On POWER9, emulate doorbell-related instructions in order to
 * give the guest the illusion of running on a multi-threaded core.
 * The instructions emulated are msgsndp, msgclrp, mfspr TIR,
 * and mfspr DPDES.
 */
static int kvmppc_emulate_doorbell_instr(struct kvm_vcpu *vcpu)
{
	u32 inst, rb, thr;
	unsigned long arg;
	struct kvm *kvm = vcpu->kvm;
	struct kvm_vcpu *tvcpu;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
		return RESUME_GUEST;
	if (get_op(inst) != 31)
		return EMULATE_FAIL;
	rb = get_rb(inst);
	thr = vcpu->vcpu_id & (kvm->arch.emul_smt_mode - 1);
	switch (get_xop(inst)) {
	case OP_31_XOP_MSGSNDP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
			break;
		arg &= 0x7f;
		if (arg >= kvm->arch.emul_smt_mode)
			break;
		tvcpu = kvmppc_find_vcpu(kvm, vcpu->vcpu_id - thr + arg);
		if (!tvcpu)
			break;
		if (!tvcpu->arch.doorbell_request) {
			tvcpu->arch.doorbell_request = 1;
			kvmppc_fast_vcpu_kick_hv(tvcpu);
		}
		break;
	case OP_31_XOP_MSGCLRP:
		arg = kvmppc_get_gpr(vcpu, rb);
		if (((arg >> 27) & 0x1f) != PPC_DBELL_SERVER)
			break;
		vcpu->arch.vcore->dpdes = 0;
		vcpu->arch.doorbell_request = 0;
		break;
	case OP_31_XOP_MFSPR:
		switch (get_sprn(inst)) {
		case SPRN_TIR:
			arg = thr;
			break;
		case SPRN_DPDES:
			arg = kvmppc_read_dpdes(vcpu);
			break;
		default:
			return EMULATE_FAIL;
		}
		kvmppc_set_gpr(vcpu, get_rt(inst), arg);
		break;
	default:
		return EMULATE_FAIL;
	}
	kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
	return RESUME_GUEST;
}

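/*
 * Decode the trap that caused a guest exit and either resume the
 * guest (RESUME_GUEST) or exit to the host (RESUME_HOST).
 */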
8c99d345 | 1423 | static int kvmppc_handle_exit_hv(struct kvm_vcpu *vcpu, |
3a167bea | 1424 | struct task_struct *tsk) |
de56a948 | 1425 | { |
8c99d345 | 1426 | struct kvm_run *run = vcpu->run; |
de56a948 PM |
1427 | int r = RESUME_HOST; |
1428 | ||
1429 | vcpu->stat.sum_exits++; | |
1430 | ||
1c9e3d51 PM |
1431 | /* |
1432 | * This can happen if an interrupt occurs in the last stages | |
1433 | * of guest entry or the first stages of guest exit (i.e. after | |
1434 | * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV | |
1435 | * and before setting it to KVM_GUEST_MODE_HOST_HV). | |
1436 | * That can happen due to a bug, or due to a machine check | |
1437 | * occurring at just the wrong time. | |
1438 | */ | |
1439 | if (vcpu->arch.shregs.msr & MSR_HV) { | |
1440 | printk(KERN_EMERG "KVM trap in HV mode!\n"); | |
1441 | printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", | |
1442 | vcpu->arch.trap, kvmppc_get_pc(vcpu), | |
1443 | vcpu->arch.shregs.msr); | |
1444 | kvmppc_dump_regs(vcpu); | |
1445 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
1446 | run->hw.hardware_exit_reason = vcpu->arch.trap; | |
1447 | return RESUME_HOST; | |
1448 | } | |
de56a948 PM |
1449 | run->exit_reason = KVM_EXIT_UNKNOWN; |
1450 | run->ready_for_interrupt_injection = 1; | |
1451 | switch (vcpu->arch.trap) { | |
1452 | /* We're good on these - the host merely wanted to get our attention */ | |
1453 | case BOOK3S_INTERRUPT_HV_DECREMENTER: | |
1454 | vcpu->stat.dec_exits++; | |
1455 | r = RESUME_GUEST; | |
1456 | break; | |
1457 | case BOOK3S_INTERRUPT_EXTERNAL: | |
5d00f66b | 1458 | case BOOK3S_INTERRUPT_H_DOORBELL: |
84f7139c | 1459 | case BOOK3S_INTERRUPT_H_VIRT: |
de56a948 PM |
1460 | vcpu->stat.ext_intr_exits++; |
1461 | r = RESUME_GUEST; | |
1462 | break; | |
6de6638b | 1463 | /* SR/HMI/PMI are HV interrupts that host has handled. Resume guest.*/ |
dee6f24c | 1464 | case BOOK3S_INTERRUPT_HMI: |
de56a948 | 1465 | case BOOK3S_INTERRUPT_PERFMON: |
6de6638b | 1466 | case BOOK3S_INTERRUPT_SYSTEM_RESET: |
de56a948 PM |
1467 | r = RESUME_GUEST; |
1468 | break; | |
1d15ffdf NP |
1469 | case BOOK3S_INTERRUPT_MACHINE_CHECK: { |
1470 | static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, | |
1471 | DEFAULT_RATELIMIT_BURST); | |
1472 | /* | |
1473 | * Print the MCE event to host console. Ratelimit so the guest | |
1474 | * can't flood the host log. | |
1475 | */ | |
1476 | if (__ratelimit(&rs)) | |
1477 | machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); |
884dfb72 PM |
1478 | |
1479 | /* | |
1480 | * If the guest can do FWNMI, exit to userspace so it can | |
1481 | * deliver a FWNMI to the guest. | |
1482 | * Otherwise we synthesize a machine check for the guest | |
1483 | * so that it knows that the machine check occurred. | |
1484 | */ | |
1485 | if (!vcpu->kvm->arch.fwnmi_enabled) { | |
1486 | ulong flags = vcpu->arch.shregs.msr & 0x083c0000; | |
1487 | kvmppc_core_queue_machine_check(vcpu, flags); | |
1488 | r = RESUME_GUEST; | |
1489 | break; | |
1490 | } | |
1491 | ||
e20bbd3d AP |
1492 | /* Exit to userspace with KVM_EXIT_NMI as exit reason */ |
1493 | run->exit_reason = KVM_EXIT_NMI; | |
1494 | run->hw.hardware_exit_reason = vcpu->arch.trap; | |
1495 | /* Clear out the old NMI status from run->flags */ | |
1496 | run->flags &= ~KVM_RUN_PPC_NMI_DISP_MASK; | |
1497 | /* Now set the NMI status */ | |
1498 | if (vcpu->arch.mce_evt.disposition == MCE_DISPOSITION_RECOVERED) | |
1499 | run->flags |= KVM_RUN_PPC_NMI_DISP_FULLY_RECOV; | |
1500 | else | |
1501 | run->flags |= KVM_RUN_PPC_NMI_DISP_NOT_RECOV; | |
1502 | ||
1503 | r = RESUME_HOST; | |
b4072df4 | 1504 | break; |
1d15ffdf | 1505 | } |
de56a948 PM |
1506 | case BOOK3S_INTERRUPT_PROGRAM: |
1507 | { | |
1508 | ulong flags; | |
1509 | /* | |
1510 | * Normally program interrupts are delivered directly | |
1511 | * to the guest by the hardware, but we can get here | |
1512 | * as a result of a hypervisor emulation interrupt | |
1513 | * (e40) getting turned into a 700 by BML RTAS. | |
1514 | */ | |
1515 | flags = vcpu->arch.shregs.msr & 0x1f0000ull; | |
1516 | kvmppc_core_queue_program(vcpu, flags); | |
1517 | r = RESUME_GUEST; | |
1518 | break; | |
1519 | } | |
1520 | case BOOK3S_INTERRUPT_SYSCALL: | |
1521 | { | |
de56a948 PM |
1522 | int i; |
1523 | ||
9dc2babc NP |
1524 | if (unlikely(vcpu->arch.shregs.msr & MSR_PR)) { |
1525 | /* | |
1526 | * Guest userspace executed sc 1. This can only be | |
1527 | * reached by the P9 path because the old path | |
1528 | * handles this case in realmode hcall handlers. | |
9dc2babc | 1529 | */ |
ac3c8b41 NP |
1530 | if (!kvmhv_vcpu_is_radix(vcpu)) { |
1531 | /* | |
1532 | * A guest could be running PR KVM, so this | |
1533 | * may be a PR KVM hcall. It must be reflected | |
1534 | * to the guest kernel as a sc interrupt. | |
1535 | */ | |
1536 | kvmppc_core_queue_syscall(vcpu); | |
1537 | } else { | |
1538 | /* | |
1539 | * Radix guests can not run PR KVM or nested HV | |
1540 | * hash guests which might run PR KVM, so this | |
1541 | * is always a privilege fault. Send a program | |
1542 | * check to guest kernel. | |
1543 | */ | |
1544 | kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV); | |
1545 | } | |
9dc2babc NP |
1546 | r = RESUME_GUEST; |
1547 | break; | |
1548 | } | |
27025a60 | 1549 | |
9dc2babc NP |
1550 | /* |
1551 | * hcall - gather args and set exit_reason. This will next be | |
1552 | * handled by kvmppc_pseries_do_hcall which may be able to deal | |
1553 | * with it and resume guest, or may punt to userspace. | |
1554 | */ | |
de56a948 PM |
1555 | run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3); |
1556 | for (i = 0; i < 9; ++i) | |
1557 | run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i); | |
1558 | run->exit_reason = KVM_EXIT_PAPR_HCALL; | |
1559 | vcpu->arch.hcall_needed = 1; | |
1560 | r = RESUME_HOST; | |
1561 | break; | |
1562 | } | |
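/*
 * An illustrative userspace sketch, not part of this file: a VMM
 * consuming the KVM_EXIT_PAPR_HCALL exit set up above (handle_hcall()
 * and vcpu_fd are hypothetical).  The value stored in papr_hcall.ret
 * is copied back into the guest's r3 on the next KVM_RUN:
 *
 *	if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
 *		run->papr_hcall.ret = handle_hcall(run->papr_hcall.nr,
 *						   run->papr_hcall.args);
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *	}
 */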
1563 | /* | |
342d3db7 PM |
1564 | * We get these next two if the guest accesses a page which it thinks |
1565 | * it has mapped but which is not actually present, either because | |
1566 | * it is for an emulated I/O device or because the corresponding |
6165d5dd NP |
1567 | * host page has been paged out. |
1568 | * | |
1569 | * Any other HDSI/HISI interrupts have been handled already for P7/8 | |
1570 | * guests. For POWER9 hash guests not using rmhandlers, basic hash | |
1571 | * fault handling is done here. | |
de56a948 | 1572 | */ |
6165d5dd NP |
1573 | case BOOK3S_INTERRUPT_H_DATA_STORAGE: { |
1574 | unsigned long vsid; | |
1575 | long err; | |
1576 | ||
1577 | if (vcpu->arch.fault_dsisr == HDSISR_CANARY) { | |
89d35b23 | 1578 | r = RESUME_GUEST; /* Just retry if it's the canary */ |
6165d5dd NP |
1579 | break; |
1580 | } | |
1581 | ||
1582 | if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { | |
1583 | /* | |
1584 | * Radix doesn't require anything, and pre-ISAv3.0 hash | |
1585 | * already attempted to handle this in rmhandlers. The | |
1586 | * hash fault handling below is v3 only (it uses ASDR | |
1587 | * via fault_gpa). | |
1588 | */ | |
1589 | r = RESUME_PAGE_FAULT; | |
1590 | break; | |
1591 | } | |
1592 | ||
1593 | if (!(vcpu->arch.fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT))) { | |
1594 | kvmppc_core_queue_data_storage(vcpu, | |
1595 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); | |
1596 | r = RESUME_GUEST; | |
1597 | break; | |
1598 | } | |
1599 | ||
1600 | if (!(vcpu->arch.shregs.msr & MSR_DR)) | |
1601 | vsid = vcpu->kvm->arch.vrma_slb_v; | |
1602 | else | |
1603 | vsid = vcpu->arch.fault_gpa; | |
1604 | ||
1605 | err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, | |
1606 | vsid, vcpu->arch.fault_dsisr, true); | |
1607 | if (err == 0) { | |
1608 | r = RESUME_GUEST; | |
1609 | } else if (err == -1 || err == -2) { | |
1610 | r = RESUME_PAGE_FAULT; | |
1611 | } else { | |
1612 | kvmppc_core_queue_data_storage(vcpu, | |
1613 | vcpu->arch.fault_dar, err); | |
1614 | r = RESUME_GUEST; | |
1615 | } | |
de56a948 | 1616 | break; |
6165d5dd NP |
1617 | } |
1618 | case BOOK3S_INTERRUPT_H_INST_STORAGE: { | |
1619 | unsigned long vsid; | |
1620 | long err; | |
1621 | ||
913d3ff9 | 1622 | vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); |
32eb150a PM |
1623 | vcpu->arch.fault_dsisr = vcpu->arch.shregs.msr & |
1624 | DSISR_SRR1_MATCH_64S; | |
6165d5dd NP |
1625 | if (kvm_is_radix(vcpu->kvm) || !cpu_has_feature(CPU_FTR_ARCH_300)) { |
1626 | /* | |
1627 | * Radix doesn't require anything, and pre-ISAv3.0 hash | |
1628 | * already attempted to handle this in rmhandlers. The | |
1629 | * hash fault handling below is v3 only (it uses ASDR | |
1630 | * via fault_gpa). | |
1631 | */ | |
1632 | if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) | |
1633 | vcpu->arch.fault_dsisr |= DSISR_ISSTORE; | |
1634 | r = RESUME_PAGE_FAULT; | |
1635 | break; | |
1636 | } | |
1637 | ||
1638 | if (!(vcpu->arch.fault_dsisr & SRR1_ISI_NOPT)) { | |
1639 | kvmppc_core_queue_inst_storage(vcpu, | |
1640 | vcpu->arch.fault_dsisr); | |
1641 | r = RESUME_GUEST; | |
1642 | break; | |
1643 | } | |
1644 | ||
1645 | if (!(vcpu->arch.shregs.msr & MSR_IR)) | |
1646 | vsid = vcpu->kvm->arch.vrma_slb_v; | |
1647 | else | |
1648 | vsid = vcpu->arch.fault_gpa; | |
1649 | ||
1650 | err = kvmppc_hpte_hv_fault(vcpu, vcpu->arch.fault_dar, | |
1651 | vsid, vcpu->arch.fault_dsisr, false); | |
1652 | if (err == 0) { | |
1653 | r = RESUME_GUEST; | |
1654 | } else if (err == -1) { | |
1655 | r = RESUME_PAGE_FAULT; | |
1656 | } else { | |
1657 | kvmppc_core_queue_inst_storage(vcpu, err); | |
1658 | r = RESUME_GUEST; | |
1659 | } | |
de56a948 | 1660 | break; |
6165d5dd NP |
1661 | } |
1662 | ||
de56a948 PM |
1663 | /* |
1664 | * This occurs if the guest executes an illegal instruction. | |
a59c1d9e MS |
1665 | * If guest debug is disabled, generate a program interrupt |
1666 | * to the guest. If guest debug is enabled, check whether |
1667 | * the instruction is a software breakpoint instruction and |
1668 | * return to the guest or to the host accordingly. |
de56a948 PM |
1669 | */ |
1670 | case BOOK3S_INTERRUPT_H_EMUL_ASSIST: | |
4a157d61 PM |
1671 | if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED) |
1672 | vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ? | |
1673 | swab32(vcpu->arch.emul_inst) : | |
1674 | vcpu->arch.emul_inst; | |
a59c1d9e | 1675 | if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) { |
8c99d345 | 1676 | r = kvmppc_emulate_debug_inst(vcpu); |
a59c1d9e MS |
1677 | } else { |
1678 | kvmppc_core_queue_program(vcpu, SRR1_PROGILL); | |
1679 | r = RESUME_GUEST; | |
1680 | } | |
bd3048b8 ME |
1681 | break; |
1682 | /* | |
1683 | * This occurs if the guest (kernel or userspace) does something that |
57900694 PM |
1684 | * is prohibited by HFSCR. |
1685 | * On POWER9, this could be a doorbell instruction that we need | |
1686 | * to emulate. | |
1687 | * Otherwise, we just generate a program interrupt to the guest. | |
bd3048b8 ME |
1688 | */ |
1689 | case BOOK3S_INTERRUPT_H_FAC_UNAVAIL: | |
57900694 | 1690 | r = EMULATE_FAIL; |
36ee41d1 | 1691 | if (((vcpu->arch.hfscr >> 56) == FSCR_MSGP_LG) && |
53655ddd | 1692 | cpu_has_feature(CPU_FTR_ARCH_300)) |
57900694 PM |
1693 | r = kvmppc_emulate_doorbell_instr(vcpu); |
1694 | if (r == EMULATE_FAIL) { | |
1695 | kvmppc_core_queue_program(vcpu, SRR1_PROGILL); | |
1696 | r = RESUME_GUEST; | |
1697 | } | |
de56a948 | 1698 | break; |
4bb3c7a0 PM |
1699 | |
1700 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | |
1701 | case BOOK3S_INTERRUPT_HV_SOFTPATCH: | |
1702 | /* | |
1703 | * This occurs for various TM-related instructions that | |
1704 | * we need to emulate on POWER9 DD2.2. We have already | |
1705 | * handled the cases where the guest was in real-suspend | |
1706 | * mode and was transitioning to transactional state. | |
1707 | */ | |
1708 | r = kvmhv_p9_tm_emulation(vcpu); | |
1709 | break; | |
1710 | #endif | |
1711 | ||
f7af5209 SW |
1712 | case BOOK3S_INTERRUPT_HV_RM_HARD: |
1713 | r = RESUME_PASSTHROUGH; | |
1714 | break; | |
de56a948 PM |
1715 | default: |
1716 | kvmppc_dump_regs(vcpu); | |
1717 | printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n", | |
1718 | vcpu->arch.trap, kvmppc_get_pc(vcpu), | |
1719 | vcpu->arch.shregs.msr); | |
f3271d4c | 1720 | run->hw.hardware_exit_reason = vcpu->arch.trap; |
de56a948 | 1721 | r = RESUME_HOST; |
de56a948 PM |
1722 | break; |
1723 | } | |
1724 | ||
de56a948 PM |
1725 | return r; |
1726 | } | |
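/*
 * For reference, the resume codes used above: RESUME_GUEST re-enters
 * the guest directly, RESUME_HOST exits to userspace with the
 * exit_reason filled in, and RESUME_PAGE_FAULT and RESUME_PASSTHROUGH
 * are HV-specific codes telling the caller to run the page fault
 * handler or the XICS passthrough completion path before deciding
 * whether to re-enter the guest.
 */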
1727 | ||
8c99d345 | 1728 | static int kvmppc_handle_nested_exit(struct kvm_vcpu *vcpu) |
360cae31 PM |
1729 | { |
1730 | int r; | |
1731 | int srcu_idx; | |
1732 | ||
1733 | vcpu->stat.sum_exits++; | |
1734 | ||
1735 | /* | |
1736 | * This can happen if an interrupt occurs in the last stages | |
1737 | * of guest entry or the first stages of guest exit (i.e. after | |
1738 | * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV | |
1739 | * and before setting it to KVM_GUEST_MODE_HOST_HV). | |
1740 | * That can happen due to a bug, or due to a machine check | |
1741 | * occurring at just the wrong time. | |
1742 | */ | |
1743 | if (vcpu->arch.shregs.msr & MSR_HV) { | |
1744 | pr_emerg("KVM trap in HV mode while nested!\n"); | |
1745 | pr_emerg("trap=0x%x | pc=0x%lx | msr=0x%llx\n", | |
1746 | vcpu->arch.trap, kvmppc_get_pc(vcpu), | |
1747 | vcpu->arch.shregs.msr); | |
1748 | kvmppc_dump_regs(vcpu); | |
1749 | return RESUME_HOST; | |
1750 | } | |
1751 | switch (vcpu->arch.trap) { | |
1752 | /* We're good on these - the host merely wanted to get our attention */ | |
1753 | case BOOK3S_INTERRUPT_HV_DECREMENTER: | |
1754 | vcpu->stat.dec_exits++; | |
1755 | r = RESUME_GUEST; | |
1756 | break; | |
1757 | case BOOK3S_INTERRUPT_EXTERNAL: | |
1758 | vcpu->stat.ext_intr_exits++; | |
1759 | r = RESUME_HOST; | |
1760 | break; | |
1761 | case BOOK3S_INTERRUPT_H_DOORBELL: | |
1762 | case BOOK3S_INTERRUPT_H_VIRT: | |
1763 | vcpu->stat.ext_intr_exits++; | |
1764 | r = RESUME_GUEST; | |
1765 | break; | |
1766 | /* SR/HMI/PMI are HV interrupts that the host has handled. Resume guest. */ |
1767 | case BOOK3S_INTERRUPT_HMI: | |
1768 | case BOOK3S_INTERRUPT_PERFMON: | |
1769 | case BOOK3S_INTERRUPT_SYSTEM_RESET: | |
1770 | r = RESUME_GUEST; | |
1771 | break; | |
1772 | case BOOK3S_INTERRUPT_MACHINE_CHECK: | |
1d15ffdf NP |
1773 | { |
1774 | static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL, | |
1775 | DEFAULT_RATELIMIT_BURST); | |
360cae31 PM |
1776 | /* Pass the machine check to the L1 guest */ |
1777 | r = RESUME_HOST; | |
1778 | /* Print the MCE event to host console. */ | |
1d15ffdf NP |
1779 | if (__ratelimit(&rs)) |
1780 | machine_check_print_event_info(&vcpu->arch.mce_evt, false, true); | |
360cae31 | 1781 | break; |
1d15ffdf | 1782 | } |
360cae31 PM |
1783 | /* |
1784 | * We get these next two if the guest accesses a page which it thinks | |
1785 | * it has mapped but which is not actually present, either because | |
1786 | * it is for an emulated I/O device or because the corresponding |
1787 | * host page has been paged out. | |
1788 | */ | |
1789 | case BOOK3S_INTERRUPT_H_DATA_STORAGE: | |
1790 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | |
8c99d345 | 1791 | r = kvmhv_nested_page_fault(vcpu); |
360cae31 PM |
1792 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); |
1793 | break; | |
1794 | case BOOK3S_INTERRUPT_H_INST_STORAGE: | |
1795 | vcpu->arch.fault_dar = kvmppc_get_pc(vcpu); | |
1796 | vcpu->arch.fault_dsisr = kvmppc_get_msr(vcpu) & | |
1797 | DSISR_SRR1_MATCH_64S; | |
1798 | if (vcpu->arch.shregs.msr & HSRR1_HISI_WRITE) | |
1799 | vcpu->arch.fault_dsisr |= DSISR_ISSTORE; | |
1800 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | |
8c99d345 | 1801 | r = kvmhv_nested_page_fault(vcpu); |
360cae31 PM |
1802 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); |
1803 | break; | |
1804 | ||
1805 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | |
1806 | case BOOK3S_INTERRUPT_HV_SOFTPATCH: | |
1807 | /* | |
1808 | * This occurs for various TM-related instructions that | |
1809 | * we need to emulate on POWER9 DD2.2. We have already | |
1810 | * handled the cases where the guest was in real-suspend | |
1811 | * mode and was transitioning to transactional state. | |
1812 | */ | |
1813 | r = kvmhv_p9_tm_emulation(vcpu); | |
1814 | break; | |
1815 | #endif | |
1816 | ||
1817 | case BOOK3S_INTERRUPT_HV_RM_HARD: | |
1818 | vcpu->arch.trap = 0; | |
1819 | r = RESUME_GUEST; | |
03f95332 | 1820 | if (!xics_on_xive()) |
360cae31 PM |
1821 | kvmppc_xics_rm_complete(vcpu, 0); |
1822 | break; | |
53324b51 BR |
1823 | case BOOK3S_INTERRUPT_SYSCALL: |
1824 | { | |
1825 | unsigned long req = kvmppc_get_gpr(vcpu, 3); | |
1826 | ||
1827 | /* | |
1828 | * The H_RPT_INVALIDATE hcalls issued by nested | |
1829 | * guests for process-scoped invalidations when | |
1830 | * GTSE=0, are handled here in L0. | |
1831 | */ | |
1832 | if (req == H_RPT_INVALIDATE) { | |
1833 | r = kvmppc_nested_h_rpt_invalidate(vcpu); | |
1834 | break; | |
1835 | } | |
1836 | ||
1837 | r = RESUME_HOST; | |
1838 | break; | |
1839 | } | |
360cae31 PM |
1840 | default: |
1841 | r = RESUME_HOST; | |
1842 | break; | |
1843 | } | |
1844 | ||
1845 | return r; | |
1846 | } | |
1847 | ||
3a167bea AK |
1848 | static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu, |
1849 | struct kvm_sregs *sregs) | |
de56a948 PM |
1850 | { |
1851 | int i; | |
1852 | ||
de56a948 | 1853 | memset(sregs, 0, sizeof(struct kvm_sregs)); |
87916442 | 1854 | sregs->pvr = vcpu->arch.pvr; |
de56a948 PM |
1855 | for (i = 0; i < vcpu->arch.slb_max; i++) { |
1856 | sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige; | |
1857 | sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv; | |
1858 | } | |
1859 | ||
1860 | return 0; | |
1861 | } | |
1862 | ||
3a167bea AK |
1863 | static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu, |
1864 | struct kvm_sregs *sregs) | |
de56a948 PM |
1865 | { |
1866 | int i, j; | |
1867 | ||
9333e6c4 PM |
1868 | /* Only accept the same PVR as the host's, since we can't spoof it */ |
1869 | if (sregs->pvr != vcpu->arch.pvr) | |
1870 | return -EINVAL; | |
de56a948 PM |
1871 | |
1872 | j = 0; | |
1873 | for (i = 0; i < vcpu->arch.slb_nr; i++) { | |
1874 | if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) { | |
1875 | vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe; | |
1876 | vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv; | |
1877 | ++j; | |
1878 | } | |
1879 | } | |
1880 | vcpu->arch.slb_max = j; | |
1881 | ||
1882 | return 0; | |
1883 | } | |
1884 | ||
67145ef4 NP |
1885 | /* |
1886 | * Enforce limits on guest LPCR values based on hardware availability, | |
1887 | * guest configuration, and possibly hypervisor support and security | |
1888 | * concerns. | |
1889 | */ | |
1890 | unsigned long kvmppc_filter_lpcr_hv(struct kvm *kvm, unsigned long lpcr) | |
1891 | { | |
72c15287 NP |
1892 | /* LPCR_TC only applies to HPT guests */ |
1893 | if (kvm_is_radix(kvm)) | |
1894 | lpcr &= ~LPCR_TC; | |
1895 | ||
67145ef4 NP |
1896 | /* On POWER8 and above, userspace can modify AIL */ |
1897 | if (!cpu_has_feature(CPU_FTR_ARCH_207S)) | |
1898 | lpcr &= ~LPCR_AIL; | |
bcc92a0d NP |
1899 | if ((lpcr & LPCR_AIL) != LPCR_AIL_3) |
1900 | lpcr &= ~LPCR_AIL; /* LPCR[AIL]=1/2 is disallowed */ | |
2e1ae9cd NP |
1901 | /* |
1902 | * On some POWER9s we force AIL off for radix guests to prevent | |
1903 | * executing in MSR[HV]=1 mode with the MMU enabled and PIDR set to | |
1904 | * guest, which can result in Q0 translations with LPID=0 PID=PIDR to | |
1905 | * be cached, which the host TLB management does not expect. | |
1906 | */ | |
1907 | if (kvm_is_radix(kvm) && cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) | |
1908 | lpcr &= ~LPCR_AIL; | |
67145ef4 NP |
1909 | |
1910 | /* | |
1911 | * On POWER9, allow userspace to enable large decrementer for the | |
1912 | * guest, whether or not the host has it enabled. | |
1913 | */ | |
1914 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | |
1915 | lpcr &= ~LPCR_LD; | |
1916 | ||
1917 | return lpcr; | |
1918 | } | |
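/*
 * A worked example of the filtering above, with hypothetical input:
 * on a POWER9 radix host without the prefetch bug, a request for
 * LPCR[AIL] = 2 has the AIL field cleared to 0 (only AIL = 0 or
 * AIL = 3 is allowed), while LPCR[AIL] = 3 and LPCR[LD] pass through
 * unchanged; on a pre-POWER8 host the AIL field is cleared
 * unconditionally.
 */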
1919 | ||
1920 | static void verify_lpcr(struct kvm *kvm, unsigned long lpcr) | |
1921 | { | |
1922 | if (lpcr != kvmppc_filter_lpcr_hv(kvm, lpcr)) { | |
1923 | WARN_ONCE(1, "lpcr 0x%lx differs from filtered 0x%lx\n", | |
1924 | lpcr, kvmppc_filter_lpcr_hv(kvm, lpcr)); | |
1925 | } | |
1926 | } | |
1927 | ||
a0840240 AK |
1928 | static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr, |
1929 | bool preserve_top32) | |
a0144e2a | 1930 | { |
8f902b00 | 1931 | struct kvm *kvm = vcpu->kvm; |
a0144e2a PM |
1932 | struct kvmppc_vcore *vc = vcpu->arch.vcore; |
1933 | u64 mask; | |
1934 | ||
1935 | spin_lock(&vc->lock); | |
67145ef4 NP |
1936 | |
1937 | /* | |
1938 | * Userspace can only modify | |
1939 | * DPFD (default prefetch depth), ILE (interrupt little-endian), | |
1940 | * TC (translation control), AIL (alternate interrupt location), | |
1941 | * LD (large decrementer). | |
1942 | * These are subject to restrictions from kvmppc_filter_lpcr_hv(). |
1943 | */ | |
1944 | mask = LPCR_DPFD | LPCR_ILE | LPCR_TC | LPCR_AIL | LPCR_LD; | |
1945 | ||
1946 | /* Broken 32-bit version of LPCR must not clear top bits */ | |
1947 | if (preserve_top32) | |
1948 | mask &= 0xFFFFFFFF; | |
1949 | ||
1950 | new_lpcr = kvmppc_filter_lpcr_hv(kvm, | |
1951 | (vc->lpcr & ~mask) | (new_lpcr & mask)); | |
1952 | ||
d682916a AB |
1953 | /* |
1954 | * If ILE (interrupt little-endian) has changed, update the | |
1955 | * MSR_LE bit in the intr_msr for each vcpu in this vcore. | |
1956 | */ | |
1957 | if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) { | |
d682916a AB |
1958 | struct kvm_vcpu *vcpu; |
1959 | int i; | |
1960 | ||
d682916a AB |
1961 | kvm_for_each_vcpu(i, vcpu, kvm) { |
1962 | if (vcpu->arch.vcore != vc) | |
1963 | continue; | |
1964 | if (new_lpcr & LPCR_ILE) | |
1965 | vcpu->arch.intr_msr |= MSR_LE; | |
1966 | else | |
1967 | vcpu->arch.intr_msr &= ~MSR_LE; | |
1968 | } | |
d682916a AB |
1969 | } |
1970 | ||
67145ef4 | 1971 | vc->lpcr = new_lpcr; |
a0840240 | 1972 | |
a0144e2a PM |
1973 | spin_unlock(&vc->lock); |
1974 | } | |
1975 | ||
3a167bea AK |
1976 | static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
1977 | union kvmppc_one_reg *val) | |
31f3438e | 1978 | { |
a136a8bd PM |
1979 | int r = 0; |
1980 | long int i; | |
31f3438e | 1981 | |
a136a8bd | 1982 | switch (id) { |
a59c1d9e MS |
1983 | case KVM_REG_PPC_DEBUG_INST: |
1984 | *val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT); | |
1985 | break; | |
31f3438e | 1986 | case KVM_REG_PPC_HIOR: |
a136a8bd PM |
1987 | *val = get_reg_val(id, 0); |
1988 | break; | |
1989 | case KVM_REG_PPC_DABR: | |
1990 | *val = get_reg_val(id, vcpu->arch.dabr); | |
1991 | break; | |
8563bf52 PM |
1992 | case KVM_REG_PPC_DABRX: |
1993 | *val = get_reg_val(id, vcpu->arch.dabrx); | |
1994 | break; | |
a136a8bd PM |
1995 | case KVM_REG_PPC_DSCR: |
1996 | *val = get_reg_val(id, vcpu->arch.dscr); | |
1997 | break; | |
1998 | case KVM_REG_PPC_PURR: | |
1999 | *val = get_reg_val(id, vcpu->arch.purr); | |
2000 | break; | |
2001 | case KVM_REG_PPC_SPURR: | |
2002 | *val = get_reg_val(id, vcpu->arch.spurr); | |
2003 | break; | |
2004 | case KVM_REG_PPC_AMR: | |
2005 | *val = get_reg_val(id, vcpu->arch.amr); | |
2006 | break; | |
2007 | case KVM_REG_PPC_UAMOR: | |
2008 | *val = get_reg_val(id, vcpu->arch.uamor); | |
2009 | break; | |
7e4a145e | 2010 | case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: |
a136a8bd PM |
2011 | i = id - KVM_REG_PPC_MMCR0; |
2012 | *val = get_reg_val(id, vcpu->arch.mmcr[i]); | |
2013 | break; | |
7e4a145e AR |
2014 | case KVM_REG_PPC_MMCR2: |
2015 | *val = get_reg_val(id, vcpu->arch.mmcr[2]); | |
2016 | break; | |
2017 | case KVM_REG_PPC_MMCRA: | |
2018 | *val = get_reg_val(id, vcpu->arch.mmcra); | |
2019 | break; | |
2020 | case KVM_REG_PPC_MMCRS: | |
2021 | *val = get_reg_val(id, vcpu->arch.mmcrs); | |
2022 | break; | |
5752fe0b AR |
2023 | case KVM_REG_PPC_MMCR3: |
2024 | *val = get_reg_val(id, vcpu->arch.mmcr[3]); | |
2025 | break; | |
a136a8bd PM |
2026 | case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: |
2027 | i = id - KVM_REG_PPC_PMC1; | |
2028 | *val = get_reg_val(id, vcpu->arch.pmc[i]); | |
31f3438e | 2029 | break; |
b005255e MN |
2030 | case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: |
2031 | i = id - KVM_REG_PPC_SPMC1; | |
2032 | *val = get_reg_val(id, vcpu->arch.spmc[i]); | |
2033 | break; | |
14941789 PM |
2034 | case KVM_REG_PPC_SIAR: |
2035 | *val = get_reg_val(id, vcpu->arch.siar); | |
2036 | break; | |
2037 | case KVM_REG_PPC_SDAR: | |
2038 | *val = get_reg_val(id, vcpu->arch.sdar); | |
2039 | break; | |
b005255e | 2040 | case KVM_REG_PPC_SIER: |
5752fe0b AR |
2041 | *val = get_reg_val(id, vcpu->arch.sier[0]); |
2042 | break; | |
2043 | case KVM_REG_PPC_SIER2: | |
2044 | *val = get_reg_val(id, vcpu->arch.sier[1]); | |
2045 | break; | |
2046 | case KVM_REG_PPC_SIER3: | |
2047 | *val = get_reg_val(id, vcpu->arch.sier[2]); | |
a8bd19ef | 2048 | break; |
b005255e MN |
2049 | case KVM_REG_PPC_IAMR: |
2050 | *val = get_reg_val(id, vcpu->arch.iamr); | |
2051 | break; | |
b005255e MN |
2052 | case KVM_REG_PPC_PSPB: |
2053 | *val = get_reg_val(id, vcpu->arch.pspb); | |
2054 | break; | |
b005255e | 2055 | case KVM_REG_PPC_DPDES: |
ff42df49 PM |
2056 | /* |
2057 | * On POWER9, where we are emulating msgsndp etc., | |
2058 | * we return 1 bit for each vcpu, which can come from | |
2059 | * either vcore->dpdes or doorbell_request. | |
2060 | * On POWER8, doorbell_request is 0. | |
2061 | */ | |
2062 | *val = get_reg_val(id, vcpu->arch.vcore->dpdes | | |
2063 | vcpu->arch.doorbell_request); | |
b005255e | 2064 | break; |
88b02cf9 PM |
2065 | case KVM_REG_PPC_VTB: |
2066 | *val = get_reg_val(id, vcpu->arch.vcore->vtb); | |
2067 | break; | |
b005255e | 2068 | case KVM_REG_PPC_DAWR: |
122954ed | 2069 | *val = get_reg_val(id, vcpu->arch.dawr0); |
b005255e MN |
2070 | break; |
2071 | case KVM_REG_PPC_DAWRX: | |
122954ed | 2072 | *val = get_reg_val(id, vcpu->arch.dawrx0); |
b005255e | 2073 | break; |
bd1de1a0 RB |
2074 | case KVM_REG_PPC_DAWR1: |
2075 | *val = get_reg_val(id, vcpu->arch.dawr1); | |
2076 | break; | |
2077 | case KVM_REG_PPC_DAWRX1: | |
2078 | *val = get_reg_val(id, vcpu->arch.dawrx1); | |
2079 | break; | |
b005255e MN |
2080 | case KVM_REG_PPC_CIABR: |
2081 | *val = get_reg_val(id, vcpu->arch.ciabr); | |
2082 | break; | |
b005255e MN |
2083 | case KVM_REG_PPC_CSIGR: |
2084 | *val = get_reg_val(id, vcpu->arch.csigr); | |
2085 | break; | |
2086 | case KVM_REG_PPC_TACR: | |
2087 | *val = get_reg_val(id, vcpu->arch.tacr); | |
2088 | break; | |
2089 | case KVM_REG_PPC_TCSCR: | |
2090 | *val = get_reg_val(id, vcpu->arch.tcscr); | |
2091 | break; | |
2092 | case KVM_REG_PPC_PID: | |
2093 | *val = get_reg_val(id, vcpu->arch.pid); | |
2094 | break; | |
2095 | case KVM_REG_PPC_ACOP: | |
2096 | *val = get_reg_val(id, vcpu->arch.acop); | |
2097 | break; | |
2098 | case KVM_REG_PPC_WORT: | |
2099 | *val = get_reg_val(id, vcpu->arch.wort); | |
a8bd19ef | 2100 | break; |
e9cf1e08 PM |
2101 | case KVM_REG_PPC_TIDR: |
2102 | *val = get_reg_val(id, vcpu->arch.tid); | |
2103 | break; | |
2104 | case KVM_REG_PPC_PSSCR: | |
2105 | *val = get_reg_val(id, vcpu->arch.psscr); | |
2106 | break; | |
55b665b0 PM |
2107 | case KVM_REG_PPC_VPA_ADDR: |
2108 | spin_lock(&vcpu->arch.vpa_update_lock); | |
2109 | *val = get_reg_val(id, vcpu->arch.vpa.next_gpa); | |
2110 | spin_unlock(&vcpu->arch.vpa_update_lock); | |
2111 | break; | |
2112 | case KVM_REG_PPC_VPA_SLB: | |
2113 | spin_lock(&vcpu->arch.vpa_update_lock); | |
2114 | val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa; | |
2115 | val->vpaval.length = vcpu->arch.slb_shadow.len; | |
2116 | spin_unlock(&vcpu->arch.vpa_update_lock); | |
2117 | break; | |
2118 | case KVM_REG_PPC_VPA_DTL: | |
2119 | spin_lock(&vcpu->arch.vpa_update_lock); | |
2120 | val->vpaval.addr = vcpu->arch.dtl.next_gpa; | |
2121 | val->vpaval.length = vcpu->arch.dtl.len; | |
2122 | spin_unlock(&vcpu->arch.vpa_update_lock); | |
2123 | break; | |
93b0f4dc PM |
2124 | case KVM_REG_PPC_TB_OFFSET: |
2125 | *val = get_reg_val(id, vcpu->arch.vcore->tb_offset); | |
2126 | break; | |
a0144e2a | 2127 | case KVM_REG_PPC_LPCR: |
a0840240 | 2128 | case KVM_REG_PPC_LPCR_64: |
a0144e2a PM |
2129 | *val = get_reg_val(id, vcpu->arch.vcore->lpcr); |
2130 | break; | |
4b8473c9 PM |
2131 | case KVM_REG_PPC_PPR: |
2132 | *val = get_reg_val(id, vcpu->arch.ppr); | |
2133 | break; | |
a7d80d01 MN |
2134 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
2135 | case KVM_REG_PPC_TFHAR: | |
2136 | *val = get_reg_val(id, vcpu->arch.tfhar); | |
2137 | break; | |
2138 | case KVM_REG_PPC_TFIAR: | |
2139 | *val = get_reg_val(id, vcpu->arch.tfiar); | |
2140 | break; | |
2141 | case KVM_REG_PPC_TEXASR: | |
2142 | *val = get_reg_val(id, vcpu->arch.texasr); | |
2143 | break; | |
2144 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
2145 | i = id - KVM_REG_PPC_TM_GPR0; | |
2146 | *val = get_reg_val(id, vcpu->arch.gpr_tm[i]); | |
2147 | break; | |
2148 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
2149 | { | |
2150 | int j; | |
2151 | i = id - KVM_REG_PPC_TM_VSR0; | |
2152 | if (i < 32) | |
2153 | for (j = 0; j < TS_FPRWIDTH; j++) | |
2154 | val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j]; | |
2155 | else { | |
2156 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
2157 | val->vval = vcpu->arch.vr_tm.vr[i-32]; | |
2158 | else | |
2159 | r = -ENXIO; | |
2160 | } | |
2161 | break; | |
2162 | } | |
2163 | case KVM_REG_PPC_TM_CR: | |
2164 | *val = get_reg_val(id, vcpu->arch.cr_tm); | |
2165 | break; | |
0d808df0 PM |
2166 | case KVM_REG_PPC_TM_XER: |
2167 | *val = get_reg_val(id, vcpu->arch.xer_tm); | |
2168 | break; | |
a7d80d01 MN |
2169 | case KVM_REG_PPC_TM_LR: |
2170 | *val = get_reg_val(id, vcpu->arch.lr_tm); | |
2171 | break; | |
2172 | case KVM_REG_PPC_TM_CTR: | |
2173 | *val = get_reg_val(id, vcpu->arch.ctr_tm); | |
2174 | break; | |
2175 | case KVM_REG_PPC_TM_FPSCR: | |
2176 | *val = get_reg_val(id, vcpu->arch.fp_tm.fpscr); | |
2177 | break; | |
2178 | case KVM_REG_PPC_TM_AMR: | |
2179 | *val = get_reg_val(id, vcpu->arch.amr_tm); | |
2180 | break; | |
2181 | case KVM_REG_PPC_TM_PPR: | |
2182 | *val = get_reg_val(id, vcpu->arch.ppr_tm); | |
2183 | break; | |
2184 | case KVM_REG_PPC_TM_VRSAVE: | |
2185 | *val = get_reg_val(id, vcpu->arch.vrsave_tm); | |
2186 | break; | |
2187 | case KVM_REG_PPC_TM_VSCR: | |
2188 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
2189 | *val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]); | |
2190 | else | |
2191 | r = -ENXIO; | |
2192 | break; | |
2193 | case KVM_REG_PPC_TM_DSCR: | |
2194 | *val = get_reg_val(id, vcpu->arch.dscr_tm); | |
2195 | break; | |
2196 | case KVM_REG_PPC_TM_TAR: | |
2197 | *val = get_reg_val(id, vcpu->arch.tar_tm); | |
2198 | break; | |
2199 | #endif | |
388cc6e1 PM |
2200 | case KVM_REG_PPC_ARCH_COMPAT: |
2201 | *val = get_reg_val(id, vcpu->arch.vcore->arch_compat); | |
2202 | break; | |
5855564c PM |
2203 | case KVM_REG_PPC_DEC_EXPIRY: |
2204 | *val = get_reg_val(id, vcpu->arch.dec_expires + | |
2205 | vcpu->arch.vcore->tb_offset); | |
2206 | break; | |
a1f15826 PM |
2207 | case KVM_REG_PPC_ONLINE: |
2208 | *val = get_reg_val(id, vcpu->arch.online); | |
2209 | break; | |
30323418 PM |
2210 | case KVM_REG_PPC_PTCR: |
2211 | *val = get_reg_val(id, vcpu->kvm->arch.l1_ptcr); | |
2212 | break; | |
31f3438e | 2213 | default: |
a136a8bd | 2214 | r = -EINVAL; |
31f3438e PM |
2215 | break; |
2216 | } | |
2217 | ||
2218 | return r; | |
2219 | } | |
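/*
 * An illustrative userspace sketch, not part of this file: this
 * handler backs the KVM_GET_ONE_REG vcpu ioctl (vcpu_fd is a
 * hypothetical vcpu file descriptor):
 *
 *	__u64 val;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_LPCR_64,
 *		.addr = (__u64)&val,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 *
 * after which val holds the vcore's LPCR value.
 */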
2220 | ||
3a167bea AK |
2221 | static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
2222 | union kvmppc_one_reg *val) | |
31f3438e | 2223 | { |
a136a8bd PM |
2224 | int r = 0; |
2225 | long int i; | |
55b665b0 | 2226 | unsigned long addr, len; |
31f3438e | 2227 | |
a136a8bd | 2228 | switch (id) { |
31f3438e | 2229 | case KVM_REG_PPC_HIOR: |
31f3438e | 2230 | /* Only allow this to be set to zero */ |
a136a8bd | 2231 | if (set_reg_val(id, *val)) |
31f3438e PM |
2232 | r = -EINVAL; |
2233 | break; | |
a136a8bd PM |
2234 | case KVM_REG_PPC_DABR: |
2235 | vcpu->arch.dabr = set_reg_val(id, *val); | |
2236 | break; | |
8563bf52 PM |
2237 | case KVM_REG_PPC_DABRX: |
2238 | vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; | |
2239 | break; | |
a136a8bd PM |
2240 | case KVM_REG_PPC_DSCR: |
2241 | vcpu->arch.dscr = set_reg_val(id, *val); | |
2242 | break; | |
2243 | case KVM_REG_PPC_PURR: | |
2244 | vcpu->arch.purr = set_reg_val(id, *val); | |
2245 | break; | |
2246 | case KVM_REG_PPC_SPURR: | |
2247 | vcpu->arch.spurr = set_reg_val(id, *val); | |
2248 | break; | |
2249 | case KVM_REG_PPC_AMR: | |
2250 | vcpu->arch.amr = set_reg_val(id, *val); | |
2251 | break; | |
2252 | case KVM_REG_PPC_UAMOR: | |
2253 | vcpu->arch.uamor = set_reg_val(id, *val); | |
2254 | break; | |
7e4a145e | 2255 | case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCR1: |
a136a8bd PM |
2256 | i = id - KVM_REG_PPC_MMCR0; |
2257 | vcpu->arch.mmcr[i] = set_reg_val(id, *val); | |
2258 | break; | |
7e4a145e AR |
2259 | case KVM_REG_PPC_MMCR2: |
2260 | vcpu->arch.mmcr[2] = set_reg_val(id, *val); | |
2261 | break; | |
2262 | case KVM_REG_PPC_MMCRA: | |
2263 | vcpu->arch.mmcra = set_reg_val(id, *val); | |
2264 | break; | |
2265 | case KVM_REG_PPC_MMCRS: | |
2266 | vcpu->arch.mmcrs = set_reg_val(id, *val); | |
2267 | break; | |
5752fe0b AR |
2268 | case KVM_REG_PPC_MMCR3: |
2269 | vcpu->arch.mmcr[3] = set_reg_val(id, *val); |
2270 | break; | |
a136a8bd PM |
2271 | case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: |
2272 | i = id - KVM_REG_PPC_PMC1; | |
2273 | vcpu->arch.pmc[i] = set_reg_val(id, *val); | |
2274 | break; | |
b005255e MN |
2275 | case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: |
2276 | i = id - KVM_REG_PPC_SPMC1; | |
2277 | vcpu->arch.spmc[i] = set_reg_val(id, *val); | |
2278 | break; | |
14941789 PM |
2279 | case KVM_REG_PPC_SIAR: |
2280 | vcpu->arch.siar = set_reg_val(id, *val); | |
2281 | break; | |
2282 | case KVM_REG_PPC_SDAR: | |
2283 | vcpu->arch.sdar = set_reg_val(id, *val); | |
2284 | break; | |
b005255e | 2285 | case KVM_REG_PPC_SIER: |
5752fe0b AR |
2286 | vcpu->arch.sier[0] = set_reg_val(id, *val); |
2287 | break; | |
2288 | case KVM_REG_PPC_SIER2: | |
2289 | vcpu->arch.sier[1] = set_reg_val(id, *val); | |
2290 | break; | |
2291 | case KVM_REG_PPC_SIER3: | |
2292 | vcpu->arch.sier[2] = set_reg_val(id, *val); | |
a8bd19ef | 2293 | break; |
b005255e MN |
2294 | case KVM_REG_PPC_IAMR: |
2295 | vcpu->arch.iamr = set_reg_val(id, *val); | |
2296 | break; | |
b005255e MN |
2297 | case KVM_REG_PPC_PSPB: |
2298 | vcpu->arch.pspb = set_reg_val(id, *val); | |
2299 | break; | |
b005255e MN |
2300 | case KVM_REG_PPC_DPDES: |
2301 | vcpu->arch.vcore->dpdes = set_reg_val(id, *val); | |
2302 | break; | |
88b02cf9 PM |
2303 | case KVM_REG_PPC_VTB: |
2304 | vcpu->arch.vcore->vtb = set_reg_val(id, *val); | |
2305 | break; | |
b005255e | 2306 | case KVM_REG_PPC_DAWR: |
122954ed | 2307 | vcpu->arch.dawr0 = set_reg_val(id, *val); |
b005255e MN |
2308 | break; |
2309 | case KVM_REG_PPC_DAWRX: | |
122954ed | 2310 | vcpu->arch.dawrx0 = set_reg_val(id, *val) & ~DAWRX_HYP; |
b005255e | 2311 | break; |
bd1de1a0 RB |
2312 | case KVM_REG_PPC_DAWR1: |
2313 | vcpu->arch.dawr1 = set_reg_val(id, *val); | |
2314 | break; | |
2315 | case KVM_REG_PPC_DAWRX1: | |
2316 | vcpu->arch.dawrx1 = set_reg_val(id, *val) & ~DAWRX_HYP; | |
2317 | break; | |
b005255e MN |
2318 | case KVM_REG_PPC_CIABR: |
2319 | vcpu->arch.ciabr = set_reg_val(id, *val); | |
2320 | /* Don't allow setting breakpoints in hypervisor code */ | |
2321 | if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) | |
2322 | vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ | |
2323 | break; | |
b005255e MN |
2324 | case KVM_REG_PPC_CSIGR: |
2325 | vcpu->arch.csigr = set_reg_val(id, *val); | |
2326 | break; | |
2327 | case KVM_REG_PPC_TACR: | |
2328 | vcpu->arch.tacr = set_reg_val(id, *val); | |
2329 | break; | |
2330 | case KVM_REG_PPC_TCSCR: | |
2331 | vcpu->arch.tcscr = set_reg_val(id, *val); | |
2332 | break; | |
2333 | case KVM_REG_PPC_PID: | |
2334 | vcpu->arch.pid = set_reg_val(id, *val); | |
2335 | break; | |
2336 | case KVM_REG_PPC_ACOP: | |
2337 | vcpu->arch.acop = set_reg_val(id, *val); | |
2338 | break; | |
2339 | case KVM_REG_PPC_WORT: | |
2340 | vcpu->arch.wort = set_reg_val(id, *val); | |
a8bd19ef | 2341 | break; |
e9cf1e08 PM |
2342 | case KVM_REG_PPC_TIDR: |
2343 | vcpu->arch.tid = set_reg_val(id, *val); | |
2344 | break; | |
2345 | case KVM_REG_PPC_PSSCR: | |
2346 | vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; | |
2347 | break; | |
55b665b0 PM |
2348 | case KVM_REG_PPC_VPA_ADDR: |
2349 | addr = set_reg_val(id, *val); | |
2350 | r = -EINVAL; | |
2351 | if (!addr && (vcpu->arch.slb_shadow.next_gpa || | |
2352 | vcpu->arch.dtl.next_gpa)) | |
2353 | break; | |
2354 | r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); | |
2355 | break; | |
2356 | case KVM_REG_PPC_VPA_SLB: | |
2357 | addr = val->vpaval.addr; | |
2358 | len = val->vpaval.length; | |
2359 | r = -EINVAL; | |
2360 | if (addr && !vcpu->arch.vpa.next_gpa) | |
2361 | break; | |
2362 | r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); | |
2363 | break; | |
2364 | case KVM_REG_PPC_VPA_DTL: | |
2365 | addr = val->vpaval.addr; | |
2366 | len = val->vpaval.length; | |
2367 | r = -EINVAL; | |
9f8c8c78 PM |
2368 | if (addr && (len < sizeof(struct dtl_entry) || |
2369 | !vcpu->arch.vpa.next_gpa)) | |
55b665b0 PM |
2370 | break; |
2371 | len -= len % sizeof(struct dtl_entry); | |
2372 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); | |
2373 | break; | |
93b0f4dc PM |
2374 | case KVM_REG_PPC_TB_OFFSET: |
2375 | /* round up to multiple of 2^24 */ | |
2376 | vcpu->arch.vcore->tb_offset = | |
2377 | ALIGN(set_reg_val(id, *val), 1UL << 24); | |
2378 | break; | |
a0144e2a | 2379 | case KVM_REG_PPC_LPCR: |
a0840240 AK |
2380 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); |
2381 | break; | |
2382 | case KVM_REG_PPC_LPCR_64: | |
2383 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); | |
a0144e2a | 2384 | break; |
4b8473c9 PM |
2385 | case KVM_REG_PPC_PPR: |
2386 | vcpu->arch.ppr = set_reg_val(id, *val); | |
2387 | break; | |
a7d80d01 MN |
2388 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
2389 | case KVM_REG_PPC_TFHAR: | |
2390 | vcpu->arch.tfhar = set_reg_val(id, *val); | |
2391 | break; | |
2392 | case KVM_REG_PPC_TFIAR: | |
2393 | vcpu->arch.tfiar = set_reg_val(id, *val); | |
2394 | break; | |
2395 | case KVM_REG_PPC_TEXASR: | |
2396 | vcpu->arch.texasr = set_reg_val(id, *val); | |
2397 | break; | |
2398 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
2399 | i = id - KVM_REG_PPC_TM_GPR0; | |
2400 | vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); | |
2401 | break; | |
2402 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
2403 | { | |
2404 | int j; | |
2405 | i = id - KVM_REG_PPC_TM_VSR0; | |
2406 | if (i < 32) | |
2407 | for (j = 0; j < TS_FPRWIDTH; j++) | |
2408 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; | |
2409 | else | |
2410 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
2411 | vcpu->arch.vr_tm.vr[i-32] = val->vval; | |
2412 | else | |
2413 | r = -ENXIO; | |
2414 | break; | |
2415 | } | |
2416 | case KVM_REG_PPC_TM_CR: | |
2417 | vcpu->arch.cr_tm = set_reg_val(id, *val); | |
2418 | break; | |
0d808df0 PM |
2419 | case KVM_REG_PPC_TM_XER: |
2420 | vcpu->arch.xer_tm = set_reg_val(id, *val); | |
2421 | break; | |
a7d80d01 MN |
2422 | case KVM_REG_PPC_TM_LR: |
2423 | vcpu->arch.lr_tm = set_reg_val(id, *val); | |
2424 | break; | |
2425 | case KVM_REG_PPC_TM_CTR: | |
2426 | vcpu->arch.ctr_tm = set_reg_val(id, *val); | |
2427 | break; | |
2428 | case KVM_REG_PPC_TM_FPSCR: | |
2429 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); | |
2430 | break; | |
2431 | case KVM_REG_PPC_TM_AMR: | |
2432 | vcpu->arch.amr_tm = set_reg_val(id, *val); | |
2433 | break; | |
2434 | case KVM_REG_PPC_TM_PPR: | |
2435 | vcpu->arch.ppr_tm = set_reg_val(id, *val); | |
2436 | break; | |
2437 | case KVM_REG_PPC_TM_VRSAVE: | |
2438 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); | |
2439 | break; | |
2440 | case KVM_REG_PPC_TM_VSCR: | |
2441 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
2442 | vcpu->arch.vr.vscr.u[3] = set_reg_val(id, *val); | |
2443 | else | |
2444 | r = -ENXIO; |
2445 | break; | |
2446 | case KVM_REG_PPC_TM_DSCR: | |
2447 | vcpu->arch.dscr_tm = set_reg_val(id, *val); | |
2448 | break; | |
2449 | case KVM_REG_PPC_TM_TAR: | |
2450 | vcpu->arch.tar_tm = set_reg_val(id, *val); | |
2451 | break; | |
2452 | #endif | |
388cc6e1 PM |
2453 | case KVM_REG_PPC_ARCH_COMPAT: |
2454 | r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); | |
2455 | break; | |
5855564c PM |
2456 | case KVM_REG_PPC_DEC_EXPIRY: |
2457 | vcpu->arch.dec_expires = set_reg_val(id, *val) - | |
2458 | vcpu->arch.vcore->tb_offset; | |
2459 | break; | |
a1f15826 | 2460 | case KVM_REG_PPC_ONLINE: |
7aa15842 PM |
2461 | i = set_reg_val(id, *val); |
2462 | if (i && !vcpu->arch.online) | |
2463 | atomic_inc(&vcpu->arch.vcore->online_count); | |
2464 | else if (!i && vcpu->arch.online) | |
2465 | atomic_dec(&vcpu->arch.vcore->online_count); | |
2466 | vcpu->arch.online = i; | |
a1f15826 | 2467 | break; |
30323418 PM |
2468 | case KVM_REG_PPC_PTCR: |
2469 | vcpu->kvm->arch.l1_ptcr = set_reg_val(id, *val); | |
2470 | break; | |
31f3438e | 2471 | default: |
a136a8bd | 2472 | r = -EINVAL; |
31f3438e PM |
2473 | break; |
2474 | } | |
2475 | ||
2476 | return r; | |
2477 | } | |
2478 | ||
45c940ba PM |
2479 | /* |
2480 | * On POWER9, threads are independent and can be in different partitions. | |
2481 | * Therefore we consider each thread to be a subcore. | |
2482 | * There is a restriction that all threads have to be in the same | |
2483 | * MMU mode (radix or HPT), unfortunately, but since we only support | |
2484 | * HPT guests on an HPT host so far, that isn't an impediment yet. |
2485 | */ | |
516f7898 | 2486 | static int threads_per_vcore(struct kvm *kvm) |
45c940ba | 2487 | { |
aaae8c79 | 2488 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
45c940ba PM |
2489 | return 1; |
2490 | return threads_per_subcore; | |
2491 | } | |
2492 | ||
1e175d2e | 2493 | static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int id) |
de9bdd1a SS |
2494 | { |
2495 | struct kvmppc_vcore *vcore; | |
2496 | ||
2497 | vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); | |
2498 | ||
2499 | if (vcore == NULL) | |
2500 | return NULL; | |
2501 | ||
de9bdd1a | 2502 | spin_lock_init(&vcore->lock); |
2711e248 | 2503 | spin_lock_init(&vcore->stoltb_lock); |
da4ad88c | 2504 | rcuwait_init(&vcore->wait); |
de9bdd1a SS |
2505 | vcore->preempt_tb = TB_NIL; |
2506 | vcore->lpcr = kvm->arch.lpcr; | |
1e175d2e | 2507 | vcore->first_vcpuid = id; |
de9bdd1a | 2508 | vcore->kvm = kvm; |
ec257165 | 2509 | INIT_LIST_HEAD(&vcore->preempt_list); |
de9bdd1a SS |
2510 | |
2511 | return vcore; | |
2512 | } | |
2513 | ||
b6c295df PM |
2514 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
2515 | static struct debugfs_timings_element { | |
2516 | const char *name; | |
2517 | size_t offset; | |
2518 | } timings[] = { | |
2519 | {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, | |
2520 | {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, | |
2521 | {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, | |
2522 | {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, | |
2523 | {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, | |
2524 | }; | |
2525 | ||
4bb817ed | 2526 | #define N_TIMINGS (ARRAY_SIZE(timings)) |
b6c295df PM |
2527 | |
2528 | struct debugfs_timings_state { | |
2529 | struct kvm_vcpu *vcpu; | |
2530 | unsigned int buflen; | |
2531 | char buf[N_TIMINGS * 100]; | |
2532 | }; | |
2533 | ||
2534 | static int debugfs_timings_open(struct inode *inode, struct file *file) | |
2535 | { | |
2536 | struct kvm_vcpu *vcpu = inode->i_private; | |
2537 | struct debugfs_timings_state *p; | |
2538 | ||
2539 | p = kzalloc(sizeof(*p), GFP_KERNEL); | |
2540 | if (!p) | |
2541 | return -ENOMEM; | |
2542 | ||
2543 | kvm_get_kvm(vcpu->kvm); | |
2544 | p->vcpu = vcpu; | |
2545 | file->private_data = p; | |
2546 | ||
2547 | return nonseekable_open(inode, file); | |
2548 | } | |
2549 | ||
2550 | static int debugfs_timings_release(struct inode *inode, struct file *file) | |
2551 | { | |
2552 | struct debugfs_timings_state *p = file->private_data; | |
2553 | ||
2554 | kvm_put_kvm(p->vcpu->kvm); | |
2555 | kfree(p); | |
2556 | return 0; | |
2557 | } | |
2558 | ||
2559 | static ssize_t debugfs_timings_read(struct file *file, char __user *buf, | |
2560 | size_t len, loff_t *ppos) | |
2561 | { | |
2562 | struct debugfs_timings_state *p = file->private_data; | |
2563 | struct kvm_vcpu *vcpu = p->vcpu; | |
2564 | char *s, *buf_end; | |
2565 | struct kvmhv_tb_accumulator tb; | |
2566 | u64 count; | |
2567 | loff_t pos; | |
2568 | ssize_t n; | |
2569 | int i, loops; | |
2570 | bool ok; | |
2571 | ||
2572 | if (!p->buflen) { | |
2573 | s = p->buf; | |
2574 | buf_end = s + sizeof(p->buf); | |
2575 | for (i = 0; i < N_TIMINGS; ++i) { | |
2576 | struct kvmhv_tb_accumulator *acc; | |
2577 | ||
2578 | acc = (struct kvmhv_tb_accumulator *) | |
2579 | ((unsigned long)vcpu + timings[i].offset); | |
2580 | ok = false; | |
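/*
 * seqcount-style snapshot: the timing accumulation code on the
 * guest entry/exit path increments acc->seqcount around each
 * update, so an odd value means an update is in flight.  Retry
 * until two reads of seqcount match around a stable copy.
 */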
2581 | for (loops = 0; loops < 1000; ++loops) { | |
2582 | count = acc->seqcount; | |
2583 | if (!(count & 1)) { | |
2584 | smp_rmb(); | |
2585 | tb = *acc; | |
2586 | smp_rmb(); | |
2587 | if (count == acc->seqcount) { | |
2588 | ok = true; | |
2589 | break; | |
2590 | } | |
2591 | } | |
2592 | udelay(1); | |
2593 | } | |
2594 | if (!ok) | |
2595 | snprintf(s, buf_end - s, "%s: stuck\n", | |
2596 | timings[i].name); | |
2597 | else | |
2598 | snprintf(s, buf_end - s, | |
2599 | "%s: %llu %llu %llu %llu\n", | |
2600 | timings[i].name, count / 2, | |
2601 | tb_to_ns(tb.tb_total), | |
2602 | tb_to_ns(tb.tb_min), | |
2603 | tb_to_ns(tb.tb_max)); | |
2604 | s += strlen(s); | |
2605 | } | |
2606 | p->buflen = s - p->buf; | |
2607 | } | |
2608 | ||
2609 | pos = *ppos; | |
2610 | if (pos >= p->buflen) | |
2611 | return 0; | |
2612 | if (len > p->buflen - pos) | |
2613 | len = p->buflen - pos; | |
2614 | n = copy_to_user(buf, p->buf + pos, len); | |
2615 | if (n) { | |
2616 | if (n == len) | |
2617 | return -EFAULT; | |
2618 | len -= n; | |
2619 | } | |
2620 | *ppos = pos + len; | |
2621 | return len; | |
2622 | } | |
2623 | ||
2624 | static ssize_t debugfs_timings_write(struct file *file, const char __user *buf, | |
2625 | size_t len, loff_t *ppos) | |
2626 | { | |
2627 | return -EACCES; | |
2628 | } | |
2629 | ||
2630 | static const struct file_operations debugfs_timings_ops = { | |
2631 | .owner = THIS_MODULE, | |
2632 | .open = debugfs_timings_open, | |
2633 | .release = debugfs_timings_release, | |
2634 | .read = debugfs_timings_read, | |
2635 | .write = debugfs_timings_write, | |
2636 | .llseek = generic_file_llseek, | |
2637 | }; | |
2638 | ||
2639 | /* Create a debugfs directory for the vcpu */ | |
2640 | static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) | |
2641 | { | |
2642 | char buf[16]; | |
2643 | struct kvm *kvm = vcpu->kvm; | |
2644 | ||
2645 | snprintf(buf, sizeof(buf), "vcpu%u", id); | |
b6c295df | 2646 | vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); |
c4fd527f GKH |
2647 | debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, vcpu, |
2648 | &debugfs_timings_ops); | |
b6c295df PM |
2649 | } |
2650 | ||
2651 | #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ | |
2652 | static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) | |
2653 | { | |
2654 | } | |
2655 | #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ | |
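/*
 * A hypothetical example of reading the per-vcpu file created above
 * (the debugfs path and the numbers are illustrative only):
 *
 *	# cat /sys/kernel/debug/kvm/12345-10/vcpu0/timings
 *	rm_entry: 59684 1733584 28 2908
 *	rm_intr: 59684 862016 24 6584
 *	...
 *
 * Each line is "<name>: <count> <total ns> <min ns> <max ns>",
 * matching the snprintf format in debugfs_timings_read().
 */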
2656 | ||
ff030fdf | 2657 | static int kvmppc_core_vcpu_create_hv(struct kvm_vcpu *vcpu) |
de56a948 | 2658 | { |
3c313524 | 2659 | int err; |
371fefd6 PM |
2660 | int core; |
2661 | struct kvmppc_vcore *vcore; | |
ff030fdf SC |
2662 | struct kvm *kvm; |
2663 | unsigned int id; | |
de56a948 | 2664 | |
ff030fdf SC |
2665 | kvm = vcpu->kvm; |
2666 | id = vcpu->vcpu_id; | |
de56a948 PM |
2667 | |
2668 | vcpu->arch.shared = &vcpu->arch.shregs; | |
5deb8e7a AG |
2669 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
2670 | /* | |
2671 | * The shared struct is never shared on HV, | |
2672 | * so we can always use host endianness | |
2673 | */ | |
2674 | #ifdef __BIG_ENDIAN__ | |
2675 | vcpu->arch.shared_big_endian = true; | |
2676 | #else | |
2677 | vcpu->arch.shared_big_endian = false; | |
2678 | #endif | |
2679 | #endif | |
de56a948 PM |
2680 | vcpu->arch.mmcr[0] = MMCR0_FC; |
2681 | vcpu->arch.ctrl = CTRL_RUNLATCH; | |
2682 | /* default to host PVR, since we can't spoof it */ | |
3a167bea | 2683 | kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); |
2e25aa5f | 2684 | spin_lock_init(&vcpu->arch.vpa_update_lock); |
c7b67670 PM |
2685 | spin_lock_init(&vcpu->arch.tbacct_lock); |
2686 | vcpu->arch.busy_preempt = TB_NIL; | |
d682916a | 2687 | vcpu->arch.intr_msr = MSR_SF | MSR_ME; |
de56a948 | 2688 | |
769377f7 PM |
2689 | /* |
2690 | * Set the default HFSCR for the guest from the host value. | |
2691 | * This value is only used on POWER9. | |
57900694 | 2692 | * On POWER9, we want to virtualize the doorbell facility, so we |
f3c99f97 PM |
2693 | * don't set the HFSCR_MSGP bit; the doorbell instructions then |
2694 | * trap to the hypervisor and we emulate them. |
769377f7 | 2695 | */ |
f3c99f97 | 2696 | vcpu->arch.hfscr = HFSCR_TAR | HFSCR_EBB | HFSCR_PM | HFSCR_BHRB | |
4cb4ade1 | 2697 | HFSCR_DSCR | HFSCR_VECVSX | HFSCR_FP | HFSCR_PREFIX; |
f3c99f97 PM |
2698 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
2699 | vcpu->arch.hfscr &= mfspr(SPRN_HFSCR); | |
bd31ecf4 | 2700 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
f3c99f97 PM |
2701 | if (cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) |
2702 | vcpu->arch.hfscr |= HFSCR_TM; | |
bd31ecf4 | 2703 | #endif |
f3c99f97 PM |
2704 | } |
2705 | if (cpu_has_feature(CPU_FTR_TM_COMP)) | |
4bb3c7a0 | 2706 | vcpu->arch.hfscr |= HFSCR_TM; |
769377f7 | 2707 | |
de56a948 PM |
2708 | kvmppc_mmu_book3s_hv_init(vcpu); |
2709 | ||
8455d79e | 2710 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
371fefd6 PM |
2711 | |
2712 | init_waitqueue_head(&vcpu->arch.cpu_run); | |
2713 | ||
2714 | mutex_lock(&kvm->lock); | |
3c313524 PM |
2715 | vcore = NULL; |
2716 | err = -EINVAL; | |
1e175d2e | 2717 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { |
b5c6f760 PM |
2718 | if (id >= (KVM_MAX_VCPUS * kvm->arch.emul_smt_mode)) { |
2719 | pr_devel("KVM: VCPU ID too high\n"); | |
2720 | core = KVM_MAX_VCORES; | |
2721 | } else { | |
2722 | BUG_ON(kvm->arch.smt_mode != 1); | |
2723 | core = kvmppc_pack_vcpu_id(kvm, id); | |
2724 | } | |
1e175d2e SB |
2725 | } else { |
2726 | core = id / kvm->arch.smt_mode; | |
2727 | } | |
3c313524 PM |
2728 | if (core < KVM_MAX_VCORES) { |
2729 | vcore = kvm->arch.vcores[core]; | |
1e175d2e SB |
2730 | if (vcore && cpu_has_feature(CPU_FTR_ARCH_300)) { |
1731 | pr_devel("KVM: collision on id %u\n", id); |
2732 | vcore = NULL; | |
2733 | } else if (!vcore) { | |
0d4ee88d PM |
2734 | /* |
2735 | * Take mmu_setup_lock for mutual exclusion | |
2736 | * with kvmppc_update_lpcr(). | |
2737 | */ | |
3c313524 | 2738 | err = -ENOMEM; |
1e175d2e SB |
2739 | vcore = kvmppc_vcore_create(kvm, |
2740 | id & ~(kvm->arch.smt_mode - 1)); | |
0d4ee88d | 2741 | mutex_lock(&kvm->arch.mmu_setup_lock); |
3c313524 PM |
2742 | kvm->arch.vcores[core] = vcore; |
2743 | kvm->arch.online_vcores++; | |
0d4ee88d | 2744 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
3c313524 | 2745 | } |
371fefd6 PM |
2746 | } |
2747 | mutex_unlock(&kvm->lock); | |
2748 | ||
2749 | if (!vcore) | |
ff030fdf | 2750 | return err; |
371fefd6 PM |
2751 | |
2752 | spin_lock(&vcore->lock); | |
2753 | ++vcore->num_threads; | |
371fefd6 PM |
2754 | spin_unlock(&vcore->lock); |
2755 | vcpu->arch.vcore = vcore; | |
e0b7ec05 | 2756 | vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; |
ec257165 | 2757 | vcpu->arch.thread_cpu = -1; |
a29ebeaf | 2758 | vcpu->arch.prev_cpu = -1; |
371fefd6 | 2759 | |
af8f38b3 AG |
2760 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
2761 | kvmppc_sanity_check(vcpu); | |
2762 | ||
b6c295df PM |
2763 | debugfs_vcpu_init(vcpu, id); |
2764 | ||
c50bfbdc | 2765 | return 0; |
de56a948 PM |
2766 | } |
2767 | ||
3c313524 PM |
2768 | static int kvmhv_set_smt_mode(struct kvm *kvm, unsigned long smt_mode, |
2769 | unsigned long flags) | |
2770 | { | |
2771 | int err; | |
57900694 | 2772 | int esmt = 0; |
3c313524 PM |
2773 | |
2774 | if (flags) | |
2775 | return -EINVAL; | |
2776 | if (smt_mode > MAX_SMT_THREADS || !is_power_of_2(smt_mode)) | |
2777 | return -EINVAL; | |
2778 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { | |
2779 | /* | |
2780 | * On POWER8 (or POWER7), the threading mode is "strict", | |
2781 | * so we pack smt_mode vcpus per vcore. | |
2782 | */ | |
2783 | if (smt_mode > threads_per_subcore) | |
2784 | return -EINVAL; | |
2785 | } else { | |
2786 | /* | |
2787 | * On POWER9, the threading mode is "loose", | |
2788 | * so each vcpu gets its own vcore. | |
2789 | */ | |
57900694 | 2790 | esmt = smt_mode; |
3c313524 PM |
2791 | smt_mode = 1; |
2792 | } | |
2793 | mutex_lock(&kvm->lock); | |
2794 | err = -EBUSY; | |
2795 | if (!kvm->arch.online_vcores) { | |
2796 | kvm->arch.smt_mode = smt_mode; | |
57900694 | 2797 | kvm->arch.emul_smt_mode = esmt; |
3c313524 PM |
2798 | err = 0; |
2799 | } | |
2800 | mutex_unlock(&kvm->lock); | |
2801 | ||
2802 | return err; | |
2803 | } | |
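/*
 * An illustrative userspace sketch, not part of this file: this
 * function is reached through the KVM_CAP_PPC_SMT VM capability,
 * e.g. to ask for SMT4 emulation (vm_fd is a hypothetical VM file
 * descriptor):
 *
 *	struct kvm_enable_cap cap = {
 *		.cap  = KVM_CAP_PPC_SMT,
 *		.args = { 4, 0 },	// smt_mode = 4, flags = 0
 *	};
 *	ioctl(vm_fd, KVM_ENABLE_CAP, &cap);
 *
 * On POWER9 this yields emul_smt_mode = 4 with one vcpu per vcore
 * (smt_mode = 1); on POWER8 it packs 4 vcpus into each vcore.
 */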
2804 | ||
c35635ef PM |
2805 | static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) |
2806 | { | |
2807 | if (vpa->pinned_addr) | |
2808 | kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, | |
2809 | vpa->dirty); | |
2810 | } | |
2811 | ||
3a167bea | 2812 | static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) |
de56a948 | 2813 | { |
2e25aa5f | 2814 | spin_lock(&vcpu->arch.vpa_update_lock); |
c35635ef PM |
2815 | unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); |
2816 | unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); | |
2817 | unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); | |
2e25aa5f | 2818 | spin_unlock(&vcpu->arch.vpa_update_lock); |
de56a948 PM |
2819 | } |
2820 | ||
3a167bea AK |
2821 | static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) |
2822 | { | |
2823 | /* Indicate we want to get back into the guest */ | |
2824 | return 1; | |
2825 | } | |
2826 | ||
19ccb76a | 2827 | static void kvmppc_set_timer(struct kvm_vcpu *vcpu) |
371fefd6 | 2828 | { |
19ccb76a | 2829 | unsigned long dec_nsec, now; |
371fefd6 | 2830 | |
19ccb76a PM |
2831 | now = get_tb(); |
2832 | if (now > vcpu->arch.dec_expires) { | |
2833 | /* decrementer has already gone negative */ | |
2834 | kvmppc_core_queue_dec(vcpu); | |
7e28e60e | 2835 | kvmppc_core_prepare_to_enter(vcpu); |
19ccb76a | 2836 | return; |
371fefd6 | 2837 | } |
c43befca | 2838 | dec_nsec = tb_to_ns(vcpu->arch.dec_expires - now); |
8b0e1953 | 2839 | hrtimer_start(&vcpu->arch.dec_timer, dec_nsec, HRTIMER_MODE_REL); |
19ccb76a | 2840 | vcpu->arch.timer_running = 1; |
371fefd6 PM |
2841 | } |
2842 | ||
8b24e69f | 2843 | extern int __kvmppc_vcore_entry(void); |
de56a948 | 2844 | |
371fefd6 PM |
2845 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, |
2846 | struct kvm_vcpu *vcpu) | |
de56a948 | 2847 | { |
c7b67670 PM |
2848 | u64 now; |
2849 | ||
371fefd6 PM |
2850 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
2851 | return; | |
bf3d32e1 | 2852 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
c7b67670 PM |
2853 | now = mftb(); |
2854 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | |
2855 | vcpu->arch.stolen_logged; | |
2856 | vcpu->arch.busy_preempt = now; | |
2857 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | |
bf3d32e1 | 2858 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
371fefd6 | 2859 | --vc->n_runnable; |
7b5f8272 | 2860 | WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); |
371fefd6 PM |
2861 | } |
2862 | ||
f0888f70 PM |
2863 | static int kvmppc_grab_hwthread(int cpu) |
2864 | { | |
2865 | struct paca_struct *tpaca; | |
b754c739 | 2866 | long timeout = 10000; |
f0888f70 | 2867 | |
d2e60075 | 2868 | tpaca = paca_ptrs[cpu]; |
f0888f70 PM |
2869 | |
2870 | /* Ensure the thread won't go into the kernel if it wakes */ | |
7b444c67 | 2871 | tpaca->kvm_hstate.kvm_vcpu = NULL; |
b4deba5c | 2872 | tpaca->kvm_hstate.kvm_vcore = NULL; |
5d5b99cd PM |
2873 | tpaca->kvm_hstate.napping = 0; |
2874 | smp_wmb(); | |
2875 | tpaca->kvm_hstate.hwthread_req = 1; | |
f0888f70 PM |
2876 | |
2877 | /* | |
2878 | * If the thread is already executing in the kernel (e.g. handling | |
2879 | * a stray interrupt), wait for it to get back to nap mode. | |
2880 | * The smp_mb() is to ensure that our setting of hwthread_req | |
2881 | * is visible before we look at hwthread_state, so if this | |
2882 | * races with the code at system_reset_pSeries and the thread | |
2883 | * misses our setting of hwthread_req, we are sure to see its | |
2884 | * setting of hwthread_state, and vice versa. | |
2885 | */ | |
2886 | smp_mb(); | |
2887 | while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { | |
2888 | if (--timeout <= 0) { | |
2889 | pr_err("KVM: couldn't grab cpu %d\n", cpu); | |
2890 | return -EBUSY; | |
2891 | } | |
2892 | udelay(1); | |
2893 | } | |
2894 | return 0; | |
2895 | } | |
2896 | ||
2897 | static void kvmppc_release_hwthread(int cpu) | |
2898 | { | |
2899 | struct paca_struct *tpaca; | |
2900 | ||
d2e60075 | 2901 | tpaca = paca_ptrs[cpu]; |
31a4d448 | 2902 | tpaca->kvm_hstate.hwthread_req = 0; |
f0888f70 | 2903 | tpaca->kvm_hstate.kvm_vcpu = NULL; |
b4deba5c PM |
2904 | tpaca->kvm_hstate.kvm_vcore = NULL; |
2905 | tpaca->kvm_hstate.kvm_split_mode = NULL; | |
f0888f70 PM |
2906 | } |
2907 | ||
a29ebeaf PM |
2908 | static void radix_flush_cpu(struct kvm *kvm, int cpu, struct kvm_vcpu *vcpu) |
2909 | { | |
9d0b048d SJS |
2910 | struct kvm_nested_guest *nested = vcpu->arch.nested; |
2911 | cpumask_t *cpu_in_guest; | |
a29ebeaf PM |
2912 | int i; |
2913 | ||
77bbbc0c | 2914 | cpu = cpu_first_tlb_thread_sibling(cpu); |
9d0b048d SJS |
2915 | if (nested) { |
2916 | cpumask_set_cpu(cpu, &nested->need_tlb_flush); | |
2917 | cpu_in_guest = &nested->cpu_in_guest; | |
2918 | } else { | |
2919 | cpumask_set_cpu(cpu, &kvm->arch.need_tlb_flush); | |
2920 | cpu_in_guest = &kvm->arch.cpu_in_guest; | |
2921 | } | |
a29ebeaf PM |
2922 | /* |
2923 | * Make sure setting of bit in need_tlb_flush precedes | |
2924 | * testing of cpu_in_guest bits. The matching barrier on | |
2925 | * the other side is the first smp_mb() in kvmppc_run_core(). | |
2926 | */ | |
2927 | smp_mb(); | |
77bbbc0c SJS |
2928 | for (i = cpu; i <= cpu_last_tlb_thread_sibling(cpu); |
2929 | i += cpu_tlb_thread_sibling_step()) | |
2930 | if (cpumask_test_cpu(i, cpu_in_guest)) | |
2931 | smp_call_function_single(i, do_nothing, NULL, 1); | |
a29ebeaf PM |
2932 | } |
2933 | ||
8b24e69f PM |
2934 | static void kvmppc_prepare_radix_vcpu(struct kvm_vcpu *vcpu, int pcpu) |
2935 | { | |
9d0b048d | 2936 | struct kvm_nested_guest *nested = vcpu->arch.nested; |
8b24e69f | 2937 | struct kvm *kvm = vcpu->kvm; |
9d0b048d SJS |
2938 | int prev_cpu; |
2939 | ||
2940 | if (!cpu_has_feature(CPU_FTR_HVMODE)) | |
2941 | return; | |
2942 | ||
2943 | if (nested) | |
2944 | prev_cpu = nested->prev_cpu[vcpu->arch.nested_vcpu_id]; | |
2945 | else | |
2946 | prev_cpu = vcpu->arch.prev_cpu; | |
8b24e69f PM |
2947 | |
2948 | /* | |
2949 | * With radix, the guest can do TLB invalidations itself, | |
2950 | * and it could choose to use the local form (tlbiel) if | |
2951 | * it is invalidating a translation that has only ever been | |
2952 | * used on one vcpu. However, that doesn't mean it has | |
2953 | * only ever been used on one physical cpu, since vcpus | |
2954 | * can move around between pcpus. To cope with this, when | |
2955 | * a vcpu moves from one pcpu to another, we need to tell | |
2956 | * any vcpus running on the core where this vcpu previously | |
2957 | * ran to flush the TLB. The TLB is shared between threads, | |
2958 | * so we use a single bit in .need_tlb_flush for all 4 threads. | |
2959 | */ | |
9d0b048d SJS |
2960 | if (prev_cpu != pcpu) { |
2961 | if (prev_cpu >= 0 && | |
77bbbc0c SJS |
2962 | cpu_first_tlb_thread_sibling(prev_cpu) != |
2963 | cpu_first_tlb_thread_sibling(pcpu)) | |
9d0b048d SJS |
2964 | radix_flush_cpu(kvm, prev_cpu, vcpu); |
2965 | if (nested) | |
2966 | nested->prev_cpu[vcpu->arch.nested_vcpu_id] = pcpu; | |
2967 | else | |
2968 | vcpu->arch.prev_cpu = pcpu; | |
2969 | } | |
2970 | } | |
2971 | ||
b4deba5c | 2972 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) |
371fefd6 PM |
2973 | { |
2974 | int cpu; | |
2975 | struct paca_struct *tpaca; | |
a29ebeaf | 2976 | struct kvm *kvm = vc->kvm; |
371fefd6 | 2977 | |
b4deba5c PM |
2978 | cpu = vc->pcpu; |
2979 | if (vcpu) { | |
2980 | if (vcpu->arch.timer_running) { | |
2981 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | |
2982 | vcpu->arch.timer_running = 0; | |
2983 | } | |
2984 | cpu += vcpu->arch.ptid; | |
898b25b2 | 2985 | vcpu->cpu = vc->pcpu; |
b4deba5c | 2986 | vcpu->arch.thread_cpu = cpu; |
a29ebeaf | 2987 | cpumask_set_cpu(cpu, &kvm->arch.cpu_in_guest); |
19ccb76a | 2988 | } |
d2e60075 | 2989 | tpaca = paca_ptrs[cpu]; |
5d5b99cd | 2990 | tpaca->kvm_hstate.kvm_vcpu = vcpu; |
898b25b2 | 2991 | tpaca->kvm_hstate.ptid = cpu - vc->pcpu; |
4bb3c7a0 | 2992 | tpaca->kvm_hstate.fake_suspend = 0; |
ec257165 | 2993 | /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ |
371fefd6 | 2994 | smp_wmb(); |
898b25b2 | 2995 | tpaca->kvm_hstate.kvm_vcore = vc; |
5d5b99cd | 2996 | if (cpu != smp_processor_id()) |
66feed61 | 2997 | kvmppc_ipi_thread(cpu); |
371fefd6 | 2998 | } |
de56a948 | 2999 | |
516f7898 | 3000 | static void kvmppc_wait_for_nap(int n_threads) |
371fefd6 | 3001 | { |
5d5b99cd PM |
3002 | int cpu = smp_processor_id(); |
3003 | int i, loops; | |
371fefd6 | 3004 | |
45c940ba PM |
3005 | if (n_threads <= 1) |
3006 | return; | |
5d5b99cd PM |
3007 | for (loops = 0; loops < 1000000; ++loops) { |
3008 | /* | |
3009 | * Check if all threads are finished. | |
b4deba5c | 3010 | * We set the vcore pointer when starting a thread |
5d5b99cd | 3011 | * and the thread clears it when finished, so we look |
b4deba5c | 3012 | * for any threads that still have a non-NULL vcore ptr. |
5d5b99cd | 3013 | */ |
45c940ba | 3014 | for (i = 1; i < n_threads; ++i) |
d2e60075 | 3015 | if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) |
5d5b99cd | 3016 | break; |
45c940ba | 3017 | if (i == n_threads) { |
5d5b99cd PM |
3018 | HMT_medium(); |
3019 | return; | |
371fefd6 | 3020 | } |
5d5b99cd | 3021 | HMT_low(); |
371fefd6 PM |
3022 | } |
3023 | HMT_medium(); | |
45c940ba | 3024 | for (i = 1; i < n_threads; ++i) |
d2e60075 | 3025 | if (paca_ptrs[cpu + i]->kvm_hstate.kvm_vcore) |
5d5b99cd | 3026 | pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); |
371fefd6 PM |
3027 | } |
3028 | ||
3029 | /* | |
3030 | * Check that we are on thread 0 and that any other threads in | |
7b444c67 PM |
3031 | * this core are offline. Then grab the threads so they can't | |
3032 | * enter the kernel. | |
371fefd6 PM |
3033 | */ |
3034 | static int on_primary_thread(void) | |
3035 | { | |
3036 | int cpu = smp_processor_id(); | |
3102f784 | 3037 | int thr; |
371fefd6 | 3038 | |
3102f784 ME |
3039 | /* Are we on a primary subcore? */ |
3040 | if (cpu_thread_in_subcore(cpu)) | |
371fefd6 | 3041 | return 0; |
3102f784 ME |
3042 | |
3043 | thr = 0; | |
3044 | while (++thr < threads_per_subcore) | |
371fefd6 PM |
3045 | if (cpu_online(cpu + thr)) |
3046 | return 0; | |
7b444c67 PM |
3047 | |
3048 | /* Grab all hw threads so they can't go into the kernel */ | |
3102f784 | 3049 | for (thr = 1; thr < threads_per_subcore; ++thr) { |
7b444c67 PM |
3050 | if (kvmppc_grab_hwthread(cpu + thr)) { |
3051 | /* Couldn't grab one; let the others go */ | |
3052 | do { | |
3053 | kvmppc_release_hwthread(cpu + thr); | |
3054 | } while (--thr > 0); | |
3055 | return 0; | |
3056 | } | |
3057 | } | |
371fefd6 PM |
3058 | return 1; |
3059 | } | |
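/*
 * A minimal sketch of the all-or-nothing acquisition pattern used above:
 * claim every resource, and on the first failure release the claims
 * already made, in reverse order. grab() and release() are hypothetical
 * stand-ins for kvmppc_grab_hwthread()/kvmppc_release_hwthread(); like
 * on_primary_thread(), it returns 1 on success and 0 on failure.
 */
static int demo_grab_all(int base, int count,
			 int (*grab)(int), void (*release)(int))
{
	int thr;

	for (thr = 1; thr < count; ++thr) {
		if (grab(base + thr)) {
			/* couldn't grab one; roll back the others */
			do {
				release(base + thr);
			} while (--thr > 0);
			return 0;
		}
	}
	return 1;
}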
3060 | ||
ec257165 PM |
3061 | /* |
3062 | * A list of virtual cores for each physical CPU. | |
3063 | * These are vcores that could run but their runner VCPU tasks are | |
3064 | * (or may be) preempted. | |
3065 | */ | |
3066 | struct preempted_vcore_list { | |
3067 | struct list_head list; | |
3068 | spinlock_t lock; | |
3069 | }; | |
3070 | ||
3071 | static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores); | |
3072 | ||
3073 | static void init_vcore_lists(void) | |
3074 | { | |
3075 | int cpu; | |
3076 | ||
3077 | for_each_possible_cpu(cpu) { | |
3078 | struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu); | |
3079 | spin_lock_init(&lp->lock); | |
3080 | INIT_LIST_HEAD(&lp->list); | |
3081 | } | |
3082 | } | |
3083 | ||
3084 | static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) | |
3085 | { | |
3086 | struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); | |
3087 | ||
3088 | vc->vcore_state = VCORE_PREEMPT; | |
3089 | vc->pcpu = smp_processor_id(); | |
516f7898 | 3090 | if (vc->num_threads < threads_per_vcore(vc->kvm)) { |
ec257165 PM |
3091 | spin_lock(&lp->lock); |
3092 | list_add_tail(&vc->preempt_list, &lp->list); | |
3093 | spin_unlock(&lp->lock); | |
3094 | } | |
3095 | ||
3096 | /* Start accumulating stolen time */ | |
3097 | kvmppc_core_start_stolen(vc); | |
3098 | } | |
3099 | ||
3100 | static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) | |
3101 | { | |
402813fe | 3102 | struct preempted_vcore_list *lp; |
ec257165 PM |
3103 | |
3104 | kvmppc_core_end_stolen(vc); | |
3105 | if (!list_empty(&vc->preempt_list)) { | |
402813fe | 3106 | lp = &per_cpu(preempted_vcores, vc->pcpu); |
ec257165 PM |
3107 | spin_lock(&lp->lock); |
3108 | list_del_init(&vc->preempt_list); | |
3109 | spin_unlock(&lp->lock); | |
3110 | } | |
3111 | vc->vcore_state = VCORE_INACTIVE; | |
3112 | } | |
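/*
 * The list_empty()/list_del_init() pairing above is what makes "is this
 * vcore on a preempt list?" a cheap local test: list_del_init() leaves
 * the node pointing at itself, so a later list_empty(&node) returns
 * true, whereas plain list_del() would poison the pointers and make the
 * test unsafe. Minimal sketch of the idiom, with demo types:
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_vcore {
	struct list_head preempt_list;
};

static void demo_dequeue(struct demo_vcore *vc, spinlock_t *list_lock)
{
	if (!list_empty(&vc->preempt_list)) {	  /* cheap membership test */
		spin_lock(list_lock);
		list_del_init(&vc->preempt_list); /* self-pointing again */
		spin_unlock(list_lock);
	}
}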
3113 | ||
b4deba5c PM |
3114 | /* |
3115 | * This stores information about the virtual cores currently | |
3116 | * assigned to a physical core. | |
3117 | */ | |
ec257165 | 3118 | struct core_info { |
b4deba5c PM |
3119 | int n_subcores; |
3120 | int max_subcore_threads; | |
ec257165 | 3121 | int total_threads; |
b4deba5c | 3122 | int subcore_threads[MAX_SUBCORES]; |
898b25b2 | 3123 | struct kvmppc_vcore *vc[MAX_SUBCORES]; |
ec257165 PM |
3124 | }; |
3125 | ||
b4deba5c PM |
3126 | /* |
3127 | * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 | |
516f7898 | 3128 | * respectively in 2-way micro-threading (split-core) mode on POWER8. |
b4deba5c PM |
3129 | */ |
3130 | static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; | |
3131 | ||
ec257165 PM |
3132 | static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) |
3133 | { | |
3134 | memset(cip, 0, sizeof(*cip)); | |
b4deba5c PM |
3135 | cip->n_subcores = 1; |
3136 | cip->max_subcore_threads = vc->num_threads; | |
ec257165 | 3137 | cip->total_threads = vc->num_threads; |
b4deba5c | 3138 | cip->subcore_threads[0] = vc->num_threads; |
898b25b2 | 3139 | cip->vc[0] = vc; |
b4deba5c PM |
3140 | } |
3141 | ||
3142 | static bool subcore_config_ok(int n_subcores, int n_threads) | |
3143 | { | |
516f7898 | 3144 | /* |
00608e1f PM |
3145 | * POWER9 "SMT4" cores are permanently in what is effectively a 4-way |
3146 | * split-core mode, with one thread per subcore. | |
516f7898 PM |
3147 | */ |
3148 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
3149 | return n_subcores <= 4 && n_threads == 1; | |
3150 | ||
3151 | /* On POWER8, can only dynamically split if unsplit to begin with */ | |
b4deba5c PM |
3152 | if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) |
3153 | return false; | |
3154 | if (n_subcores > MAX_SUBCORES) | |
3155 | return false; | |
3156 | if (n_subcores > 1) { | |
3157 | if (!(dynamic_mt_modes & 2)) | |
3158 | n_subcores = 4; | |
3159 | if (n_subcores > 2 && !(dynamic_mt_modes & 4)) | |
3160 | return false; | |
3161 | } | |
3162 | ||
3163 | return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS; | |
ec257165 PM |
3164 | } |
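/*
 * Worked example of the final check above on POWER8, where
 * MAX_SMT_THREADS is 8: three subcores of 3 threads each need
 * 3 * roundup_pow_of_two(3) = 3 * 4 = 12 > 8 thread slots, so that
 * configuration is rejected, while two subcores of 3 threads
 * (2 * 4 = 8) just fit. Hypothetical standalone version of the
 * arithmetic:
 */
#include <linux/log2.h>

static bool demo_subcores_fit(int n_subcores, int n_threads, int max_smt)
{
	/* each subcore occupies a power-of-two slice of the core */
	return n_subcores * roundup_pow_of_two(n_threads) <= max_smt;
}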
3165 | ||
898b25b2 | 3166 | static void init_vcore_to_run(struct kvmppc_vcore *vc) |
ec257165 | 3167 | { |
ec257165 PM |
3168 | vc->entry_exit_map = 0; |
3169 | vc->in_guest = 0; | |
3170 | vc->napping_threads = 0; | |
3171 | vc->conferring_threads = 0; | |
57b8daa7 | 3172 | vc->tb_offset_applied = 0; |
ec257165 PM |
3173 | } |
3174 | ||
b4deba5c PM |
3175 | static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) |
3176 | { | |
3177 | int n_threads = vc->num_threads; | |
3178 | int sub; | |
3179 | ||
3180 | if (!cpu_has_feature(CPU_FTR_ARCH_207S)) | |
3181 | return false; | |
3182 | ||
aa227864 PM |
3183 | /* In one_vm_per_core mode, require all vcores to be from the same vm */ |
3184 | if (one_vm_per_core && vc->kvm != cip->vc[0]->kvm) | |
3185 | return false; | |
3186 | ||
b4deba5c PM |
3187 | if (n_threads < cip->max_subcore_threads) |
3188 | n_threads = cip->max_subcore_threads; | |
b009031f | 3189 | if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) |
b4deba5c | 3190 | return false; |
b009031f | 3191 | cip->max_subcore_threads = n_threads; |
b4deba5c PM |
3192 | |
3193 | sub = cip->n_subcores; | |
3194 | ++cip->n_subcores; | |
3195 | cip->total_threads += vc->num_threads; | |
3196 | cip->subcore_threads[sub] = vc->num_threads; | |
898b25b2 PM |
3197 | cip->vc[sub] = vc; |
3198 | init_vcore_to_run(vc); | |
3199 | list_del_init(&vc->preempt_list); | |
b4deba5c PM |
3200 | |
3201 | return true; | |
3202 | } | |
3203 | ||
b4deba5c PM |
3204 | /* |
3205 | * Work out whether it is possible to piggyback the execution of | |
3206 | * vcore *pvc onto the execution of the other vcores described in *cip. | |
3207 | */ | |
3208 | static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip, | |
3209 | int target_threads) | |
3210 | { | |
b4deba5c PM |
3211 | if (cip->total_threads + pvc->num_threads > target_threads) |
3212 | return false; | |
b4deba5c | 3213 | |
b009031f | 3214 | return can_dynamic_split(pvc, cip); |
b4deba5c PM |
3215 | } |
3216 | ||
d911f0be PM |
3217 | static void prepare_threads(struct kvmppc_vcore *vc) |
3218 | { | |
7b5f8272 SJS |
3219 | int i; |
3220 | struct kvm_vcpu *vcpu; | |
d911f0be | 3221 | |
7b5f8272 | 3222 | for_each_runnable_thread(i, vcpu, vc) { |
d911f0be PM |
3223 | if (signal_pending(vcpu->arch.run_task)) |
3224 | vcpu->arch.ret = -EINTR; | |
3225 | else if (vcpu->arch.vpa.update_pending || | |
3226 | vcpu->arch.slb_shadow.update_pending || | |
3227 | vcpu->arch.dtl.update_pending) | |
3228 | vcpu->arch.ret = RESUME_GUEST; | |
3229 | else | |
3230 | continue; | |
3231 | kvmppc_remove_runnable(vc, vcpu); | |
3232 | wake_up(&vcpu->arch.cpu_run); | |
3233 | } | |
3234 | } | |
3235 | ||
ec257165 PM |
3236 | static void collect_piggybacks(struct core_info *cip, int target_threads) |
3237 | { | |
3238 | struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); | |
3239 | struct kvmppc_vcore *pvc, *vcnext; | |
3240 | ||
3241 | spin_lock(&lp->lock); | |
3242 | list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { | |
3243 | if (!spin_trylock(&pvc->lock)) | |
3244 | continue; | |
3245 | prepare_threads(pvc); | |
d28eafc5 | 3246 | if (!pvc->n_runnable || !pvc->kvm->arch.mmu_ready) { |
ec257165 PM |
3247 | list_del_init(&pvc->preempt_list); |
3248 | if (pvc->runner == NULL) { | |
3249 | pvc->vcore_state = VCORE_INACTIVE; | |
3250 | kvmppc_core_end_stolen(pvc); | |
3251 | } | |
3252 | spin_unlock(&pvc->lock); | |
3253 | continue; | |
3254 | } | |
3255 | if (!can_piggyback(pvc, cip, target_threads)) { | |
3256 | spin_unlock(&pvc->lock); | |
3257 | continue; | |
3258 | } | |
3259 | kvmppc_core_end_stolen(pvc); | |
3260 | pvc->vcore_state = VCORE_PIGGYBACK; | |
3261 | if (cip->total_threads >= target_threads) | |
3262 | break; | |
3263 | } | |
3264 | spin_unlock(&lp->lock); | |
3265 | } | |
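/*
 * Note the lock ordering in the scan above: kvmppc_vcore_preempt() is
 * called with the vcore lock held and takes the per-CPU list lock
 * inside it, while collect_piggybacks() holds the list lock and then
 * wants a vcore lock - the opposite order. Using spin_trylock() and
 * skipping busy vcores is what keeps that inversion deadlock-free.
 * Sketch of the idiom, with demo types:
 */
#include <linux/list.h>
#include <linux/spinlock.h>

struct demo_vc {
	spinlock_t lock;
	struct list_head node;
};

static void demo_scan(spinlock_t *list_lock, struct list_head *head)
{
	struct demo_vc *pos, *n;

	spin_lock(list_lock);
	list_for_each_entry_safe(pos, n, head, node) {
		if (!spin_trylock(&pos->lock))
			continue;	/* would invert lock order: skip */
		/* ... examine pos, claim it or leave it ... */
		spin_unlock(&pos->lock);
	}
	spin_unlock(list_lock);
}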
3266 | ||
d28eafc5 | 3267 | static bool recheck_signals_and_mmu(struct core_info *cip) |
8b24e69f PM |
3268 | { |
3269 | int sub, i; | |
3270 | struct kvm_vcpu *vcpu; | |
d28eafc5 | 3271 | struct kvmppc_vcore *vc; |
8b24e69f | 3272 | |
d28eafc5 PM |
3273 | for (sub = 0; sub < cip->n_subcores; ++sub) { |
3274 | vc = cip->vc[sub]; | |
3275 | if (!vc->kvm->arch.mmu_ready) | |
3276 | return true; | |
3277 | for_each_runnable_thread(i, vcpu, vc) | |
8b24e69f PM |
3278 | if (signal_pending(vcpu->arch.run_task)) |
3279 | return true; | |
d28eafc5 | 3280 | } |
8b24e69f PM |
3281 | return false; |
3282 | } | |
3283 | ||
ec257165 | 3284 | static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) |
25fedfca | 3285 | { |
7b5f8272 | 3286 | int still_running = 0, i; |
25fedfca PM |
3287 | u64 now; |
3288 | long ret; | |
7b5f8272 | 3289 | struct kvm_vcpu *vcpu; |
25fedfca | 3290 | |
ec257165 | 3291 | spin_lock(&vc->lock); |
25fedfca | 3292 | now = get_tb(); |
7b5f8272 | 3293 | for_each_runnable_thread(i, vcpu, vc) { |
53655ddd PM |
3294 | /* |
3295 | * It's safe to unlock the vcore in the loop here, because | |
3296 | * for_each_runnable_thread() is safe against removal of | |
3297 | * the vcpu, and the vcore state is VCORE_EXITING here, | |
3298 | * so any vcpus becoming runnable will have their arch.trap | |
3299 | * set to zero and can't actually run in the guest. | |
3300 | */ | |
3301 | spin_unlock(&vc->lock); | |
25fedfca PM |
3302 | /* cancel pending dec exception if dec is positive */ |
3303 | if (now < vcpu->arch.dec_expires && | |
3304 | kvmppc_core_pending_dec(vcpu)) | |
3305 | kvmppc_core_dequeue_dec(vcpu); | |
3306 | ||
3307 | trace_kvm_guest_exit(vcpu); | |
3308 | ||
3309 | ret = RESUME_GUEST; | |
3310 | if (vcpu->arch.trap) | |
8c99d345 | 3311 | ret = kvmppc_handle_exit_hv(vcpu, |
25fedfca PM |
3312 | vcpu->arch.run_task); |
3313 | ||
3314 | vcpu->arch.ret = ret; | |
3315 | vcpu->arch.trap = 0; | |
3316 | ||
53655ddd | 3317 | spin_lock(&vc->lock); |
ec257165 PM |
3318 | if (is_kvmppc_resume_guest(vcpu->arch.ret)) { |
3319 | if (vcpu->arch.pending_exceptions) | |
3320 | kvmppc_core_prepare_to_enter(vcpu); | |
3321 | if (vcpu->arch.ceded) | |
25fedfca | 3322 | kvmppc_set_timer(vcpu); |
ec257165 PM |
3323 | else |
3324 | ++still_running; | |
3325 | } else { | |
25fedfca PM |
3326 | kvmppc_remove_runnable(vc, vcpu); |
3327 | wake_up(&vcpu->arch.cpu_run); | |
3328 | } | |
3329 | } | |
ec257165 | 3330 | if (!is_master) { |
563a1e93 | 3331 | if (still_running > 0) { |
ec257165 | 3332 | kvmppc_vcore_preempt(vc); |
563a1e93 PM |
3333 | } else if (vc->runner) { |
3334 | vc->vcore_state = VCORE_PREEMPT; | |
3335 | kvmppc_core_start_stolen(vc); | |
3336 | } else { | |
3337 | vc->vcore_state = VCORE_INACTIVE; | |
3338 | } | |
ec257165 PM |
3339 | if (vc->n_runnable > 0 && vc->runner == NULL) { |
3340 | /* make sure there's a candidate runner awake */ | |
7b5f8272 SJS |
3341 | i = -1; |
3342 | vcpu = next_runnable_thread(vc, &i); | |
ec257165 PM |
3343 | wake_up(&vcpu->arch.cpu_run); |
3344 | } | |
3345 | } | |
3346 | spin_unlock(&vc->lock); | |
25fedfca PM |
3347 | } |
3348 | ||
b8e6a87c SW |
3349 | /* |
3350 | * Clear core from the list of active host cores as we are about to | |
3351 | * enter the guest. Only do this if it is the primary thread of the | |
3352 | * core (not if a subcore) that is entering the guest. | |
3353 | */ | |
3f7cd919 | 3354 | static inline int kvmppc_clear_host_core(unsigned int cpu) |
b8e6a87c SW |
3355 | { |
3356 | int core; | |
3357 | ||
3358 | if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) | |
3f7cd919 | 3359 | return 0; |
b8e6a87c SW |
3360 | /* |
3361 | * Memory barrier can be omitted here as we will do a smp_wmb() | |
3362 | * later in kvmppc_start_thread and we need to ensure that state is | |
3363 | * visible to other CPUs only after we enter the guest. | |
3364 | */ | |
3365 | core = cpu >> threads_shift; | |
3366 | kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; | |
3f7cd919 | 3367 | return 0; |
b8e6a87c SW |
3368 | } |
3369 | ||
3370 | /* | |
3371 | * Advertise this core as an active host core since we exited the guest. | |
3372 | * We only need to do this if it is the primary thread of the core that is | |
3373 | * exiting. | |
3374 | */ | |
3f7cd919 | 3375 | static inline int kvmppc_set_host_core(unsigned int cpu) |
b8e6a87c SW |
3376 | { |
3377 | int core; | |
3378 | ||
3379 | if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) | |
3f7cd919 | 3380 | return 0; |
b8e6a87c SW |
3381 | |
3382 | /* | |
3383 | * Memory barrier can be omitted here because we do a spin_unlock | |
3384 | * immediately after this which provides the memory barrier. | |
3385 | */ | |
3386 | core = cpu >> threads_shift; | |
3387 | kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; | |
3f7cd919 | 3388 | return 0; |
b8e6a87c SW |
3389 | } |
3390 | ||
8b24e69f PM |
3391 | static void set_irq_happened(int trap) |
3392 | { | |
3393 | switch (trap) { | |
3394 | case BOOK3S_INTERRUPT_EXTERNAL: | |
3395 | local_paca->irq_happened |= PACA_IRQ_EE; | |
3396 | break; | |
3397 | case BOOK3S_INTERRUPT_H_DOORBELL: | |
3398 | local_paca->irq_happened |= PACA_IRQ_DBELL; | |
3399 | break; | |
3400 | case BOOK3S_INTERRUPT_HMI: | |
3401 | local_paca->irq_happened |= PACA_IRQ_HMI; | |
3402 | break; | |
6de6638b NP |
3403 | case BOOK3S_INTERRUPT_SYSTEM_RESET: |
3404 | replay_system_reset(); | |
3405 | break; | |
8b24e69f PM |
3406 | } |
3407 | } | |
3408 | ||
371fefd6 PM |
3409 | /* |
3410 | * Run a set of guest threads on a physical core. | |
3411 | * Called with vc->lock held. | |
3412 | */ | |
66feed61 | 3413 | static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) |
371fefd6 | 3414 | { |
7b5f8272 | 3415 | struct kvm_vcpu *vcpu; |
d911f0be | 3416 | int i; |
2c9097e4 | 3417 | int srcu_idx; |
ec257165 | 3418 | struct core_info core_info; |
898b25b2 | 3419 | struct kvmppc_vcore *pvc; |
b4deba5c PM |
3420 | struct kvm_split_mode split_info, *sip; |
3421 | int split, subcore_size, active; | |
3422 | int sub; | |
3423 | bool thr0_done; | |
3424 | unsigned long cmd_bit, stat_bit; | |
ec257165 PM |
3425 | int pcpu, thr; |
3426 | int target_threads; | |
45c940ba | 3427 | int controlled_threads; |
8b24e69f | 3428 | int trap; |
516f7898 | 3429 | bool is_power8; |
371fefd6 | 3430 | |
fae5c9f3 NP |
3431 | if (WARN_ON_ONCE(cpu_has_feature(CPU_FTR_ARCH_300))) |
3432 | return; | |
3433 | ||
d911f0be PM |
3434 | /* |
3435 | * Remove from the list any threads that have a signal pending | |
3436 | * or need a VPA update done | |
3437 | */ | |
3438 | prepare_threads(vc); | |
3439 | ||
3440 | /* if the runner is no longer runnable, let the caller pick a new one */ | |
3441 | if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) | |
3442 | return; | |
081f323b PM |
3443 | |
3444 | /* | |
d911f0be | 3445 | * Initialize *vc. |
081f323b | 3446 | */ |
898b25b2 | 3447 | init_vcore_to_run(vc); |
2711e248 | 3448 | vc->preempt_tb = TB_NIL; |
081f323b | 3449 | |
45c940ba PM |
3450 | /* |
3451 | * Number of threads that we will be controlling: the same as | |
3452 | * the number of threads per subcore, except on POWER9, | |
3453 | * where it's 1 because the threads are (mostly) independent. | |
3454 | */ | |
516f7898 | 3455 | controlled_threads = threads_per_vcore(vc->kvm); |
45c940ba | 3456 | |
7b444c67 | 3457 | /* |
3102f784 ME |
3458 | * Make sure we are running on primary threads, and that secondary |
3459 | * threads are offline. Also check if the number of threads in this | |
3460 | * guest is greater than the current system threads per guest. | |
7b444c67 | 3461 | */ |
b1b1697a NP |
3462 | if ((controlled_threads > 1) && |
3463 | ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { | |
7b5f8272 | 3464 | for_each_runnable_thread(i, vcpu, vc) { |
7b444c67 | 3465 | vcpu->arch.ret = -EBUSY; |
25fedfca PM |
3466 | kvmppc_remove_runnable(vc, vcpu); |
3467 | wake_up(&vcpu->arch.cpu_run); | |
3468 | } | |
7b444c67 PM |
3469 | goto out; |
3470 | } | |
3471 | ||
ec257165 PM |
3472 | /* |
3473 | * See if we could run any other vcores on the physical core | |
3474 | * along with this one. | |
3475 | */ | |
3476 | init_core_info(&core_info, vc); | |
3477 | pcpu = smp_processor_id(); | |
45c940ba | 3478 | target_threads = controlled_threads; |
ec257165 PM |
3479 | if (target_smt_mode && target_smt_mode < target_threads) |
3480 | target_threads = target_smt_mode; | |
3481 | if (vc->num_threads < target_threads) | |
3482 | collect_piggybacks(&core_info, target_threads); | |
3102f784 | 3483 | |
8b24e69f PM |
3484 | /* |
3485 | * Hard-disable interrupts, and check resched flag and signals. | |
3486 | * If we need to reschedule or deliver a signal, clean up | |
3487 | * and return without going into the guest(s). | |
072df813 | 3488 | * If the mmu_ready flag has been cleared, don't go into the |
38c53af8 | 3489 | * guest because that means an HPT resize operation is in progress. | |
8b24e69f PM |
3490 | */ |
3491 | local_irq_disable(); | |
3492 | hard_irq_disable(); | |
3493 | if (lazy_irq_pending() || need_resched() || | |
d28eafc5 | 3494 | recheck_signals_and_mmu(&core_info)) { |
8b24e69f PM |
3495 | local_irq_enable(); |
3496 | vc->vcore_state = VCORE_INACTIVE; | |
3497 | /* Unlock all except the primary vcore */ | |
3498 | for (sub = 1; sub < core_info.n_subcores; ++sub) { | |
3499 | pvc = core_info.vc[sub]; | |
3500 | /* Put back on to the preempted vcores list */ | |
3501 | kvmppc_vcore_preempt(pvc); | |
3502 | spin_unlock(&pvc->lock); | |
3503 | } | |
3504 | for (i = 0; i < controlled_threads; ++i) | |
3505 | kvmppc_release_hwthread(pcpu + i); | |
3506 | return; | |
3507 | } | |
3508 | ||
3509 | kvmppc_clear_host_core(pcpu); | |
3510 | ||
b4deba5c PM |
3511 | /* Decide on micro-threading (split-core) mode */ |
3512 | subcore_size = threads_per_subcore; | |
3513 | cmd_bit = stat_bit = 0; | |
3514 | split = core_info.n_subcores; | |
3515 | sip = NULL; | |
fae5c9f3 | 3516 | is_power8 = cpu_has_feature(CPU_FTR_ARCH_207S); |
516f7898 | 3517 | |
b1b1697a | 3518 | if (split > 1) { |
b4deba5c PM |
3519 | sip = &split_info; |
3520 | memset(&split_info, 0, sizeof(split_info)); | |
b4deba5c | 3521 | for (sub = 0; sub < core_info.n_subcores; ++sub) |
898b25b2 | 3522 | split_info.vc[sub] = core_info.vc[sub]; |
516f7898 PM |
3523 | |
3524 | if (is_power8) { | |
3525 | if (split == 2 && (dynamic_mt_modes & 2)) { | |
3526 | cmd_bit = HID0_POWER8_1TO2LPAR; | |
3527 | stat_bit = HID0_POWER8_2LPARMODE; | |
3528 | } else { | |
3529 | split = 4; | |
3530 | cmd_bit = HID0_POWER8_1TO4LPAR; | |
3531 | stat_bit = HID0_POWER8_4LPARMODE; | |
3532 | } | |
3533 | subcore_size = MAX_SMT_THREADS / split; | |
3534 | split_info.rpr = mfspr(SPRN_RPR); | |
3535 | split_info.pmmar = mfspr(SPRN_PMMAR); | |
3536 | split_info.ldbar = mfspr(SPRN_LDBAR); | |
3537 | split_info.subcore_size = subcore_size; | |
3538 | } else { | |
3539 | split_info.subcore_size = 1; | |
3540 | } | |
3541 | ||
b4deba5c PM |
3542 | /* order writes to split_info before kvm_split_mode pointer */ |
3543 | smp_wmb(); | |
3544 | } | |
c0101509 PM |
3545 | |
3546 | for (thr = 0; thr < controlled_threads; ++thr) { | |
d2e60075 NP |
3547 | struct paca_struct *paca = paca_ptrs[pcpu + thr]; |
3548 | ||
d2e60075 NP |
3549 | paca->kvm_hstate.napping = 0; |
3550 | paca->kvm_hstate.kvm_split_mode = sip; | |
c0101509 | 3551 | } |
b4deba5c | 3552 | |
516f7898 | 3553 | /* Initiate micro-threading (split-core) on POWER8 if required */ |
b4deba5c PM |
3554 | if (cmd_bit) { |
3555 | unsigned long hid0 = mfspr(SPRN_HID0); | |
3556 | ||
3557 | hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS; | |
3558 | mb(); | |
3559 | mtspr(SPRN_HID0, hid0); | |
3560 | isync(); | |
3561 | for (;;) { | |
3562 | hid0 = mfspr(SPRN_HID0); | |
3563 | if (hid0 & stat_bit) | |
3564 | break; | |
3565 | cpu_relax(); | |
ec257165 | 3566 | } |
2e25aa5f | 3567 | } |
3102f784 | 3568 | |
7aa15842 PM |
3569 | /* |
3570 | * On POWER8, set RWMR register. | |
3571 | * Since it only affects PURR and SPURR, it doesn't affect | |
3572 | * the host, so we don't save/restore the host value. | |
3573 | */ | |
3574 | if (is_power8) { | |
3575 | unsigned long rwmr_val = RWMR_RPA_P8_8THREAD; | |
3576 | int n_online = atomic_read(&vc->online_count); | |
3577 | ||
3578 | /* | |
3579 | * Use the 8-thread value if we're doing split-core | |
3580 | * or if the vcore's online count looks bogus. | |
3581 | */ | |
3582 | if (split == 1 && threads_per_subcore == MAX_SMT_THREADS && | |
3583 | n_online >= 1 && n_online <= MAX_SMT_THREADS) | |
3584 | rwmr_val = p8_rwmr_values[n_online]; | |
3585 | mtspr(SPRN_RWMR, rwmr_val); | |
3586 | } | |
3587 | ||
b4deba5c PM |
3588 | /* Start all the threads */ |
3589 | active = 0; | |
3590 | for (sub = 0; sub < core_info.n_subcores; ++sub) { | |
516f7898 | 3591 | thr = is_power8 ? subcore_thread_map[sub] : sub; |
b4deba5c PM |
3592 | thr0_done = false; |
3593 | active |= 1 << thr; | |
898b25b2 PM |
3594 | pvc = core_info.vc[sub]; |
3595 | pvc->pcpu = pcpu + thr; | |
3596 | for_each_runnable_thread(i, vcpu, pvc) { | |
3597 | kvmppc_start_thread(vcpu, pvc); | |
3598 | kvmppc_create_dtl_entry(vcpu, pvc); | |
3599 | trace_kvm_guest_enter(vcpu); | |
3600 | if (!vcpu->arch.ptid) | |
3601 | thr0_done = true; | |
3602 | active |= 1 << (thr + vcpu->arch.ptid); | |
b4deba5c | 3603 | } |
898b25b2 PM |
3604 | /* |
3605 | * We need to start the first thread of each subcore | |
3606 | * even if it doesn't have a vcpu. | |
3607 | */ | |
3608 | if (!thr0_done) | |
3609 | kvmppc_start_thread(NULL, pvc); | |
2e25aa5f | 3610 | } |
371fefd6 | 3611 | |
7f235328 GS |
3612 | /* |
3613 | * Ensure that split_info.do_nap is set after setting | |
3614 | * the vcore pointer in the PACA of the secondaries. | |
3615 | */ | |
3616 | smp_mb(); | |
7f235328 | 3617 | |
b4deba5c PM |
3618 | /* |
3619 | * When doing micro-threading, poke the inactive threads as well. | |
3620 | * This gets them to the nap instruction after kvm_do_nap, | |
3621 | * which reduces the time taken to unsplit later. | |
3622 | */ | |
b1b1697a | 3623 | if (cmd_bit) { |
516f7898 | 3624 | split_info.do_nap = 1; /* ask secondaries to nap when done */ |
b4deba5c PM |
3625 | for (thr = 1; thr < threads_per_subcore; ++thr) |
3626 | if (!(active & (1 << thr))) | |
3627 | kvmppc_ipi_thread(pcpu + thr); | |
516f7898 | 3628 | } |
e0b7ec05 | 3629 | |
2f12f034 | 3630 | vc->vcore_state = VCORE_RUNNING; |
19ccb76a | 3631 | preempt_disable(); |
3c78f78a SW |
3632 | |
3633 | trace_kvmppc_run_core(vc, 0); | |
3634 | ||
b4deba5c | 3635 | for (sub = 0; sub < core_info.n_subcores; ++sub) |
898b25b2 | 3636 | spin_unlock(&core_info.vc[sub]->lock); |
de56a948 | 3637 | |
61bd0f66 | 3638 | guest_enter_irqoff(); |
2c9097e4 | 3639 | |
e0b7ec05 | 3640 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); |
2c9097e4 | 3641 | |
a4bc64d3 NR |
3642 | this_cpu_disable_ftrace(); |
3643 | ||
3309bec8 AK |
3644 | /* |
3645 | * Interrupts will be enabled once we get into the guest, | |
3646 | * so tell lockdep that we're about to enable interrupts. | |
3647 | */ | |
3648 | trace_hardirqs_on(); | |
3649 | ||
8b24e69f | 3650 | trap = __kvmppc_vcore_entry(); |
de56a948 | 3651 | |
3309bec8 AK |
3652 | trace_hardirqs_off(); |
3653 | ||
a4bc64d3 NR |
3654 | this_cpu_enable_ftrace(); |
3655 | ||
ec257165 PM |
3656 | srcu_read_unlock(&vc->kvm->srcu, srcu_idx); |
3657 | ||
8b24e69f PM |
3658 | set_irq_happened(trap); |
3659 | ||
ec257165 | 3660 | spin_lock(&vc->lock); |
371fefd6 | 3661 | /* prevent other vcpu threads from doing kvmppc_start_thread() now */ |
19ccb76a | 3662 | vc->vcore_state = VCORE_EXITING; |
371fefd6 | 3663 | |
19ccb76a | 3664 | /* wait for secondary threads to finish writing their state to memory */ |
516f7898 | 3665 | kvmppc_wait_for_nap(controlled_threads); |
b4deba5c PM |
3666 | |
3667 | /* Return to whole-core mode if we split the core earlier */ | |
516f7898 | 3668 | if (cmd_bit) { |
b4deba5c PM |
3669 | unsigned long hid0 = mfspr(SPRN_HID0); |
3670 | unsigned long loops = 0; | |
3671 | ||
3672 | hid0 &= ~HID0_POWER8_DYNLPARDIS; | |
3673 | stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; | |
3674 | mb(); | |
3675 | mtspr(SPRN_HID0, hid0); | |
3676 | isync(); | |
3677 | for (;;) { | |
3678 | hid0 = mfspr(SPRN_HID0); | |
3679 | if (!(hid0 & stat_bit)) | |
3680 | break; | |
3681 | cpu_relax(); | |
3682 | ++loops; | |
3683 | } | |
b1b1697a | 3684 | split_info.do_nap = 0; |
b4deba5c PM |
3685 | } |
3686 | ||
8b24e69f PM |
3687 | kvmppc_set_host_core(pcpu); |
3688 | ||
11266528 NP |
3689 | guest_exit_irqoff(); |
3690 | ||
8b24e69f PM |
3691 | local_irq_enable(); |
3692 | ||
b4deba5c | 3693 | /* Let secondaries go back to the offline loop */ |
45c940ba | 3694 | for (i = 0; i < controlled_threads; ++i) { |
b4deba5c PM |
3695 | kvmppc_release_hwthread(pcpu + i); |
3696 | if (sip && sip->napped[i]) | |
3697 | kvmppc_ipi_thread(pcpu + i); | |
a29ebeaf | 3698 | cpumask_clear_cpu(pcpu + i, &vc->kvm->arch.cpu_in_guest); |
b4deba5c PM |
3699 | } |
3700 | ||
371fefd6 | 3701 | spin_unlock(&vc->lock); |
2c9097e4 | 3702 | |
371fefd6 PM |
3703 | /* make sure updates to secondary vcpu structs are visible now */ |
3704 | smp_mb(); | |
de56a948 | 3705 | |
36ee41d1 PM |
3706 | preempt_enable(); |
3707 | ||
898b25b2 PM |
3708 | for (sub = 0; sub < core_info.n_subcores; ++sub) { |
3709 | pvc = core_info.vc[sub]; | |
3710 | post_guest_process(pvc, pvc == vc); | |
3711 | } | |
de56a948 | 3712 | |
913d3ff9 | 3713 | spin_lock(&vc->lock); |
de56a948 PM |
3714 | |
3715 | out: | |
19ccb76a | 3716 | vc->vcore_state = VCORE_INACTIVE; |
3c78f78a | 3717 | trace_kvmppc_run_core(vc, 1); |
371fefd6 PM |
3718 | } |
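/*
 * Both the split and unsplit sequences in kvmppc_run_core() follow one
 * pattern for switching core modes via HID0: write the command bit,
 * order it after prior stores, then spin until the hardware raises the
 * matching status bit. Condensed sketch of that shape, using the same
 * SPR helpers this file already includes (bit values as in the code
 * above):
 */
static void demo_enter_split_mode(unsigned long cmd_bit,
				  unsigned long stat_bit)
{
	unsigned long hid0 = mfspr(SPRN_HID0);

	hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS;
	mb();			/* order prior stores before the switch */
	mtspr(SPRN_HID0, hid0);
	isync();
	while (!(mfspr(SPRN_HID0) & stat_bit))
		cpu_relax();	/* hardware acks via the status bit */
}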
3719 | ||
edba6aff | 3720 | static void load_spr_state(struct kvm_vcpu *vcpu) |
95a6432c | 3721 | { |
edba6aff NP |
3722 | mtspr(SPRN_DSCR, vcpu->arch.dscr); |
3723 | mtspr(SPRN_IAMR, vcpu->arch.iamr); | |
3724 | mtspr(SPRN_PSPB, vcpu->arch.pspb); | |
3725 | mtspr(SPRN_FSCR, vcpu->arch.fscr); | |
3726 | mtspr(SPRN_TAR, vcpu->arch.tar); | |
3727 | mtspr(SPRN_EBBHR, vcpu->arch.ebbhr); | |
3728 | mtspr(SPRN_EBBRR, vcpu->arch.ebbrr); | |
3729 | mtspr(SPRN_BESCR, vcpu->arch.bescr); | |
3730 | mtspr(SPRN_WORT, vcpu->arch.wort); | |
3731 | mtspr(SPRN_TIDR, vcpu->arch.tid); | |
3732 | mtspr(SPRN_AMR, vcpu->arch.amr); | |
3733 | mtspr(SPRN_UAMOR, vcpu->arch.uamor); | |
95a6432c | 3734 | |
35dfb43c | 3735 | /* |
edba6aff NP |
3736 | * DAR, DSISR, and for nested HV, SPRGs must be set with MSR[RI] |
3737 | * clear (or hstate set appropriately to catch those registers | |
3738 | * being clobbered if we take an MCE or SRESET), so those are done | |
3739 | * later. | |
35dfb43c | 3740 | */ |
95a6432c | 3741 | |
edba6aff NP |
3742 | if (!(vcpu->arch.ctrl & 1)) |
3743 | mtspr(SPRN_CTRLT, mfspr(SPRN_CTRLF) & ~1); | |
3744 | } | |
95a6432c | 3745 | |
edba6aff NP |
3746 | static void store_spr_state(struct kvm_vcpu *vcpu) |
3747 | { | |
3748 | vcpu->arch.ctrl = mfspr(SPRN_CTRLF); | |
95a6432c | 3749 | |
edba6aff NP |
3750 | vcpu->arch.iamr = mfspr(SPRN_IAMR); |
3751 | vcpu->arch.pspb = mfspr(SPRN_PSPB); | |
3752 | vcpu->arch.fscr = mfspr(SPRN_FSCR); | |
3753 | vcpu->arch.tar = mfspr(SPRN_TAR); | |
3754 | vcpu->arch.ebbhr = mfspr(SPRN_EBBHR); | |
3755 | vcpu->arch.ebbrr = mfspr(SPRN_EBBRR); | |
3756 | vcpu->arch.bescr = mfspr(SPRN_BESCR); | |
3757 | vcpu->arch.wort = mfspr(SPRN_WORT); | |
3758 | vcpu->arch.tid = mfspr(SPRN_TIDR); | |
3759 | vcpu->arch.amr = mfspr(SPRN_AMR); | |
3760 | vcpu->arch.uamor = mfspr(SPRN_UAMOR); | |
3761 | vcpu->arch.dscr = mfspr(SPRN_DSCR); | |
3762 | } | |
95a6432c | 3763 | |
edba6aff NP |
3764 | /* |
3765 | * Privileged (non-hypervisor) host registers to save. | |
3766 | */ | |
3767 | struct p9_host_os_sprs { | |
3768 | unsigned long dscr; | |
3769 | unsigned long tidr; | |
3770 | unsigned long iamr; | |
3771 | unsigned long amr; | |
3772 | unsigned long fscr; | |
3773 | }; | |
95a6432c | 3774 | |
edba6aff NP |
3775 | static void save_p9_host_os_sprs(struct p9_host_os_sprs *host_os_sprs) |
3776 | { | |
3777 | host_os_sprs->dscr = mfspr(SPRN_DSCR); | |
3778 | host_os_sprs->tidr = mfspr(SPRN_TIDR); | |
3779 | host_os_sprs->iamr = mfspr(SPRN_IAMR); | |
3780 | host_os_sprs->amr = mfspr(SPRN_AMR); | |
3781 | host_os_sprs->fscr = mfspr(SPRN_FSCR); | |
3782 | } | |
95a6432c | 3783 | |
edba6aff NP |
3784 | /* vcpu guest regs must already be saved */ |
3785 | static void restore_p9_host_os_sprs(struct kvm_vcpu *vcpu, | |
3786 | struct p9_host_os_sprs *host_os_sprs) | |
3787 | { | |
3788 | mtspr(SPRN_PSPB, 0); | |
3789 | mtspr(SPRN_WORT, 0); | |
3790 | mtspr(SPRN_UAMOR, 0); | |
dc462267 | 3791 | |
edba6aff NP |
3792 | mtspr(SPRN_DSCR, host_os_sprs->dscr); |
3793 | mtspr(SPRN_TIDR, host_os_sprs->tidr); | |
3794 | mtspr(SPRN_IAMR, host_os_sprs->iamr); | |
95a6432c | 3795 | |
edba6aff NP |
3796 | if (host_os_sprs->amr != vcpu->arch.amr) |
3797 | mtspr(SPRN_AMR, host_os_sprs->amr); | |
95a6432c | 3798 | |
edba6aff NP |
3799 | if (host_os_sprs->fscr != vcpu->arch.fscr) |
3800 | mtspr(SPRN_FSCR, host_os_sprs->fscr); | |
95a6432c | 3801 | |
edba6aff NP |
3802 | /* Guest CTRL was saved in store_spr_state(); set runlatch back to 1 */ |
3803 | if (!(vcpu->arch.ctrl & 1)) | |
3804 | mtspr(SPRN_CTRLT, 1); | |
3805 | } | |
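/*
 * The AMR and FSCR compares above show a common SPR-switching
 * optimization: mtspr can be slow, so compare first and only write when
 * the host value differs from what the guest left in the register.
 * Sketch with one concrete SPR, reusing the struct defined above:
 */
static void demo_restore_amr(struct p9_host_os_sprs *host_os_sprs,
			     unsigned long guest_amr)
{
	if (host_os_sprs->amr != guest_amr)
		mtspr(SPRN_AMR, host_os_sprs->amr); /* skip redundant write */
}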
95a6432c | 3806 | |
9dc2babc NP |
3807 | static inline bool hcall_is_xics(unsigned long req) |
3808 | { | |
3809 | return req == H_EOI || req == H_CPPR || req == H_IPI || | |
3810 | req == H_IPOLL || req == H_XIRR || req == H_XIRR_X; | |
95a6432c PM |
3811 | } |
3812 | ||
3813 | /* | |
fae5c9f3 | 3814 | * Guest entry for POWER9 and later CPUs. |
95a6432c | 3815 | */ |
cf59eb13 | 3816 | static int kvmhv_p9_guest_entry(struct kvm_vcpu *vcpu, u64 time_limit, |
360cae31 | 3817 | unsigned long lpcr) |
95a6432c PM |
3818 | { |
3819 | struct kvmppc_vcore *vc = vcpu->arch.vcore; | |
edba6aff | 3820 | struct p9_host_os_sprs host_os_sprs; |
95a6432c PM |
3821 | s64 dec; |
3822 | u64 tb; | |
3823 | int trap, save_pmu; | |
3824 | ||
89d35b23 NP |
3825 | WARN_ON_ONCE(vcpu->arch.ceded); |
3826 | ||
95a6432c PM |
3827 | dec = mfspr(SPRN_DEC); |
3828 | tb = mftb(); | |
35dfb43c | 3829 | if (dec < 0) |
95a6432c PM |
3830 | return BOOK3S_INTERRUPT_HV_DECREMENTER; |
3831 | local_paca->kvm_hstate.dec_expires = dec + tb; | |
3832 | if (local_paca->kvm_hstate.dec_expires < time_limit) | |
3833 | time_limit = local_paca->kvm_hstate.dec_expires; | |
3834 | ||
edba6aff | 3835 | save_p9_host_os_sprs(&host_os_sprs); |
95a6432c PM |
3836 | |
3837 | kvmhv_save_host_pmu(); /* saves it to PACA kvm_hstate */ | |
3838 | ||
3839 | kvmppc_subcore_enter_guest(); | |
3840 | ||
3841 | vc->entry_exit_map = 1; | |
3842 | vc->in_guest = 1; | |
3843 | ||
3844 | if (vcpu->arch.vpa.pinned_addr) { | |
3845 | struct lppaca *lp = vcpu->arch.vpa.pinned_addr; | |
3846 | u32 yield_count = be32_to_cpu(lp->yield_count) + 1; | |
3847 | lp->yield_count = cpu_to_be32(yield_count); | |
3848 | vcpu->arch.vpa.dirty = 1; | |
3849 | } | |
3850 | ||
3851 | if (cpu_has_feature(CPU_FTR_TM) || | |
3852 | cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) | |
3853 | kvmppc_restore_tm_hv(vcpu, vcpu->arch.shregs.msr, true); | |
3854 | ||
3855 | kvmhv_load_guest_pmu(vcpu); | |
3856 | ||
3857 | msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); | |
3858 | load_fp_state(&vcpu->arch.fp); | |
3859 | #ifdef CONFIG_ALTIVEC | |
3860 | load_vr_state(&vcpu->arch.vr); | |
3861 | #endif | |
44b198ae | 3862 | mtspr(SPRN_VRSAVE, vcpu->arch.vrsave); |
95a6432c | 3863 | |
edba6aff | 3864 | load_spr_state(vcpu); |
95a6432c | 3865 | |
6ffe2c6e NP |
3866 | /* |
3867 | * When setting DEC, we must always deal with irq_work_raise via NMI vs | |
3868 | * setting DEC. The problem occurs right as we switch into guest mode: | |
3869 | * if an NMI hits and sets pending work and sets DEC, then that DEC | |
3870 | * value will apply to the guest and not bring us back to the host. | |
3871 | * | |
3872 | * irq_work_raise could check a flag (or possibly LPCR[HDICE] for | |
3873 | * example) and set HDEC to 1? That wouldn't solve the nested hv | |
3874 | * case which needs to abort the hcall or zero the time limit. | |
3875 | * | |
3876 | * XXX: Another day's problem. | |
3877 | */ | |
95a6432c PM |
3878 | mtspr(SPRN_DEC, vcpu->arch.dec_expires - mftb()); |
3879 | ||
360cae31 | 3880 | if (kvmhv_on_pseries()) { |
c8b4083d SJS |
3881 | /* |
3882 | * We need to save and restore the guest visible part of the | |
3883 | * psscr (i.e. using SPRN_PSSCR_PR) since the hypervisor | |
3884 | * doesn't do this for us. Note this is only required on pseries, | |
c00366e2 | 3885 | * since otherwise it is done in kvmhv_vcpu_entry_p9() below. | |
c8b4083d SJS |
3886 | */ |
3887 | unsigned long host_psscr; | |
360cae31 PM |
3888 | /* call our hypervisor to load up HV regs and go */ |
3889 | struct hv_guest_state hvregs; | |
3890 | ||
c8b4083d SJS |
3891 | host_psscr = mfspr(SPRN_PSSCR_PR); |
3892 | mtspr(SPRN_PSSCR_PR, vcpu->arch.psscr); | |
360cae31 PM |
3893 | kvmhv_save_hv_regs(vcpu, &hvregs); |
3894 | hvregs.lpcr = lpcr; | |
3895 | vcpu->arch.regs.msr = vcpu->arch.shregs.msr; | |
3896 | hvregs.version = HV_GUEST_STATE_VERSION; | |
3897 | if (vcpu->arch.nested) { | |
3898 | hvregs.lpid = vcpu->arch.nested->shadow_lpid; | |
3899 | hvregs.vcpu_token = vcpu->arch.nested_vcpu_id; | |
3900 | } else { | |
3901 | hvregs.lpid = vcpu->kvm->arch.lpid; | |
3902 | hvregs.vcpu_token = vcpu->vcpu_id; | |
3903 | } | |
3904 | hvregs.hdec_expiry = time_limit; | |
6d770e3f NP |
3905 | mtspr(SPRN_DAR, vcpu->arch.shregs.dar); |
3906 | mtspr(SPRN_DSISR, vcpu->arch.shregs.dsisr); | |
360cae31 PM |
3907 | trap = plpar_hcall_norets(H_ENTER_NESTED, __pa(&hvregs), |
3908 | __pa(&vcpu->arch.regs)); | |
3909 | kvmhv_restore_hv_return_state(vcpu, &hvregs); | |
3910 | vcpu->arch.shregs.msr = vcpu->arch.regs.msr; | |
3911 | vcpu->arch.shregs.dar = mfspr(SPRN_DAR); | |
3912 | vcpu->arch.shregs.dsisr = mfspr(SPRN_DSISR); | |
c8b4083d SJS |
3913 | vcpu->arch.psscr = mfspr(SPRN_PSSCR_PR); |
3914 | mtspr(SPRN_PSSCR_PR, host_psscr); | |
4bad7779 PM |
3915 | |
3916 | /* H_CEDE has to be handled now, not later */ | |
3917 | if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && | |
3918 | kvmppc_get_gpr(vcpu, 3) == H_CEDE) { | |
9dc2babc | 3919 | kvmppc_cede(vcpu); |
1f50cc17 | 3920 | kvmppc_set_gpr(vcpu, 3, 0); |
4bad7779 PM |
3921 | trap = 0; |
3922 | } | |
360cae31 | 3923 | } else { |
09512c29 | 3924 | kvmppc_xive_push_vcpu(vcpu); |
c00366e2 | 3925 | trap = kvmhv_vcpu_entry_p9(vcpu, time_limit, lpcr); |
9dc2babc NP |
3926 | if (trap == BOOK3S_INTERRUPT_SYSCALL && !vcpu->arch.nested && |
3927 | !(vcpu->arch.shregs.msr & MSR_PR)) { | |
3928 | unsigned long req = kvmppc_get_gpr(vcpu, 3); | |
3929 | ||
3930 | /* H_CEDE has to be handled now, not later */ | |
3931 | if (req == H_CEDE) { | |
3932 | kvmppc_cede(vcpu); | |
3933 | kvmppc_xive_rearm_escalation(vcpu); /* may un-cede */ | |
3934 | kvmppc_set_gpr(vcpu, 3, 0); | |
3935 | trap = 0; | |
3936 | ||
3937 | /* XICS hcalls must be handled before xive is pulled */ | |
3938 | } else if (hcall_is_xics(req)) { | |
3939 | int ret; | |
3940 | ||
3941 | ret = kvmppc_xive_xics_hcall(vcpu, req); | |
3942 | if (ret != H_TOO_HARD) { | |
3943 | kvmppc_set_gpr(vcpu, 3, ret); | |
3944 | trap = 0; | |
3945 | } | |
3946 | } | |
3947 | } | |
09512c29 | 3948 | kvmppc_xive_pull_vcpu(vcpu); |
89d35b23 | 3949 | |
079a09a5 NP |
3950 | if (kvm_is_radix(vcpu->kvm)) |
3951 | vcpu->arch.slb_max = 0; | |
95a6432c PM |
3952 | } |
3953 | ||
95a6432c | 3954 | dec = mfspr(SPRN_DEC); |
86953770 SJS |
3955 | if (!(lpcr & LPCR_LD)) /* Sign extend if not using large decrementer */ |
3956 | dec = (s32) dec; | |
95a6432c PM |
3957 | tb = mftb(); |
3958 | vcpu->arch.dec_expires = dec + tb; | |
3959 | vcpu->cpu = -1; | |
3960 | vcpu->arch.thread_cpu = -1; | |
95a6432c | 3961 | |
edba6aff | 3962 | store_spr_state(vcpu); |
95a6432c | 3963 | |
edba6aff | 3964 | restore_p9_host_os_sprs(vcpu, &host_os_sprs); |
25edcc50 | 3965 | |
95a6432c PM |
3966 | msr_check_and_set(MSR_FP | MSR_VEC | MSR_VSX); |
3967 | store_fp_state(&vcpu->arch.fp); | |
3968 | #ifdef CONFIG_ALTIVEC | |
3969 | store_vr_state(&vcpu->arch.vr); | |
3970 | #endif | |
44b198ae | 3971 | vcpu->arch.vrsave = mfspr(SPRN_VRSAVE); |
95a6432c PM |
3972 | |
3973 | if (cpu_has_feature(CPU_FTR_TM) || | |
3974 | cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST)) | |
3975 | kvmppc_save_tm_hv(vcpu, vcpu->arch.shregs.msr, true); | |
3976 | ||
3977 | save_pmu = 1; | |
3978 | if (vcpu->arch.vpa.pinned_addr) { | |
3979 | struct lppaca *lp = vcpu->arch.vpa.pinned_addr; | |
3980 | u32 yield_count = be32_to_cpu(lp->yield_count) + 1; | |
3981 | lp->yield_count = cpu_to_be32(yield_count); | |
3982 | vcpu->arch.vpa.dirty = 1; | |
3983 | save_pmu = lp->pmcregs_in_use; | |
3984 | } | |
63279eeb SJS |
3985 | /* Must save pmu if this guest is capable of running nested guests */ |
3986 | save_pmu |= nesting_enabled(vcpu->kvm); | |
95a6432c PM |
3987 | |
3988 | kvmhv_save_guest_pmu(vcpu, save_pmu); | |
3989 | ||
3990 | vc->entry_exit_map = 0x101; | |
3991 | vc->in_guest = 0; | |
3992 | ||
3993 | mtspr(SPRN_DEC, local_paca->kvm_hstate.dec_expires - mftb()); | |
6ffe2c6e NP |
3994 | /* We may have raced with new irq work */ |
3995 | if (test_irq_work_pending()) | |
3996 | set_dec(1); | |
d724c9e5 | 3997 | mtspr(SPRN_SPRG_VDSO_WRITE, local_paca->sprg_vdso); |
95a6432c PM |
3998 | |
3999 | kvmhv_load_host_pmu(); | |
4000 | ||
4001 | kvmppc_subcore_exit_guest(); | |
4002 | ||
4003 | return trap; | |
4004 | } | |
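/*
 * Worked example of the DEC sign extension near the end of
 * kvmhv_p9_guest_entry(): without LPCR_LD the decrementer is a 32-bit
 * register, so a just-expired DEC reads back as e.g. 0xfffffff0. Cast
 * through s32 that becomes -16, and dec + tb lands slightly in the
 * past, as it should; zero-extended to 64 bits it would instead point
 * ~4 billion timebase ticks into the future. Hypothetical helper
 * showing just that conversion:
 */
static s64 demo_read_dec(u64 raw_dec, bool large_dec)
{
	s64 dec = raw_dec;

	if (!large_dec)
		dec = (s32)dec;	/* 0xfffffff0 -> -16, not 4294967280 */
	return dec;
}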
4005 | ||
19ccb76a PM |
4006 | /* |
4007 | * Wait for some other vcpu thread to execute us, and | |
4008 | * wake us up when we need to handle something in the host. | |
4009 | */ | |
ec257165 PM |
4010 | static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, |
4011 | struct kvm_vcpu *vcpu, int wait_state) | |
371fefd6 | 4012 | { |
371fefd6 PM |
4013 | DEFINE_WAIT(wait); |
4014 | ||
19ccb76a | 4015 | prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); |
ec257165 PM |
4016 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { |
4017 | spin_unlock(&vc->lock); | |
19ccb76a | 4018 | schedule(); |
ec257165 PM |
4019 | spin_lock(&vc->lock); |
4020 | } | |
19ccb76a PM |
4021 | finish_wait(&vcpu->arch.cpu_run, &wait); |
4022 | } | |
4023 | ||
0cda69dd SJS |
4024 | static void grow_halt_poll_ns(struct kvmppc_vcore *vc) |
4025 | { | |
7fa08e71 NW |
4026 | if (!halt_poll_ns_grow) |
4027 | return; | |
4028 | ||
dee339b5 NW |
4029 | vc->halt_poll_ns *= halt_poll_ns_grow; |
4030 | if (vc->halt_poll_ns < halt_poll_ns_grow_start) | |
49113d36 | 4031 | vc->halt_poll_ns = halt_poll_ns_grow_start; |
0cda69dd SJS |
4032 | } |
4033 | ||
4034 | static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) | |
4035 | { | |
4036 | if (halt_poll_ns_shrink == 0) | |
4037 | vc->halt_poll_ns = 0; | |
4038 | else | |
4039 | vc->halt_poll_ns /= halt_poll_ns_shrink; | |
4040 | } | |
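/*
 * Worked example of the two adjustment helpers above, assuming the
 * module-parameter defaults halt_poll_ns_grow = 2,
 * halt_poll_ns_grow_start = 10000 and halt_poll_ns_shrink = 0 (assumed
 * values; all three are tunable at runtime): a vcore that keeps waking
 * just after blocking grows its poll window 0 -> 10000 -> 20000 ->
 * 40000 ns, while with shrink == 0 a single long sleep drops the
 * window straight back to 0 rather than halving it.
 */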
4041 | ||
ee3308a2 PM |
4042 | #ifdef CONFIG_KVM_XICS |
4043 | static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) | |
4044 | { | |
03f95332 | 4045 | if (!xics_on_xive()) |
ee3308a2 | 4046 | return false; |
2267ea76 | 4047 | return vcpu->arch.irq_pending || vcpu->arch.xive_saved_state.pipr < |
ee3308a2 PM |
4048 | vcpu->arch.xive_saved_state.cppr; |
4049 | } | |
4050 | #else | |
4051 | static inline bool xive_interrupt_pending(struct kvm_vcpu *vcpu) | |
4052 | { | |
4053 | return false; | |
4054 | } | |
4055 | #endif /* CONFIG_KVM_XICS */ | |
4056 | ||
1da4e2f4 PM |
4057 | static bool kvmppc_vcpu_woken(struct kvm_vcpu *vcpu) |
4058 | { | |
4059 | if (vcpu->arch.pending_exceptions || vcpu->arch.prodded || | |
ee3308a2 | 4060 | kvmppc_doorbell_pending(vcpu) || xive_interrupt_pending(vcpu)) |
1da4e2f4 PM |
4061 | return true; |
4062 | ||
4063 | return false; | |
4064 | } | |
4065 | ||
908a0935 SJS |
4066 | /* |
4067 | * Check to see if any of the runnable vcpus on the vcore have pending | |
0cda69dd SJS |
4068 | * exceptions or are no longer ceded |
4069 | */ | |
4070 | static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc) | |
4071 | { | |
4072 | struct kvm_vcpu *vcpu; | |
4073 | int i; | |
4074 | ||
4075 | for_each_runnable_thread(i, vcpu, vc) { | |
1da4e2f4 | 4076 | if (!vcpu->arch.ceded || kvmppc_vcpu_woken(vcpu)) |
0cda69dd SJS |
4077 | return 1; |
4078 | } | |
4079 | ||
4080 | return 0; | |
4081 | } | |
4082 | ||
19ccb76a PM |
4083 | /* |
4084 | * All the vcpus in this vcore are idle, so wait for a decrementer | |
4085 | * or external interrupt to one of the vcpus. vc->lock is held. | |
4086 | */ | |
4087 | static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) | |
4088 | { | |
2a27f514 | 4089 | ktime_t cur, start_poll, start_wait; |
0cda69dd | 4090 | int do_sleep = 1; |
0cda69dd | 4091 | u64 block_ns; |
1bc5d59c | 4092 | |
0cda69dd | 4093 | /* Poll for pending exceptions and ceded state */ |
2a27f514 | 4094 | cur = start_poll = ktime_get(); |
0cda69dd | 4095 | if (vc->halt_poll_ns) { |
2a27f514 | 4096 | ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); |
0193cc90 | 4097 | ++vc->runner->stat.generic.halt_attempted_poll; |
1bc5d59c | 4098 | |
0cda69dd SJS |
4099 | vc->vcore_state = VCORE_POLLING; |
4100 | spin_unlock(&vc->lock); | |
4101 | ||
4102 | do { | |
4103 | if (kvmppc_vcore_check_block(vc)) { | |
4104 | do_sleep = 0; | |
4105 | break; | |
4106 | } | |
4107 | cur = ktime_get(); | |
6bd5b743 | 4108 | } while (kvm_vcpu_can_poll(cur, stop)); |
0cda69dd SJS |
4109 | |
4110 | spin_lock(&vc->lock); | |
4111 | vc->vcore_state = VCORE_INACTIVE; | |
4112 | ||
2a27f514 | 4113 | if (!do_sleep) { |
0193cc90 | 4114 | ++vc->runner->stat.generic.halt_successful_poll; |
0cda69dd | 4115 | goto out; |
2a27f514 | 4116 | } |
1bc5d59c SW |
4117 | } |
4118 | ||
da4ad88c DB |
4119 | prepare_to_rcuwait(&vc->wait); |
4120 | set_current_state(TASK_INTERRUPTIBLE); | |
0cda69dd | 4121 | if (kvmppc_vcore_check_block(vc)) { |
da4ad88c | 4122 | finish_rcuwait(&vc->wait); |
0cda69dd | 4123 | do_sleep = 0; |
2a27f514 SJS |
4124 | /* If we polled, count this as a successful poll */ |
4125 | if (vc->halt_poll_ns) | |
0193cc90 | 4126 | ++vc->runner->stat.generic.halt_successful_poll; |
0cda69dd | 4127 | goto out; |
1bc5d59c SW |
4128 | } |
4129 | ||
2a27f514 SJS |
4130 | start_wait = ktime_get(); |
4131 | ||
19ccb76a | 4132 | vc->vcore_state = VCORE_SLEEPING; |
3c78f78a | 4133 | trace_kvmppc_vcore_blocked(vc, 0); |
19ccb76a | 4134 | spin_unlock(&vc->lock); |
913d3ff9 | 4135 | schedule(); |
da4ad88c | 4136 | finish_rcuwait(&vc->wait); |
19ccb76a PM |
4137 | spin_lock(&vc->lock); |
4138 | vc->vcore_state = VCORE_INACTIVE; | |
3c78f78a | 4139 | trace_kvmppc_vcore_blocked(vc, 1); |
2a27f514 | 4140 | ++vc->runner->stat.halt_successful_wait; |
0cda69dd SJS |
4141 | |
4142 | cur = ktime_get(); | |
4143 | ||
4144 | out: | |
2a27f514 SJS |
4145 | block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); |
4146 | ||
4147 | /* Attribute wait time */ | |
4148 | if (do_sleep) { | |
4149 | vc->runner->stat.halt_wait_ns += | |
4150 | ktime_to_ns(cur) - ktime_to_ns(start_wait); | |
4151 | /* Attribute failed poll time */ | |
4152 | if (vc->halt_poll_ns) | |
0193cc90 | 4153 | vc->runner->stat.generic.halt_poll_fail_ns += |
2a27f514 SJS |
4154 | ktime_to_ns(start_wait) - |
4155 | ktime_to_ns(start_poll); | |
4156 | } else { | |
4157 | /* Attribute successful poll time */ | |
4158 | if (vc->halt_poll_ns) | |
0193cc90 | 4159 | vc->runner->stat.generic.halt_poll_success_ns += |
2a27f514 SJS |
4160 | ktime_to_ns(cur) - |
4161 | ktime_to_ns(start_poll); | |
4162 | } | |
0cda69dd SJS |
4163 | |
4164 | /* Adjust poll time */ | |
307d93e4 | 4165 | if (halt_poll_ns) { |
0cda69dd SJS |
4166 | if (block_ns <= vc->halt_poll_ns) |
4167 | ; | |
4168 | /* We slept and blocked for longer than the max halt time */ | |
307d93e4 | 4169 | else if (vc->halt_poll_ns && block_ns > halt_poll_ns) |
0cda69dd SJS |
4170 | shrink_halt_poll_ns(vc); |
4171 | /* We slept and our poll time is too small */ | |
307d93e4 SJS |
4172 | else if (vc->halt_poll_ns < halt_poll_ns && |
4173 | block_ns < halt_poll_ns) | |
0cda69dd | 4174 | grow_halt_poll_ns(vc); |
e03f3921 SJS |
4175 | if (vc->halt_poll_ns > halt_poll_ns) |
4176 | vc->halt_poll_ns = halt_poll_ns; | |
0cda69dd SJS |
4177 | } else |
4178 | vc->halt_poll_ns = 0; | |
4179 | ||
4180 | trace_kvmppc_vcore_wakeup(do_sleep, block_ns); | |
19ccb76a | 4181 | } |
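/*
 * The attribution logic above, as arithmetic, for a run that starts
 * polling at t0, gives up and sleeps at t1, and is woken at t2:
 *
 *   block_ns                    = t2 - t0
 *   stat.halt_wait_ns          += t2 - t1   (the sleep)
 *   generic.halt_poll_fail_ns  += t1 - t0   (the poll that didn't pay)
 *
 * If the poll succeeds (no sleep), the whole t2 - t0 interval is
 * credited to generic.halt_poll_success_ns instead, and block_ns then
 * feeds the grow/shrink decisions at the end of the function.
 */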
371fefd6 | 4182 | |
360cae31 PM |
4183 | /* |
4184 | * This never fails for a radix guest, as none of the operations it does | |
4185 | * for a radix guest can fail or have a way to report failure. | |
360cae31 | 4186 | */ |
432953b4 PM |
4187 | static int kvmhv_setup_mmu(struct kvm_vcpu *vcpu) |
4188 | { | |
4189 | int r = 0; | |
4190 | struct kvm *kvm = vcpu->kvm; | |
4191 | ||
0d4ee88d | 4192 | mutex_lock(&kvm->arch.mmu_setup_lock); |
432953b4 PM |
4193 | if (!kvm->arch.mmu_ready) { |
4194 | if (!kvm_is_radix(kvm)) | |
4195 | r = kvmppc_hv_setup_htab_rma(vcpu); | |
4196 | if (!r) { | |
4197 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
4198 | kvmppc_setup_partition_table(kvm); | |
4199 | kvm->arch.mmu_ready = 1; | |
4200 | } | |
4201 | } | |
0d4ee88d | 4202 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
432953b4 PM |
4203 | return r; |
4204 | } | |
4205 | ||
8c99d345 | 4206 | static int kvmppc_run_vcpu(struct kvm_vcpu *vcpu) |
19ccb76a | 4207 | { |
8c99d345 | 4208 | struct kvm_run *run = vcpu->run; |
38c53af8 | 4209 | int n_ceded, i, r; |
19ccb76a | 4210 | struct kvmppc_vcore *vc; |
7b5f8272 | 4211 | struct kvm_vcpu *v; |
9e368f29 | 4212 | |
3c78f78a SW |
4213 | trace_kvmppc_run_vcpu_enter(vcpu); |
4214 | ||
8c99d345 | 4215 | run->exit_reason = 0; |
371fefd6 PM |
4216 | vcpu->arch.ret = RESUME_GUEST; |
4217 | vcpu->arch.trap = 0; | |
2f12f034 | 4218 | kvmppc_update_vpas(vcpu); |
371fefd6 | 4219 | |
371fefd6 PM |
4220 | /* |
4221 | * Synchronize with other threads in this virtual core | |
4222 | */ | |
4223 | vc = vcpu->arch.vcore; | |
4224 | spin_lock(&vc->lock); | |
19ccb76a | 4225 | vcpu->arch.ceded = 0; |
371fefd6 | 4226 | vcpu->arch.run_task = current; |
c7b67670 | 4227 | vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); |
19ccb76a | 4228 | vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; |
c7b67670 | 4229 | vcpu->arch.busy_preempt = TB_NIL; |
7b5f8272 | 4230 | WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); |
371fefd6 PM |
4231 | ++vc->n_runnable; |
4232 | ||
19ccb76a PM |
4233 | /* |
4234 | * This happens the first time this is called for a vcpu. | |
4235 | * If the vcore is already running, we may be able to start | |
4236 | * this thread straight away and have it join in. | |
4237 | */ | |
8455d79e | 4238 | if (!signal_pending(current)) { |
c0093f1a PM |
4239 | if ((vc->vcore_state == VCORE_PIGGYBACK || |
4240 | vc->vcore_state == VCORE_RUNNING) && | |
ec257165 | 4241 | !VCORE_IS_EXITING(vc)) { |
2f12f034 | 4242 | kvmppc_create_dtl_entry(vcpu, vc); |
b4deba5c | 4243 | kvmppc_start_thread(vcpu, vc); |
3c78f78a | 4244 | trace_kvm_guest_enter(vcpu); |
8455d79e | 4245 | } else if (vc->vcore_state == VCORE_SLEEPING) { |
da4ad88c | 4246 | rcuwait_wake_up(&vc->wait); |
371fefd6 PM |
4247 | } |
4248 | ||
8455d79e | 4249 | } |
371fefd6 | 4250 | |
19ccb76a PM |
4251 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
4252 | !signal_pending(current)) { | |
072df813 PM |
4253 | /* See if the MMU is ready to go */ |
4254 | if (!vcpu->kvm->arch.mmu_ready) { | |
38c53af8 | 4255 | spin_unlock(&vc->lock); |
432953b4 | 4256 | r = kvmhv_setup_mmu(vcpu); |
38c53af8 PM |
4257 | spin_lock(&vc->lock); |
4258 | if (r) { | |
8c99d345 TZ |
4259 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; |
4260 | run->fail_entry. | |
432953b4 | 4261 | hardware_entry_failure_reason = 0; |
38c53af8 PM |
4262 | vcpu->arch.ret = r; |
4263 | break; | |
4264 | } | |
4265 | } | |
4266 | ||
ec257165 PM |
4267 | if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) |
4268 | kvmppc_vcore_end_preempt(vc); | |
4269 | ||
8455d79e | 4270 | if (vc->vcore_state != VCORE_INACTIVE) { |
ec257165 | 4271 | kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); |
19ccb76a PM |
4272 | continue; |
4273 | } | |
7b5f8272 | 4274 | for_each_runnable_thread(i, v, vc) { |
7e28e60e | 4275 | kvmppc_core_prepare_to_enter(v); |
19ccb76a PM |
4276 | if (signal_pending(v->arch.run_task)) { |
4277 | kvmppc_remove_runnable(vc, v); | |
4278 | v->stat.signal_exits++; | |
2610a57f | 4279 | v->run->exit_reason = KVM_EXIT_INTR; |
19ccb76a PM |
4280 | v->arch.ret = -EINTR; |
4281 | wake_up(&v->arch.cpu_run); | |
4282 | } | |
4283 | } | |
8455d79e PM |
4284 | if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
4285 | break; | |
8455d79e | 4286 | n_ceded = 0; |
7b5f8272 | 4287 | for_each_runnable_thread(i, v, vc) { |
1da4e2f4 | 4288 | if (!kvmppc_vcpu_woken(v)) |
8455d79e | 4289 | n_ceded += v->arch.ceded; |
4619ac88 PM |
4290 | else |
4291 | v->arch.ceded = 0; | |
4292 | } | |
25fedfca PM |
4293 | vc->runner = vcpu; |
4294 | if (n_ceded == vc->n_runnable) { | |
8455d79e | 4295 | kvmppc_vcore_blocked(vc); |
c56dadf3 | 4296 | } else if (need_resched()) { |
ec257165 | 4297 | kvmppc_vcore_preempt(vc); |
25fedfca PM |
4298 | /* Let something else run */ |
4299 | cond_resched_lock(&vc->lock); | |
ec257165 PM |
4300 | if (vc->vcore_state == VCORE_PREEMPT) |
4301 | kvmppc_vcore_end_preempt(vc); | |
25fedfca | 4302 | } else { |
8455d79e | 4303 | kvmppc_run_core(vc); |
25fedfca | 4304 | } |
0456ec4f | 4305 | vc->runner = NULL; |
19ccb76a | 4306 | } |
371fefd6 | 4307 | |
8455d79e PM |
4308 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
4309 | (vc->vcore_state == VCORE_RUNNING || | |
5fc3e64f PM |
4310 | vc->vcore_state == VCORE_EXITING || |
4311 | vc->vcore_state == VCORE_PIGGYBACK)) | |
ec257165 | 4312 | kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); |
8455d79e | 4313 | |
5fc3e64f PM |
4314 | if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) |
4315 | kvmppc_vcore_end_preempt(vc); | |
4316 | ||
8455d79e PM |
4317 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { |
4318 | kvmppc_remove_runnable(vc, vcpu); | |
4319 | vcpu->stat.signal_exits++; | |
8c99d345 | 4320 | run->exit_reason = KVM_EXIT_INTR; |
8455d79e PM |
4321 | vcpu->arch.ret = -EINTR; |
4322 | } | |
4323 | ||
4324 | if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { | |
4325 | /* Wake up some vcpu to run the core */ | |
7b5f8272 SJS |
4326 | i = -1; |
4327 | v = next_runnable_thread(vc, &i); | |
8455d79e | 4328 | wake_up(&v->arch.cpu_run); |
371fefd6 PM |
4329 | } |
4330 | ||
8c99d345 | 4331 | trace_kvmppc_run_vcpu_exit(vcpu); |
371fefd6 | 4332 | spin_unlock(&vc->lock); |
371fefd6 | 4333 | return vcpu->arch.ret; |
de56a948 PM |
4334 | } |
4335 | ||
8c99d345 | 4336 | int kvmhv_run_single_vcpu(struct kvm_vcpu *vcpu, u64 time_limit, |
360cae31 | 4337 | unsigned long lpcr) |
95a6432c | 4338 | { |
8c99d345 | 4339 | struct kvm_run *run = vcpu->run; |
9d0b048d | 4340 | int trap, r, pcpu; |
48013cbc | 4341 | int srcu_idx; |
95a6432c PM |
4342 | struct kvmppc_vcore *vc; |
4343 | struct kvm *kvm = vcpu->kvm; | |
360cae31 | 4344 | struct kvm_nested_guest *nested = vcpu->arch.nested; |
95a6432c PM |
4345 | |
4346 | trace_kvmppc_run_vcpu_enter(vcpu); | |
4347 | ||
8c99d345 | 4348 | run->exit_reason = 0; |
95a6432c PM |
4349 | vcpu->arch.ret = RESUME_GUEST; |
4350 | vcpu->arch.trap = 0; | |
4351 | ||
4352 | vc = vcpu->arch.vcore; | |
4353 | vcpu->arch.ceded = 0; | |
4354 | vcpu->arch.run_task = current; | |
95a6432c PM |
4355 | vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); |
4356 | vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; | |
4357 | vcpu->arch.busy_preempt = TB_NIL; | |
4358 | vcpu->arch.last_inst = KVM_INST_FETCH_FAILED; | |
4359 | vc->runnable_threads[0] = vcpu; | |
4360 | vc->n_runnable = 1; | |
4361 | vc->runner = vcpu; | |
4362 | ||
4363 | /* See if the MMU is ready to go */ | |
079a09a5 NP |
4364 | if (!kvm->arch.mmu_ready) { |
4365 | r = kvmhv_setup_mmu(vcpu); | |
4366 | if (r) { | |
4367 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; | |
4368 | run->fail_entry.hardware_entry_failure_reason = 0; | |
4369 | vcpu->arch.ret = r; | |
4370 | return r; | |
4371 | } | |
4372 | } | |
95a6432c PM |
4373 | |
4374 | if (need_resched()) | |
4375 | cond_resched(); | |
4376 | ||
4377 | kvmppc_update_vpas(vcpu); | |
4378 | ||
4379 | init_vcore_to_run(vc); | |
4380 | vc->preempt_tb = TB_NIL; | |
4381 | ||
4382 | preempt_disable(); | |
4383 | pcpu = smp_processor_id(); | |
4384 | vc->pcpu = pcpu; | |
079a09a5 NP |
4385 | if (kvm_is_radix(kvm)) |
4386 | kvmppc_prepare_radix_vcpu(vcpu, pcpu); | |
95a6432c PM |
4387 | |
4388 | local_irq_disable(); | |
4389 | hard_irq_disable(); | |
4390 | if (signal_pending(current)) | |
4391 | goto sigpend; | |
4392 | if (lazy_irq_pending() || need_resched() || !kvm->arch.mmu_ready) | |
4393 | goto out; | |
4394 | ||
360cae31 PM |
4395 | if (!nested) { |
4396 | kvmppc_core_prepare_to_enter(vcpu); | |
4397 | if (vcpu->arch.doorbell_request) { | |
4398 | vc->dpdes = 1; | |
4399 | smp_wmb(); | |
4400 | vcpu->arch.doorbell_request = 0; | |
4401 | } | |
4402 | if (test_bit(BOOK3S_IRQPRIO_EXTERNAL, | |
4403 | &vcpu->arch.pending_exceptions)) | |
4404 | lpcr |= LPCR_MER; | |
4405 | } else if (vcpu->arch.pending_exceptions || | |
4406 | vcpu->arch.doorbell_request || | |
4407 | xive_interrupt_pending(vcpu)) { | |
4408 | vcpu->arch.ret = RESUME_HOST; | |
4409 | goto out; | |
4410 | } | |
95a6432c PM |
4411 | |
4412 | kvmppc_clear_host_core(pcpu); | |
4413 | ||
95a6432c PM |
4414 | local_paca->kvm_hstate.napping = 0; |
4415 | local_paca->kvm_hstate.kvm_split_mode = NULL; | |
4416 | kvmppc_start_thread(vcpu, vc); | |
4417 | kvmppc_create_dtl_entry(vcpu, vc); | |
4418 | trace_kvm_guest_enter(vcpu); | |
4419 | ||
4420 | vc->vcore_state = VCORE_RUNNING; | |
4421 | trace_kvmppc_run_core(vc, 0); | |
4422 | ||
95a6432c PM |
4423 | guest_enter_irqoff(); |
4424 | ||
4425 | srcu_idx = srcu_read_lock(&kvm->srcu); | |
4426 | ||
4427 | this_cpu_disable_ftrace(); | |
4428 | ||
1b28d553 PM |
4429 | /* Tell lockdep that we're about to enable interrupts */ |
4430 | trace_hardirqs_on(); | |
4431 | ||
360cae31 | 4432 | trap = kvmhv_p9_guest_entry(vcpu, time_limit, lpcr); |
95a6432c PM |
4433 | vcpu->arch.trap = trap; |
4434 | ||
1b28d553 PM |
4435 | trace_hardirqs_off(); |
4436 | ||
95a6432c PM |
4437 | this_cpu_enable_ftrace(); |
4438 | ||
4439 | srcu_read_unlock(&kvm->srcu, srcu_idx); | |
4440 | ||
95a6432c PM |
4441 | set_irq_happened(trap); |
4442 | ||
4443 | kvmppc_set_host_core(pcpu); | |
4444 | ||
11266528 NP |
4445 | guest_exit_irqoff(); |
4446 | ||
95a6432c | 4447 | local_irq_enable(); |
95a6432c PM |
4448 | |
4449 | cpumask_clear_cpu(pcpu, &kvm->arch.cpu_in_guest); | |
4450 | ||
4451 | preempt_enable(); | |
4452 | ||
3c25ab35 SJS |
4453 | /* |
4454 | * Cancel the pending decrementer exception if DEC is now positive, or
4455 | * if we are entering a nested guest, in which case the decrementer is
4456 | * now owned by L2 and the L1 decrementer is provided in hdec_expires.
4457 | */ | |
4458 | if (kvmppc_core_pending_dec(vcpu) && | |
4459 | ((get_tb() < vcpu->arch.dec_expires) || | |
4460 | (trap == BOOK3S_INTERRUPT_SYSCALL && | |
4461 | kvmppc_get_gpr(vcpu, 3) == H_ENTER_NESTED))) | |
95a6432c PM |
4462 | kvmppc_core_dequeue_dec(vcpu); |
4463 | ||
4464 | trace_kvm_guest_exit(vcpu); | |
4465 | r = RESUME_GUEST; | |
360cae31 PM |
4466 | if (trap) { |
4467 | if (!nested) | |
8c99d345 | 4468 | r = kvmppc_handle_exit_hv(vcpu, current); |
360cae31 | 4469 | else |
8c99d345 | 4470 | r = kvmppc_handle_nested_exit(vcpu); |
360cae31 | 4471 | } |
95a6432c PM |
4472 | vcpu->arch.ret = r; |
4473 | ||
4474 | if (is_kvmppc_resume_guest(r) && vcpu->arch.ceded && | |
4475 | !kvmppc_vcpu_woken(vcpu)) { | |
4476 | kvmppc_set_timer(vcpu); | |
4477 | while (vcpu->arch.ceded && !kvmppc_vcpu_woken(vcpu)) { | |
4478 | if (signal_pending(current)) { | |
4479 | vcpu->stat.signal_exits++; | |
8c99d345 | 4480 | run->exit_reason = KVM_EXIT_INTR; |
95a6432c PM |
4481 | vcpu->arch.ret = -EINTR; |
4482 | break; | |
4483 | } | |
4484 | spin_lock(&vc->lock); | |
4485 | kvmppc_vcore_blocked(vc); | |
4486 | spin_unlock(&vc->lock); | |
4487 | } | |
4488 | } | |
4489 | vcpu->arch.ceded = 0; | |
4490 | ||
4491 | vc->vcore_state = VCORE_INACTIVE; | |
4492 | trace_kvmppc_run_core(vc, 1); | |
4493 | ||
4494 | done: | |
4495 | kvmppc_remove_runnable(vc, vcpu); | |
8c99d345 | 4496 | trace_kvmppc_run_vcpu_exit(vcpu); |
95a6432c PM |
4497 | |
4498 | return vcpu->arch.ret; | |
4499 | ||
4500 | sigpend: | |
4501 | vcpu->stat.signal_exits++; | |
8c99d345 | 4502 | run->exit_reason = KVM_EXIT_INTR; |
95a6432c PM |
4503 | vcpu->arch.ret = -EINTR; |
4504 | out: | |
4505 | local_irq_enable(); | |
4506 | preempt_enable(); | |
4507 | goto done; | |
4508 | } | |
4509 | ||
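/*
 * Top-level run loop for a vCPU: save the user's EBB/TAR/VRSAVE state,
 * keep re-entering the guest while the exit handler asks to resume,
 * and service hcalls, page faults and passthrough completions between
 * entries.
 */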
8c99d345 | 4510 | static int kvmppc_vcpu_run_hv(struct kvm_vcpu *vcpu) |
a8606e20 | 4511 | { |
8c99d345 | 4512 | struct kvm_run *run = vcpu->run; |
a8606e20 | 4513 | int r; |
913d3ff9 | 4514 | int srcu_idx; |
ca8efa1d | 4515 | unsigned long ebb_regs[3] = {}; /* shut up GCC */ |
4c3bb4cc PM |
4516 | unsigned long user_tar = 0; |
4517 | unsigned int user_vrsave; | |
1b151ce4 | 4518 | struct kvm *kvm; |
a8606e20 | 4519 | |
af8f38b3 AG |
4520 | if (!vcpu->arch.sane) { |
4521 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
4522 | return -EINVAL; | |
4523 | } | |
4524 | ||
46a704f8 PM |
4525 | /* |
4526 | * Don't allow entry with a suspended transaction, because | |
4527 | * the guest entry/exit code will lose it. | |
4528 | * If the guest has TM enabled, save away their TM-related SPRs | |
4529 | * (they will get restored by the TM unavailable interrupt). | |
4530 | */ | |
4531 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM | |
4532 | if (cpu_has_feature(CPU_FTR_TM) && current->thread.regs && | |
4533 | (current->thread.regs->msr & MSR_TM)) { | |
4534 | if (MSR_TM_ACTIVE(current->thread.regs->msr)) { | |
4535 | run->exit_reason = KVM_EXIT_FAIL_ENTRY; | |
4536 | run->fail_entry.hardware_entry_failure_reason = 0; | |
4537 | return -EINVAL; | |
4538 | } | |
e4705715 PM |
4539 | /* Enable TM so we can read the TM SPRs */ |
4540 | mtmsr(mfmsr() | MSR_TM); | |
46a704f8 PM |
4541 | current->thread.tm_tfhar = mfspr(SPRN_TFHAR); |
4542 | current->thread.tm_tfiar = mfspr(SPRN_TFIAR); | |
4543 | current->thread.tm_texasr = mfspr(SPRN_TEXASR); | |
4544 | current->thread.regs->msr &= ~MSR_TM; | |
4545 | } | |
4546 | #endif | |
4547 | ||
7aa15842 PM |
4548 | /* |
4549 | * Force online to 1 for the sake of old userspace which doesn't | |
4550 | * set it. | |
4551 | */ | |
4552 | if (!vcpu->arch.online) { | |
4553 | atomic_inc(&vcpu->arch.vcore->online_count); | |
4554 | vcpu->arch.online = 1; | |
4555 | } | |
4556 | ||
25051b5a SW |
4557 | kvmppc_core_prepare_to_enter(vcpu); |
4558 | ||
19ccb76a PM |
4559 | /* No need to go into the guest when all we'll do is come back out */ |
4560 | if (signal_pending(current)) { | |
4561 | run->exit_reason = KVM_EXIT_INTR; | |
4562 | return -EINTR; | |
4563 | } | |
4564 | ||
1b151ce4 PM |
4565 | kvm = vcpu->kvm; |
4566 | atomic_inc(&kvm->arch.vcpus_running); | |
4567 | /* Order vcpus_running vs. mmu_ready, see kvmppc_alloc_reset_hpt */ | |
32fad281 PM |
4568 | smp_mb(); |
4569 | ||
579e633e AB |
4570 | flush_all_to_thread(current); |
4571 | ||
4c3bb4cc | 4572 | /* Save userspace EBB and other register values */ |
ca8efa1d PM |
4573 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) { |
4574 | ebb_regs[0] = mfspr(SPRN_EBBHR); | |
4575 | ebb_regs[1] = mfspr(SPRN_EBBRR); | |
4576 | ebb_regs[2] = mfspr(SPRN_BESCR); | |
4c3bb4cc | 4577 | user_tar = mfspr(SPRN_TAR); |
ca8efa1d | 4578 | } |
4c3bb4cc | 4579 | user_vrsave = mfspr(SPRN_VRSAVE); |
ca8efa1d | 4580 | |
da4ad88c | 4581 | vcpu->arch.waitp = &vcpu->arch.vcore->wait; |
8a9c8925 | 4582 | vcpu->arch.pgdir = kvm->mm->pgd; |
c7b67670 | 4583 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
19ccb76a | 4584 | |
a8606e20 | 4585 | do { |
0bf7e1b2 | 4586 | if (cpu_has_feature(CPU_FTR_ARCH_300)) |
8c99d345 | 4587 | r = kvmhv_run_single_vcpu(vcpu, ~(u64)0, |
360cae31 | 4588 | vcpu->arch.vcore->lpcr); |
95a6432c | 4589 | else |
8c99d345 | 4590 | r = kvmppc_run_vcpu(vcpu); |
a8606e20 | 4591 | |
9dc2babc NP |
4592 | if (run->exit_reason == KVM_EXIT_PAPR_HCALL) { |
4593 | if (WARN_ON_ONCE(vcpu->arch.shregs.msr & MSR_PR)) { | |
4594 | /* | |
4595 | * These should have been caught and reflected
4596 | * into the guest by now. Final sanity check: | |
4597 | * don't allow userspace to execute hcalls in | |
4598 | * the hypervisor. | |
4599 | */ | |
4600 | r = RESUME_GUEST; | |
4601 | continue; | |
4602 | } | |
3c78f78a | 4603 | trace_kvm_hcall_enter(vcpu); |
a8606e20 | 4604 | r = kvmppc_pseries_do_hcall(vcpu); |
3c78f78a | 4605 | trace_kvm_hcall_exit(vcpu, r); |
7e28e60e | 4606 | kvmppc_core_prepare_to_enter(vcpu); |
913d3ff9 | 4607 | } else if (r == RESUME_PAGE_FAULT) { |
432953b4 | 4608 | srcu_idx = srcu_read_lock(&kvm->srcu); |
8c99d345 | 4609 | r = kvmppc_book3s_hv_page_fault(vcpu, |
913d3ff9 | 4610 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); |
432953b4 | 4611 | srcu_read_unlock(&kvm->srcu, srcu_idx); |
5af50993 | 4612 | } else if (r == RESUME_PASSTHROUGH) { |
03f95332 | 4613 | if (WARN_ON(xics_on_xive())) |
5af50993 BH |
4614 | r = H_SUCCESS; |
4615 | else | |
4616 | r = kvmppc_xics_rm_complete(vcpu, 0); | |
4617 | } | |
e59d24e6 | 4618 | } while (is_kvmppc_resume_guest(r)); |
32fad281 | 4619 | |
4c3bb4cc | 4620 | /* Restore userspace EBB and other register values */ |
ca8efa1d PM |
4621 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) { |
4622 | mtspr(SPRN_EBBHR, ebb_regs[0]); | |
4623 | mtspr(SPRN_EBBRR, ebb_regs[1]); | |
4624 | mtspr(SPRN_BESCR, ebb_regs[2]); | |
4c3bb4cc | 4625 | mtspr(SPRN_TAR, user_tar); |
ca8efa1d | 4626 | } |
4c3bb4cc | 4627 | mtspr(SPRN_VRSAVE, user_vrsave); |
ca8efa1d | 4628 | |
c7b67670 | 4629 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
432953b4 | 4630 | atomic_dec(&kvm->arch.vcpus_running); |
59dc5bfc NP |
4631 | |
4632 | srr_regs_clobbered(); | |
4633 | ||
a8606e20 PM |
4634 | return r; |
4635 | } | |
4636 | ||
5b74716e | 4637 | static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, |
8dc6cca5 | 4638 | int shift, int sllp) |
5b74716e | 4639 | { |
8dc6cca5 PM |
4640 | (*sps)->page_shift = shift; |
4641 | (*sps)->slb_enc = sllp; | |
4642 | (*sps)->enc[0].page_shift = shift; | |
4643 | (*sps)->enc[0].pte_enc = kvmppc_pgsize_lp_encoding(shift, shift); | |
1f365bb0 | 4644 | /* |
8dc6cca5 | 4645 | * Add 16MB MPSS support (may get filtered out by userspace) |
1f365bb0 | 4646 | */ |
8dc6cca5 PM |
4647 | if (shift != 24) { |
4648 | int penc = kvmppc_pgsize_lp_encoding(shift, 24); | |
4649 | if (penc != -1) { | |
4650 | (*sps)->enc[1].page_shift = 24; | |
4651 | (*sps)->enc[1].pte_enc = penc; | |
4652 | } | |
1f365bb0 | 4653 | } |
5b74716e BH |
4654 | (*sps)++; |
4655 | } | |
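/*
 * For example, kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01)
 * advertises 64k base pages and, where the MMU provides an encoding,
 * a second 16M (MPSS) page size usable within the same segment.
 */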
4656 | ||
3a167bea AK |
4657 | static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, |
4658 | struct kvm_ppc_smmu_info *info) | |
5b74716e BH |
4659 | { |
4660 | struct kvm_ppc_one_seg_page_size *sps; | |
4661 | ||
e3bfed1d PM |
4662 | /* |
4663 | * POWER7, POWER8 and POWER9 all support 32 storage keys for data. | |
4664 | * POWER7 doesn't support keys for instruction accesses, | |
4665 | * POWER8 and POWER9 do. | |
4666 | */ | |
4667 | info->data_keys = 32; | |
4668 | info->instr_keys = cpu_has_feature(CPU_FTR_ARCH_207S) ? 32 : 0; | |
4669 | ||
8dc6cca5 PM |
4670 | /* POWER7, 8 and 9 all have 1T segments and 32-entry SLB */ |
4671 | info->flags = KVM_PPC_PAGE_SIZES_REAL | KVM_PPC_1T_SEGMENTS; | |
4672 | info->slb_size = 32; | |
5b74716e BH |
4673 | |
4674 | /* We only support these sizes for now, and no multi-size segments */
4675 | sps = &info->sps[0]; | |
8dc6cca5 PM |
4676 | kvmppc_add_seg_page_size(&sps, 12, 0); |
4677 | kvmppc_add_seg_page_size(&sps, 16, SLB_VSID_L | SLB_VSID_LP_01); | |
4678 | kvmppc_add_seg_page_size(&sps, 24, SLB_VSID_L); | |
5b74716e | 4679 | |
901f8c3f PM |
4680 | /* If running as a nested hypervisor, we don't support HPT guests */ |
4681 | if (kvmhv_on_pseries()) | |
4682 | info->flags |= KVM_PPC_NO_HASH; | |
4683 | ||
5b74716e BH |
4684 | return 0; |
4685 | } | |
4686 | ||
82ed3616 PM |
4687 | /* |
4688 | * Get (and clear) the dirty memory log for a memory slot. | |
4689 | */ | |
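/*
 * The memslot's dirty_bitmap is twice the size userspace sees: the
 * first half accumulates bits set by the host (paging, VPA/DTL
 * updates), while the second half is scratch space for harvesting
 * HPT or radix dirty bits before the copy-out below.
 */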
3a167bea AK |
4690 | static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, |
4691 | struct kvm_dirty_log *log) | |
82ed3616 | 4692 | { |
9f6b8029 | 4693 | struct kvm_memslots *slots; |
82ed3616 | 4694 | struct kvm_memory_slot *memslot; |
8f7b79b8 | 4695 | int i, r; |
82ed3616 | 4696 | unsigned long n; |
e641a317 | 4697 | unsigned long *buf, *p; |
8f7b79b8 | 4698 | struct kvm_vcpu *vcpu; |
82ed3616 PM |
4699 | |
4700 | mutex_lock(&kvm->slots_lock); | |
4701 | ||
4702 | r = -EINVAL; | |
bbacc0c1 | 4703 | if (log->slot >= KVM_USER_MEM_SLOTS) |
82ed3616 PM |
4704 | goto out; |
4705 | ||
9f6b8029 PB |
4706 | slots = kvm_memslots(kvm); |
4707 | memslot = id_to_memslot(slots, log->slot); | |
82ed3616 | 4708 | r = -ENOENT; |
0577d1ab | 4709 | if (!memslot || !memslot->dirty_bitmap) |
82ed3616 PM |
4710 | goto out; |
4711 | ||
8f7b79b8 | 4712 | /* |
e641a317 PM |
4713 | * Use second half of bitmap area because both HPT and radix |
4714 | * accumulate bits in the first half. | |
8f7b79b8 | 4715 | */ |
82ed3616 | 4716 | n = kvm_dirty_bitmap_bytes(memslot); |
8f7b79b8 PM |
4717 | buf = memslot->dirty_bitmap + n / sizeof(long); |
4718 | memset(buf, 0, n); | |
82ed3616 | 4719 | |
8f7b79b8 PM |
4720 | if (kvm_is_radix(kvm)) |
4721 | r = kvmppc_hv_get_dirty_log_radix(kvm, memslot, buf); | |
4722 | else | |
4723 | r = kvmppc_hv_get_dirty_log_hpt(kvm, memslot, buf); | |
82ed3616 PM |
4724 | if (r) |
4725 | goto out; | |
4726 | ||
e641a317 PM |
4727 | /* |
4728 | * We accumulate dirty bits in the first half of the | |
4729 | * memslot's dirty_bitmap area, for when pages are paged | |
4730 | * out or modified by the host directly. Pick up these | |
4731 | * bits and add them to the map. | |
4732 | */ | |
4733 | p = memslot->dirty_bitmap; | |
4734 | for (i = 0; i < n / sizeof(long); ++i) | |
4735 | buf[i] |= xchg(&p[i], 0); | |
4736 | ||
8f7b79b8 PM |
4737 | /* Harvest dirty bits from VPA and DTL updates */ |
4738 | /* Note: we never modify the SLB shadow buffer areas */ | |
4739 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
4740 | spin_lock(&vcpu->arch.vpa_update_lock); | |
4741 | kvmppc_harvest_vpa_dirty(&vcpu->arch.vpa, memslot, buf); | |
4742 | kvmppc_harvest_vpa_dirty(&vcpu->arch.dtl, memslot, buf); | |
4743 | spin_unlock(&vcpu->arch.vpa_update_lock); | |
4744 | } | |
4745 | ||
82ed3616 | 4746 | r = -EFAULT; |
8f7b79b8 | 4747 | if (copy_to_user(log->dirty_bitmap, buf, n)) |
82ed3616 PM |
4748 | goto out; |
4749 | ||
4750 | r = 0; | |
4751 | out: | |
4752 | mutex_unlock(&kvm->slots_lock); | |
4753 | return r; | |
4754 | } | |
4755 | ||
e96c81ee | 4756 | static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *slot) |
a66b48c3 | 4757 | { |
e96c81ee SC |
4758 | vfree(slot->arch.rmap); |
4759 | slot->arch.rmap = NULL; | |
a66b48c3 PM |
4760 | } |
4761 | ||
82307e67 SC |
4762 | static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, |
4763 | struct kvm_memory_slot *slot, | |
4764 | const struct kvm_userspace_memory_region *mem, | |
4765 | enum kvm_mr_change change) | |
a66b48c3 | 4766 | { |
82307e67 | 4767 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; |
aa04b4cc | 4768 | |
82307e67 SC |
4769 | if (change == KVM_MR_CREATE) { |
4770 | slot->arch.rmap = vzalloc(array_size(npages, | |
4771 | sizeof(*slot->arch.rmap))); | |
4772 | if (!slot->arch.rmap) | |
4773 | return -ENOMEM; | |
4774 | } | |
aa04b4cc | 4775 | |
a66b48c3 | 4776 | return 0; |
c77162de PM |
4777 | } |
4778 | ||
3a167bea | 4779 | static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, |
09170a49 | 4780 | const struct kvm_userspace_memory_region *mem, |
f36f3f28 | 4781 | const struct kvm_memory_slot *old, |
f032b734 BR |
4782 | const struct kvm_memory_slot *new, |
4783 | enum kvm_mr_change change) | |
c77162de | 4784 | { |
dfe49dbd | 4785 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; |
dfe49dbd | 4786 | |
a56ee9f8 YX |
4787 | /* |
4788 | * If we are making a new memslot, an address that was
4789 | * previously cached as emulated MMIO may no longer be
4790 | * emulated MMIO, so invalidate all the caches of
4791 | * emulated MMIO translations.
4792 | */ | |
4793 | if (npages) | |
4794 | atomic64_inc(&kvm->arch.mmio_update); | |
5af3e9d0 PM |
4795 | |
4796 | /* | |
4797 | * For change == KVM_MR_MOVE or KVM_MR_DELETE, higher levels | |
4798 | * have already called kvm_arch_flush_shadow_memslot() to | |
4799 | * flush shadow mappings. For KVM_MR_CREATE we have no | |
4800 | * previous mappings. So the only case to handle is | |
4801 | * KVM_MR_FLAGS_ONLY when the KVM_MEM_LOG_DIRTY_PAGES bit | |
4802 | * has been changed. | |
4803 | * For radix guests, we flush on setting KVM_MEM_LOG_DIRTY_PAGES | |
4804 | * to get rid of any THP PTEs in the partition-scoped page tables | |
4805 | * so we can track dirtiness at the page level; we flush when | |
4806 | * clearing KVM_MEM_LOG_DIRTY_PAGES so that we can go back to | |
4807 | * using THP PTEs. | |
4808 | */ | |
4809 | if (change == KVM_MR_FLAGS_ONLY && kvm_is_radix(kvm) && | |
4810 | ((new->flags ^ old->flags) & KVM_MEM_LOG_DIRTY_PAGES)) | |
4811 | kvmppc_radix_flush_memslot(kvm, old); | |
c3262257 BR |
4812 | /* |
4813 | * If UV hasn't yet called H_SVM_INIT_START, don't register memslots. | |
4814 | */ | |
4815 | if (!kvm->arch.secure_guest) | |
4816 | return; | |
4817 | ||
4818 | switch (change) { | |
4819 | case KVM_MR_CREATE: | |
a2ce7200 LD |
4820 | /* |
4821 | * @TODO kvmppc_uvmem_memslot_create() can fail and | |
4822 | * return error. Fix this. | |
4823 | */ | |
4824 | kvmppc_uvmem_memslot_create(kvm, new); | |
c3262257 BR |
4825 | break; |
4826 | case KVM_MR_DELETE: | |
a2ce7200 | 4827 | kvmppc_uvmem_memslot_delete(kvm, old); |
c3262257 BR |
4828 | break; |
4829 | default: | |
4830 | /* TODO: Handle KVM_MR_MOVE */ | |
4831 | break; | |
4832 | } | |
c77162de PM |
4833 | } |
4834 | ||
a0144e2a PM |
4835 | /* |
4836 | * Update LPCR values in kvm->arch and in vcores. | |
0d4ee88d PM |
4837 | * Caller must hold kvm->arch.mmu_setup_lock (for mutual exclusion |
4838 | * of kvm->arch.lpcr update). | |
a0144e2a PM |
4839 | */ |
4840 | void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) | |
4841 | { | |
4842 | long int i; | |
4843 | u32 cores_done = 0; | |
4844 | ||
4845 | if ((kvm->arch.lpcr & mask) == lpcr) | |
4846 | return; | |
4847 | ||
4848 | kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; | |
4849 | ||
4850 | for (i = 0; i < KVM_MAX_VCORES; ++i) { | |
4851 | struct kvmppc_vcore *vc = kvm->arch.vcores[i]; | |
4852 | if (!vc) | |
4853 | continue; | |
67145ef4 | 4854 | |
a0144e2a PM |
4855 | spin_lock(&vc->lock); |
4856 | vc->lpcr = (vc->lpcr & ~mask) | lpcr; | |
67145ef4 | 4857 | verify_lpcr(kvm, vc->lpcr); |
a0144e2a PM |
4858 | spin_unlock(&vc->lock); |
4859 | if (++cores_done >= kvm->arch.online_vcores) | |
4860 | break; | |
4861 | } | |
4862 | } | |
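/*
 * Callers pass the new bits plus a mask covering the field being
 * changed; e.g. kvmppc_update_lpcr(kvm, LPCR_GTSE, LPCR_GTSE) sets
 * guest translation shootdown enable, and (0, LPCR_GTSE) clears it.
 */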
4863 | ||
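/*
 * Each partition-table entry is a pair of doublewords: dw0 describes
 * the translation tree (VRMA page size plus HTABORG/HTABSIZE for
 * hash, or the radix PGD and tree size for radix), and dw1 carries
 * the process table, with PATB_GR set for radix guests.
 */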
ded13fc1 | 4864 | void kvmppc_setup_partition_table(struct kvm *kvm) |
7a84084c PM |
4865 | { |
4866 | unsigned long dw0, dw1; | |
4867 | ||
8cf4ecc0 PM |
4868 | if (!kvm_is_radix(kvm)) { |
4869 | /* PS field - page size for VRMA */ | |
4870 | dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | | |
4871 | ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); | |
4872 | /* HTABSIZE and HTABORG fields */ | |
4873 | dw0 |= kvm->arch.sdr1; | |
7a84084c | 4874 | |
8cf4ecc0 PM |
4875 | /* Second dword as set by userspace */ |
4876 | dw1 = kvm->arch.process_table; | |
4877 | } else { | |
4878 | dw0 = PATB_HR | radix__get_tree_size() | | |
4879 | __pa(kvm->arch.pgtable) | RADIX_PGD_INDEX_SIZE; | |
4880 | dw1 = PATB_GR | kvm->arch.process_table; | |
4881 | } | |
8e3f5fc1 | 4882 | kvmhv_set_ptbl_entry(kvm->arch.lpid, dw0, dw1); |
7a84084c PM |
4883 | } |
4884 | ||
1b151ce4 PM |
4885 | /* |
4886 | * Set up HPT (hashed page table) and RMA (real-mode area). | |
0d4ee88d | 4887 | * Must be called with kvm->arch.mmu_setup_lock held. |
1b151ce4 | 4888 | */ |
32fad281 | 4889 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) |
c77162de PM |
4890 | { |
4891 | int err = 0; | |
4892 | struct kvm *kvm = vcpu->kvm; | |
c77162de PM |
4893 | unsigned long hva; |
4894 | struct kvm_memory_slot *memslot; | |
4895 | struct vm_area_struct *vma; | |
a0144e2a | 4896 | unsigned long lpcr = 0, senc; |
c77162de | 4897 | unsigned long psize, porder; |
2c9097e4 | 4898 | int srcu_idx; |
c77162de | 4899 | |
32fad281 | 4900 | /* Allocate hashed page table (if not done already) and reset it */ |
3f9d4f5a | 4901 | if (!kvm->arch.hpt.virt) { |
aae0777f DG |
4902 | int order = KVM_DEFAULT_HPT_ORDER; |
4903 | struct kvm_hpt_info info; | |
4904 | ||
4905 | err = kvmppc_allocate_hpt(&info, order); | |
4906 | /* If we get here, it means userspace didn't specify a | |
4907 | * size explicitly. So, try successively smaller | |
4908 | * sizes if the default failed. */ | |
4909 | while ((err == -ENOMEM) && --order >= PPC_MIN_HPT_ORDER) | |
4910 | err = kvmppc_allocate_hpt(&info, order); | |
4911 | ||
4912 | if (err < 0) { | |
32fad281 PM |
4913 | pr_err("KVM: Couldn't alloc HPT\n"); |
4914 | goto out; | |
4915 | } | |
aae0777f DG |
4916 | |
4917 | kvmppc_set_hpt(kvm, &info); | |
32fad281 PM |
4918 | } |
4919 | ||
c77162de | 4920 | /* Look up the memslot for guest physical address 0 */ |
2c9097e4 | 4921 | srcu_idx = srcu_read_lock(&kvm->srcu); |
c77162de | 4922 | memslot = gfn_to_memslot(kvm, 0); |
aa04b4cc | 4923 | |
c77162de PM |
4924 | /* We must have some memory at 0 by now */ |
4925 | err = -EINVAL; | |
4926 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) | |
2c9097e4 | 4927 | goto out_srcu; |
c77162de PM |
4928 | |
4929 | /* Look up the VMA for the start of this memory slot */ | |
4930 | hva = memslot->userspace_addr; | |
d8ed45c5 | 4931 | mmap_read_lock(kvm->mm); |
900c83f8 LH |
4932 | vma = vma_lookup(kvm->mm, hva); |
4933 | if (!vma || (vma->vm_flags & VM_IO)) | |
c77162de PM |
4934 | goto up_out; |
4935 | ||
4936 | psize = vma_kernel_pagesize(vma); | |
c77162de | 4937 | |
d8ed45c5 | 4938 | mmap_read_unlock(kvm->mm); |
c77162de | 4939 | |
c17b98cf | 4940 | /* We can handle 4k, 64k or 16M pages in the VRMA */ |
debd574f PM |
4941 | if (psize >= 0x1000000) |
4942 | psize = 0x1000000; | |
4943 | else if (psize >= 0x10000) | |
4944 | psize = 0x10000; | |
4945 | else | |
4946 | psize = 0x1000; | |
4947 | porder = __ilog2(psize); | |
c77162de | 4948 | |
c17b98cf PM |
4949 | senc = slb_pgsize_encoding(psize); |
4950 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | | |
4951 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
c17b98cf PM |
4952 | /* Create HPTEs in the hash page table for the VRMA */ |
4953 | kvmppc_map_vrma(vcpu, memslot, porder); | |
aa04b4cc | 4954 | |
7a84084c PM |
4955 | /* Update VRMASD field in the LPCR */ |
4956 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { | |
4957 | /* the -4 is to account for senc values starting at 0x10 */ | |
4958 | lpcr = senc << (LPCR_VRMASD_SH - 4); | |
4959 | kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); | |
7a84084c | 4960 | } |
a0144e2a | 4961 | |
1b151ce4 | 4962 | /* Order updates to kvm->arch.lpcr etc. vs. mmu_ready */ |
c77162de | 4963 | smp_wmb(); |
c77162de | 4964 | err = 0; |
2c9097e4 PM |
4965 | out_srcu: |
4966 | srcu_read_unlock(&kvm->srcu, srcu_idx); | |
c77162de | 4967 | out: |
c77162de | 4968 | return err; |
b2b2f165 | 4969 | |
c77162de | 4970 | up_out: |
d8ed45c5 | 4971 | mmap_read_unlock(kvm->mm); |
505d6421 | 4972 | goto out_srcu; |
de56a948 PM |
4973 | } |
4974 | ||
0d4ee88d PM |
4975 | /* |
4976 | * Must be called with kvm->arch.mmu_setup_lock held and | |
4977 | * mmu_ready = 0 and no vcpus running. | |
4978 | */ | |
18c3640c PM |
4979 | int kvmppc_switch_mmu_to_hpt(struct kvm *kvm) |
4980 | { | |
aa069a99 | 4981 | if (nesting_enabled(kvm)) |
8e3f5fc1 | 4982 | kvmhv_release_all_nested(kvm); |
234ff0b7 PM |
4983 | kvmppc_rmap_reset(kvm); |
4984 | kvm->arch.process_table = 0; | |
b1c5356e | 4985 | /* Mutual exclusion with kvm_unmap_gfn_range etc. */ |
234ff0b7 PM |
4986 | spin_lock(&kvm->mmu_lock); |
4987 | kvm->arch.radix = 0; | |
4988 | spin_unlock(&kvm->mmu_lock); | |
18c3640c PM |
4989 | kvmppc_free_radix(kvm); |
4990 | kvmppc_update_lpcr(kvm, LPCR_VPM1, | |
4991 | LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); | |
18c3640c PM |
4992 | return 0; |
4993 | } | |
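/*
 * The two kvmppc_switch_mmu_to_* routines hand over the MMU-mode LPCR
 * bits: hash guests run with VPM1 set, radix guests with
 * UPRT | GTSE | HR instead, so each switch updates all four bits
 * under the same mask.
 */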
4994 | ||
0d4ee88d PM |
4995 | /* |
4996 | * Must be called with kvm->arch.mmu_setup_lock held and | |
4997 | * mmu_ready = 0 and no vcpus running. | |
4998 | */ | |
18c3640c PM |
4999 | int kvmppc_switch_mmu_to_radix(struct kvm *kvm) |
5000 | { | |
5001 | int err; | |
5002 | ||
5003 | err = kvmppc_init_vm_radix(kvm); | |
5004 | if (err) | |
5005 | return err; | |
234ff0b7 | 5006 | kvmppc_rmap_reset(kvm); |
b1c5356e | 5007 | /* Mutual exclusion with kvm_unmap_gfn_range etc. */ |
234ff0b7 PM |
5008 | spin_lock(&kvm->mmu_lock); |
5009 | kvm->arch.radix = 1; | |
5010 | spin_unlock(&kvm->mmu_lock); | |
18c3640c PM |
5011 | kvmppc_free_hpt(&kvm->arch.hpt); |
5012 | kvmppc_update_lpcr(kvm, LPCR_UPRT | LPCR_GTSE | LPCR_HR, | |
5013 | LPCR_VPM1 | LPCR_UPRT | LPCR_GTSE | LPCR_HR); | |
18c3640c PM |
5014 | return 0; |
5015 | } | |
5016 | ||
79b6c247 SW |
5017 | #ifdef CONFIG_KVM_XICS |
5018 | /* | |
5019 | * Allocate a per-core structure for managing state about which cores are | |
5020 | * running in the host versus the guest and for exchanging data between | |
5021 | * real mode KVM and CPUs running in the host.
5022 | * This is only done for the first VM. | |
5023 | * The allocated structure stays even if all VMs have stopped. | |
5024 | * It is only freed when the kvm-hv module is unloaded. | |
5025 | * It's OK for this routine to fail; we just don't support host
5026 | * core operations like redirecting H_IPI wakeups. | |
5027 | */ | |
5028 | void kvmppc_alloc_host_rm_ops(void) | |
5029 | { | |
5030 | struct kvmppc_host_rm_ops *ops; | |
5031 | unsigned long l_ops; | |
5032 | int cpu, core; | |
5033 | int size; | |
5034 | ||
5035 | /* Not the first time here ? */ | |
5036 | if (kvmppc_host_rm_ops_hv != NULL) | |
5037 | return; | |
5038 | ||
5039 | ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL); | |
5040 | if (!ops) | |
5041 | return; | |
5042 | ||
5043 | size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core); | |
5044 | ops->rm_core = kzalloc(size, GFP_KERNEL); | |
5045 | ||
5046 | if (!ops->rm_core) { | |
5047 | kfree(ops); | |
5048 | return; | |
5049 | } | |
5050 | ||
419af25f | 5051 | cpus_read_lock(); |
6f3bb809 | 5052 | |
79b6c247 SW |
5053 | for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { |
5054 | if (!cpu_online(cpu)) | |
5055 | continue; | |
5056 | ||
5057 | core = cpu >> threads_shift; | |
5058 | ops->rm_core[core].rm_state.in_host = 1; | |
5059 | } | |
5060 | ||
0c2a6606 SW |
5061 | ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; |
5062 | ||
79b6c247 SW |
5063 | /* |
5064 | * Make the contents of the kvmppc_host_rm_ops structure visible | |
5065 | * to other CPUs before we assign it to the global variable. | |
5066 | * Do an atomic assignment (no locks used here), but if someone | |
5067 | * beats us to it, just free our copy and return. | |
5068 | */ | |
5069 | smp_wmb(); | |
5070 | l_ops = (unsigned long) ops; | |
5071 | ||
5072 | if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { | |
419af25f | 5073 | cpus_read_unlock(); |
79b6c247 SW |
5074 | kfree(ops->rm_core); |
5075 | kfree(ops); | |
6f3bb809 | 5076 | return; |
79b6c247 | 5077 | } |
6f3bb809 | 5078 | |
419af25f SAS |
5079 | cpuhp_setup_state_nocalls_cpuslocked(CPUHP_KVM_PPC_BOOK3S_PREPARE, |
5080 | "ppc/kvm_book3s:prepare", | |
5081 | kvmppc_set_host_core, | |
5082 | kvmppc_clear_host_core); | |
5083 | cpus_read_unlock(); | |
79b6c247 SW |
5084 | } |
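/*
 * Publication sketch: the structure is filled in completely, the
 * smp_wmb() orders those stores, and cmpxchg64() installs the pointer
 * only if no other caller won the race; a loser just frees its copy,
 * so no lock is needed.
 */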
5085 | ||
5086 | void kvmppc_free_host_rm_ops(void) | |
5087 | { | |
5088 | if (kvmppc_host_rm_ops_hv) { | |
3f7cd919 | 5089 | cpuhp_remove_state_nocalls(CPUHP_KVM_PPC_BOOK3S_PREPARE); |
79b6c247 SW |
5090 | kfree(kvmppc_host_rm_ops_hv->rm_core); |
5091 | kfree(kvmppc_host_rm_ops_hv); | |
5092 | kvmppc_host_rm_ops_hv = NULL; | |
5093 | } | |
5094 | } | |
5095 | #endif | |
5096 | ||
3a167bea | 5097 | static int kvmppc_core_init_vm_hv(struct kvm *kvm) |
de56a948 | 5098 | { |
32fad281 | 5099 | unsigned long lpcr, lpid; |
e23a808b | 5100 | char buf[32]; |
8cf4ecc0 | 5101 | int ret; |
de56a948 | 5102 | |
ca9f4942 BR |
5103 | mutex_init(&kvm->arch.uvmem_lock); |
5104 | INIT_LIST_HEAD(&kvm->arch.uvmem_pfns); | |
0d4ee88d PM |
5105 | mutex_init(&kvm->arch.mmu_setup_lock); |
5106 | ||
32fad281 PM |
5107 | /* Allocate the guest's logical partition ID */ |
5108 | ||
5109 | lpid = kvmppc_alloc_lpid(); | |
5d226ae5 | 5110 | if ((long)lpid < 0) |
32fad281 PM |
5111 | return -ENOMEM; |
5112 | kvm->arch.lpid = lpid; | |
de56a948 | 5113 | |
79b6c247 SW |
5114 | kvmppc_alloc_host_rm_ops(); |
5115 | ||
8e3f5fc1 PM |
5116 | kvmhv_vm_nested_init(kvm); |
5117 | ||
1b400ba0 PM |
5118 | /* |
5119 | * Since we don't flush the TLB when tearing down a VM, | |
5120 | * and this lpid might have previously been used, | |
5121 | * make sure we flush on each core before running the new VM. | |
7c5b06ca PM |
5122 | * On POWER9, the tlbie in mmu_partition_table_set_entry() |
5123 | * does this flush for us. | |
1b400ba0 | 5124 | */ |
7c5b06ca PM |
5125 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
5126 | cpumask_setall(&kvm->arch.need_tlb_flush); | |
1b400ba0 | 5127 | |
699a0ea0 PM |
5128 | /* Start out with the default set of hcalls enabled */ |
5129 | memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, | |
5130 | sizeof(kvm->arch.enabled_hcalls)); | |
5131 | ||
7a84084c PM |
5132 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
5133 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); | |
aa04b4cc | 5134 | |
c17b98cf | 5135 | /* Init LPCR for virtual RMA mode */ |
f3c99f97 PM |
5136 | if (cpu_has_feature(CPU_FTR_HVMODE)) { |
5137 | kvm->arch.host_lpid = mfspr(SPRN_LPID); | |
5138 | kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); | |
5139 | lpcr &= LPCR_PECE | LPCR_LPES; | |
5140 | } else { | |
5141 | lpcr = 0; | |
5142 | } | |
c17b98cf PM |
5143 | lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | |
5144 | LPCR_VPM0 | LPCR_VPM1; | |
5145 | kvm->arch.vrma_slb_v = SLB_VSID_B_1T | | |
5146 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
5147 | /* On POWER8 turn on online bit to enable PURR/SPURR */ | |
5148 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | |
5149 | lpcr |= LPCR_ONL; | |
84f7139c PM |
5150 | /* |
5151 | * On POWER9, VPM0 bit is reserved (VPM0=1 behaviour is assumed) | |
5152 | * Set HVICE bit to enable hypervisor virtualization interrupts. | |
5af50993 BH |
5153 | * Set HEIC to prevent OS interrupts to go to hypervisor (should |
5154 | * be unnecessary but better safe than sorry in case we re-enable | |
5155 | * EE in HV mode with this LPCR still set) | |
84f7139c PM |
5156 | */ |
5157 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { | |
7a84084c | 5158 | lpcr &= ~LPCR_VPM0; |
5af50993 BH |
5159 | lpcr |= LPCR_HVICE | LPCR_HEIC; |
5160 | ||
5161 | /* | |
5162 | * If xive is enabled, we route 0x500 interrupts directly | |
5163 | * to the guest. | |
5164 | */ | |
03f95332 | 5165 | if (xics_on_xive()) |
5af50993 | 5166 | lpcr |= LPCR_LPES; |
84f7139c PM |
5167 | } |
5168 | ||
8cf4ecc0 | 5169 | /* |
18c3640c | 5170 | * If the host uses radix, the guest starts out as radix. |
8cf4ecc0 PM |
5171 | */ |
5172 | if (radix_enabled()) { | |
5173 | kvm->arch.radix = 1; | |
1b151ce4 | 5174 | kvm->arch.mmu_ready = 1; |
8cf4ecc0 PM |
5175 | lpcr &= ~LPCR_VPM1; |
5176 | lpcr |= LPCR_UPRT | LPCR_GTSE | LPCR_HR; | |
5177 | ret = kvmppc_init_vm_radix(kvm); | |
5178 | if (ret) { | |
5179 | kvmppc_free_lpid(kvm->arch.lpid); | |
5180 | return ret; | |
5181 | } | |
5182 | kvmppc_setup_partition_table(kvm); | |
5183 | } | |
5184 | ||
67145ef4 | 5185 | verify_lpcr(kvm, lpcr); |
9e368f29 | 5186 | kvm->arch.lpcr = lpcr; |
aa04b4cc | 5187 | |
5e985969 DG |
5188 | /* Initialization for future HPT resizes */ |
5189 | kvm->arch.resize_hpt = NULL; | |
5190 | ||
7c5b06ca PM |
5191 | /* |
5192 | * Work out how many sets the TLB has, for the use of | |
5193 | * the TLB invalidation loop in book3s_hv_rmhandlers.S. | |
5194 | */ | |
e8063940 AK |
5195 | if (cpu_has_feature(CPU_FTR_ARCH_31)) { |
5196 | /* | |
5197 | * P10 will flush all the congruence class with a single tlbiel | |
5198 | */ | |
5199 | kvm->arch.tlb_sets = 1; | |
5200 | } else if (radix_enabled()) | |
8cf4ecc0 PM |
5201 | kvm->arch.tlb_sets = POWER9_TLB_SETS_RADIX; /* 128 */ |
5202 | else if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
7c5b06ca PM |
5203 | kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ |
5204 | else if (cpu_has_feature(CPU_FTR_ARCH_207S)) | |
5205 | kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ | |
5206 | else | |
5207 | kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ | |
5208 | ||
512691d4 | 5209 | /* |
441c19c8 ME |
5210 | * Track that we now have a HV mode VM active. This blocks secondary |
5211 | * CPU threads from coming online. | |
512691d4 | 5212 | */ |
aaae8c79 | 5213 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
8cf4ecc0 | 5214 | kvm_hv_vm_activated(); |
512691d4 | 5215 | |
3c313524 PM |
5216 | /* |
5217 | * Initialize smt_mode depending on processor. | |
5218 | * POWER8 and earlier have to use "strict" threading, where | |
5219 | * all vCPUs in a vcore have to run on the same (sub)core, | |
5220 | * whereas on POWER9 the threads can each run a different | |
5221 | * guest. | |
5222 | */ | |
5223 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | |
5224 | kvm->arch.smt_mode = threads_per_subcore; | |
5225 | else | |
5226 | kvm->arch.smt_mode = 1; | |
57900694 | 5227 | kvm->arch.emul_smt_mode = 1; |
3c313524 | 5228 | |
e23a808b PM |
5229 | /* |
5230 | * Create a debugfs directory for the VM | |
5231 | */ | |
5232 | snprintf(buf, sizeof(buf), "vm%d", current->pid); | |
5233 | kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); | |
929f45e3 | 5234 | kvmppc_mmu_debugfs_init(kvm); |
9a94d3ee PM |
5235 | if (radix_enabled()) |
5236 | kvmhv_radix_debugfs_init(kvm); | |
e23a808b | 5237 | |
54738c09 | 5238 | return 0; |
de56a948 PM |
5239 | } |
5240 | ||
f1378b1c PM |
5241 | static void kvmppc_free_vcores(struct kvm *kvm) |
5242 | { | |
5243 | long int i; | |
5244 | ||
23316316 | 5245 | for (i = 0; i < KVM_MAX_VCORES; ++i) |
f1378b1c PM |
5246 | kfree(kvm->arch.vcores[i]); |
5247 | kvm->arch.online_vcores = 0; | |
5248 | } | |
5249 | ||
3a167bea | 5250 | static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) |
de56a948 | 5251 | { |
e23a808b PM |
5252 | debugfs_remove_recursive(kvm->arch.debugfs_dir); |
5253 | ||
aaae8c79 | 5254 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
8cf4ecc0 | 5255 | kvm_hv_vm_deactivated(); |
512691d4 | 5256 | |
f1378b1c | 5257 | kvmppc_free_vcores(kvm); |
aa04b4cc | 5258 | |
8cf4ecc0 | 5259 | |
5a319350 PM |
5260 | if (kvm_is_radix(kvm)) |
5261 | kvmppc_free_radix(kvm); | |
5262 | else | |
aae0777f | 5263 | kvmppc_free_hpt(&kvm->arch.hpt); |
c57875f5 | 5264 | |
89329c0b SJS |
5265 | /* Perform global invalidation and return lpid to the pool */ |
5266 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { | |
aa069a99 | 5267 | if (nesting_enabled(kvm)) |
8e3f5fc1 | 5268 | kvmhv_release_all_nested(kvm); |
89329c0b | 5269 | kvm->arch.process_table = 0; |
d89c69f4 PM |
5270 | if (kvm->arch.secure_guest) |
5271 | uv_svm_terminate(kvm->arch.lpid); | |
8e3f5fc1 | 5272 | kvmhv_set_ptbl_entry(kvm->arch.lpid, 0, 0); |
89329c0b | 5273 | } |
ca9f4942 | 5274 | |
89329c0b SJS |
5275 | kvmppc_free_lpid(kvm->arch.lpid); |
5276 | ||
c57875f5 | 5277 | kvmppc_free_pimap(kvm); |
de56a948 PM |
5278 | } |
5279 | ||
3a167bea | 5280 | /* We don't need to emulate any privileged instructions or dcbz */ |
8c99d345 | 5281 | static int kvmppc_core_emulate_op_hv(struct kvm_vcpu *vcpu, |
3a167bea | 5282 | unsigned int inst, int *advance) |
de56a948 | 5283 | { |
3a167bea | 5284 | return EMULATE_FAIL; |
de56a948 PM |
5285 | } |
5286 | ||
3a167bea AK |
5287 | static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, |
5288 | ulong spr_val) | |
de56a948 PM |
5289 | { |
5290 | return EMULATE_FAIL; | |
5291 | } | |
5292 | ||
3a167bea AK |
5293 | static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, |
5294 | ulong *spr_val) | |
de56a948 PM |
5295 | { |
5296 | return EMULATE_FAIL; | |
5297 | } | |
5298 | ||
3a167bea | 5299 | static int kvmppc_core_check_processor_compat_hv(void) |
de56a948 | 5300 | { |
de760db4 PM |
5301 | if (cpu_has_feature(CPU_FTR_HVMODE) && |
5302 | cpu_has_feature(CPU_FTR_ARCH_206)) | |
5303 | return 0; | |
50de596d | 5304 | |
de760db4 PM |
5305 | /* POWER9 in radix mode is capable of being a nested hypervisor. */ |
5306 | if (cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled()) | |
5307 | return 0; | |
5308 | ||
5309 | return -EIO; | |
de56a948 PM |
5310 | } |
5311 | ||
8daaafc8 SW |
5312 | #ifdef CONFIG_KVM_XICS |
5313 | ||
5314 | void kvmppc_free_pimap(struct kvm *kvm) | |
5315 | { | |
5316 | kfree(kvm->arch.pimap); | |
5317 | } | |
5318 | ||
c57875f5 | 5319 | static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void) |
8daaafc8 SW |
5320 | { |
5321 | return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL); | |
5322 | } | |
c57875f5 SW |
5323 | |
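/*
 * Map a host IRQ to a guest GSI for passthrough: record the pair in
 * the per-VM pimap so the real-mode handlers can redirect the
 * interrupt, then tell the XICS or XIVE layer about the mapping.
 */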
5324 | static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) | |
5325 | { | |
5326 | struct irq_desc *desc; | |
5327 | struct kvmppc_irq_map *irq_map; | |
5328 | struct kvmppc_passthru_irqmap *pimap; | |
5329 | struct irq_chip *chip; | |
5af50993 | 5330 | int i, rc = 0; |
c57875f5 | 5331 | |
644abbb2 SW |
5332 | if (!kvm_irq_bypass) |
5333 | return 1; | |
5334 | ||
c57875f5 SW |
5335 | desc = irq_to_desc(host_irq); |
5336 | if (!desc) | |
5337 | return -EIO; | |
5338 | ||
5339 | mutex_lock(&kvm->lock); | |
5340 | ||
5341 | pimap = kvm->arch.pimap; | |
5342 | if (pimap == NULL) { | |
5343 | /* First call, allocate structure to hold IRQ map */ | |
5344 | pimap = kvmppc_alloc_pimap(); | |
5345 | if (pimap == NULL) { | |
5346 | mutex_unlock(&kvm->lock); | |
5347 | return -ENOMEM; | |
5348 | } | |
5349 | kvm->arch.pimap = pimap; | |
5350 | } | |
5351 | ||
5352 | /* | |
5353 | * For now, we only support interrupts for which the EOI operation | |
5354 | * is an OPAL call followed by a write to XIRR (since that's
5af50993 | 5355 | * what our real-mode EOI code does), or which are XIVE interrupts.
c57875f5 SW |
5356 | */ |
5357 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
5af50993 | 5358 | if (!chip || !(is_pnv_opal_msi(chip) || is_xive_irq(chip))) { |
c57875f5 SW |
5359 | pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", |
5360 | host_irq, guest_gsi); | |
5361 | mutex_unlock(&kvm->lock); | |
5362 | return -ENOENT; | |
5363 | } | |
5364 | ||
5365 | /* | |
5366 | * See if we already have an entry for this guest IRQ number. | |
5367 | * If it's mapped to a hardware IRQ number, that's an error, | |
5368 | * otherwise re-use this entry. | |
5369 | */ | |
5370 | for (i = 0; i < pimap->n_mapped; i++) { | |
5371 | if (guest_gsi == pimap->mapped[i].v_hwirq) { | |
5372 | if (pimap->mapped[i].r_hwirq) { | |
5373 | mutex_unlock(&kvm->lock); | |
5374 | return -EINVAL; | |
5375 | } | |
5376 | break; | |
5377 | } | |
5378 | } | |
5379 | ||
5380 | if (i == KVMPPC_PIRQ_MAPPED) { | |
5381 | mutex_unlock(&kvm->lock); | |
5382 | return -EAGAIN; /* table is full */ | |
5383 | } | |
5384 | ||
5385 | irq_map = &pimap->mapped[i]; | |
5386 | ||
5387 | irq_map->v_hwirq = guest_gsi; | |
c57875f5 SW |
5388 | irq_map->desc = desc; |
5389 | ||
e3c13e56 SW |
5390 | /* |
5391 | * Order the above two stores before the next to serialize with | |
5392 | * the KVM real mode handler. | |
5393 | */ | |
5394 | smp_wmb(); | |
5395 | irq_map->r_hwirq = desc->irq_data.hwirq; | |
5396 | ||
c57875f5 SW |
5397 | if (i == pimap->n_mapped) |
5398 | pimap->n_mapped++; | |
5399 | ||
03f95332 | 5400 | if (xics_on_xive()) |
5af50993 BH |
5401 | rc = kvmppc_xive_set_mapped(kvm, guest_gsi, desc); |
5402 | else | |
5403 | kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); | |
5404 | if (rc) | |
5405 | irq_map->r_hwirq = 0; | |
5d375199 | 5406 | |
c57875f5 SW |
5407 | mutex_unlock(&kvm->lock); |
5408 | ||
5409 | return 0; | |
5410 | } | |
5411 | ||
5412 | static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) | |
5413 | { | |
5414 | struct irq_desc *desc; | |
5415 | struct kvmppc_passthru_irqmap *pimap; | |
5af50993 | 5416 | int i, rc = 0; |
c57875f5 | 5417 | |
644abbb2 SW |
5418 | if (!kvm_irq_bypass) |
5419 | return 0; | |
5420 | ||
c57875f5 SW |
5421 | desc = irq_to_desc(host_irq); |
5422 | if (!desc) | |
5423 | return -EIO; | |
5424 | ||
5425 | mutex_lock(&kvm->lock); | |
a1c52e1c ME |
5426 | if (!kvm->arch.pimap) |
5427 | goto unlock; | |
c57875f5 | 5428 | |
c57875f5 SW |
5429 | pimap = kvm->arch.pimap; |
5430 | ||
5431 | for (i = 0; i < pimap->n_mapped; i++) { | |
5432 | if (guest_gsi == pimap->mapped[i].v_hwirq) | |
5433 | break; | |
5434 | } | |
5435 | ||
5436 | if (i == pimap->n_mapped) { | |
5437 | mutex_unlock(&kvm->lock); | |
5438 | return -ENODEV; | |
5439 | } | |
5440 | ||
03f95332 | 5441 | if (xics_on_xive()) |
5af50993 BH |
5442 | rc = kvmppc_xive_clr_mapped(kvm, guest_gsi, pimap->mapped[i].desc); |
5443 | else | |
5444 | kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); | |
5d375199 | 5445 | |
5af50993 | 5446 | /* invalidate the entry (what to do on error from the above?) */
c57875f5 SW |
5447 | pimap->mapped[i].r_hwirq = 0; |
5448 | ||
5449 | /* | |
5450 | * We don't free this structure even when the count goes to | |
5451 | * zero. The structure is freed when we destroy the VM. | |
5452 | */ | |
a1c52e1c | 5453 | unlock: |
c57875f5 | 5454 | mutex_unlock(&kvm->lock); |
5af50993 | 5455 | return rc; |
c57875f5 SW |
5456 | } |
5457 | ||
5458 | static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons, | |
5459 | struct irq_bypass_producer *prod) | |
5460 | { | |
5461 | int ret = 0; | |
5462 | struct kvm_kernel_irqfd *irqfd = | |
5463 | container_of(cons, struct kvm_kernel_irqfd, consumer); | |
5464 | ||
5465 | irqfd->producer = prod; | |
5466 | ||
5467 | ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); | |
5468 | if (ret) | |
5469 | pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n", | |
5470 | prod->irq, irqfd->gsi, ret); | |
5471 | ||
5472 | return ret; | |
5473 | } | |
5474 | ||
5475 | static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons, | |
5476 | struct irq_bypass_producer *prod) | |
5477 | { | |
5478 | int ret; | |
5479 | struct kvm_kernel_irqfd *irqfd = | |
5480 | container_of(cons, struct kvm_kernel_irqfd, consumer); | |
5481 | ||
5482 | irqfd->producer = NULL; | |
5483 | ||
5484 | /* | |
5485 | * When the producer for a consumer is unregistered, we change back
5486 | * to the default external interrupt handling mode - KVM real mode
5487 | * will switch interrupts back to the host.
5488 | */ | |
5489 | ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); | |
5490 | if (ret) | |
5491 | pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n", | |
5492 | prod->irq, irqfd->gsi, ret); | |
5493 | } | |
8daaafc8 SW |
5494 | #endif |
5495 | ||
3a167bea AK |
5496 | static long kvm_arch_vm_ioctl_hv(struct file *filp, |
5497 | unsigned int ioctl, unsigned long arg) | |
5498 | { | |
5499 | struct kvm *kvm __maybe_unused = filp->private_data; | |
5500 | void __user *argp = (void __user *)arg; | |
5501 | long r; | |
5502 | ||
5503 | switch (ioctl) { | |
5504 | ||
3a167bea AK |
5505 | case KVM_PPC_ALLOCATE_HTAB: { |
5506 | u32 htab_order; | |
5507 | ||
05e6295d FR |
5508 | /* If we're a nested hypervisor, we currently only support radix */ |
5509 | if (kvmhv_on_pseries()) { | |
5510 | r = -EOPNOTSUPP; | |
5511 | break; | |
5512 | } | |
5513 | ||
3a167bea AK |
5514 | r = -EFAULT; |
5515 | if (get_user(htab_order, (u32 __user *)argp)) | |
5516 | break; | |
f98a8bf9 | 5517 | r = kvmppc_alloc_reset_hpt(kvm, htab_order); |
3a167bea AK |
5518 | if (r) |
5519 | break; | |
3a167bea AK |
5520 | r = 0; |
5521 | break; | |
5522 | } | |
5523 | ||
5524 | case KVM_PPC_GET_HTAB_FD: { | |
5525 | struct kvm_get_htab_fd ghf; | |
5526 | ||
5527 | r = -EFAULT; | |
5528 | if (copy_from_user(&ghf, argp, sizeof(ghf))) | |
5529 | break; | |
5530 | r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); | |
5531 | break; | |
5532 | } | |
5533 | ||
5e985969 DG |
5534 | case KVM_PPC_RESIZE_HPT_PREPARE: { |
5535 | struct kvm_ppc_resize_hpt rhpt; | |
5536 | ||
5537 | r = -EFAULT; | |
5538 | if (copy_from_user(&rhpt, argp, sizeof(rhpt))) | |
5539 | break; | |
5540 | ||
5541 | r = kvm_vm_ioctl_resize_hpt_prepare(kvm, &rhpt); | |
5542 | break; | |
5543 | } | |
5544 | ||
5545 | case KVM_PPC_RESIZE_HPT_COMMIT: { | |
5546 | struct kvm_ppc_resize_hpt rhpt; | |
5547 | ||
5548 | r = -EFAULT; | |
5549 | if (copy_from_user(&rhpt, argp, sizeof(rhpt))) | |
5550 | break; | |
5551 | ||
5552 | r = kvm_vm_ioctl_resize_hpt_commit(kvm, &rhpt); | |
5553 | break; | |
5554 | } | |
5555 | ||
3a167bea AK |
5556 | default: |
5557 | r = -ENOTTY; | |
5558 | } | |
5559 | ||
5560 | return r; | |
5561 | } | |
5562 | ||
699a0ea0 PM |
5563 | /* |
5564 | * List of hcall numbers to enable by default. | |
5565 | * For compatibility with old userspace, we enable by default | |
5566 | * all hcalls that were implemented before the hcall-enabling | |
5567 | * facility was added. Note this list should not include H_RTAS. | |
5568 | */ | |
5569 | static unsigned int default_hcall_list[] = { | |
5570 | H_REMOVE, | |
5571 | H_ENTER, | |
5572 | H_READ, | |
5573 | H_PROTECT, | |
5574 | H_BULK_REMOVE, | |
0fd85cb8 | 5575 | #ifdef CONFIG_SPAPR_TCE_IOMMU |
699a0ea0 PM |
5576 | H_GET_TCE, |
5577 | H_PUT_TCE, | |
0fd85cb8 | 5578 | #endif |
699a0ea0 PM |
5579 | H_SET_DABR, |
5580 | H_SET_XDABR, | |
5581 | H_CEDE, | |
5582 | H_PROD, | |
5583 | H_CONFER, | |
5584 | H_REGISTER_VPA, | |
5585 | #ifdef CONFIG_KVM_XICS | |
5586 | H_EOI, | |
5587 | H_CPPR, | |
5588 | H_IPI, | |
5589 | H_IPOLL, | |
5590 | H_XIRR, | |
5591 | H_XIRR_X, | |
5592 | #endif | |
5593 | 0 | |
5594 | }; | |
5595 | ||
5596 | static void init_default_hcalls(void) | |
5597 | { | |
5598 | int i; | |
ae2113a4 | 5599 | unsigned int hcall; |
699a0ea0 | 5600 | |
ae2113a4 PM |
5601 | for (i = 0; default_hcall_list[i]; ++i) { |
5602 | hcall = default_hcall_list[i]; | |
5603 | WARN_ON(!kvmppc_hcall_impl_hv(hcall)); | |
5604 | __set_bit(hcall / 4, default_enabled_hcalls); | |
5605 | } | |
699a0ea0 PM |
5606 | } |
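/*
 * hcall numbers are multiples of 4, so dividing by 4 gives a dense
 * index into the enabled-hcalls bitmap; H_CEDE (0xE0), for instance,
 * should land at bit 0x38.
 */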
5607 | ||
c9270132 PM |
5608 | static int kvmhv_configure_mmu(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg) |
5609 | { | |
468808bd | 5610 | unsigned long lpcr; |
8cf4ecc0 | 5611 | int radix; |
18c3640c | 5612 | int err; |
468808bd PM |
5613 | |
5614 | /* If not on a POWER9, reject it */ | |
5615 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) | |
5616 | return -ENODEV; | |
5617 | ||
5618 | /* If any unknown flags set, reject it */ | |
5619 | if (cfg->flags & ~(KVM_PPC_MMUV3_RADIX | KVM_PPC_MMUV3_GTSE)) | |
5620 | return -EINVAL; | |
5621 | ||
468808bd | 5622 | /* GR (guest radix) bit in process_table field must match */ |
18c3640c | 5623 | radix = !!(cfg->flags & KVM_PPC_MMUV3_RADIX); |
8cf4ecc0 | 5624 | if (!!(cfg->process_table & PATB_GR) != radix) |
468808bd PM |
5625 | return -EINVAL; |
5626 | ||
5627 | /* Process table size field must be reasonable, i.e. <= 24 */ | |
5628 | if ((cfg->process_table & PRTS_MASK) > 24) | |
5629 | return -EINVAL; | |
5630 | ||
18c3640c PM |
5631 | /* We can change a guest to/from radix now, if the host is radix */ |
5632 | if (radix && !radix_enabled()) | |
5633 | return -EINVAL; | |
5634 | ||
de760db4 PM |
5635 | /* If we're a nested hypervisor, we currently only support radix */ |
5636 | if (kvmhv_on_pseries() && !radix) | |
5637 | return -EINVAL; | |
5638 | ||
0d4ee88d | 5639 | mutex_lock(&kvm->arch.mmu_setup_lock); |
18c3640c PM |
5640 | if (radix != kvm_is_radix(kvm)) { |
5641 | if (kvm->arch.mmu_ready) { | |
5642 | kvm->arch.mmu_ready = 0; | |
5643 | /* order mmu_ready vs. vcpus_running */ | |
5644 | smp_mb(); | |
5645 | if (atomic_read(&kvm->arch.vcpus_running)) { | |
5646 | kvm->arch.mmu_ready = 1; | |
5647 | err = -EBUSY; | |
5648 | goto out_unlock; | |
5649 | } | |
5650 | } | |
5651 | if (radix) | |
5652 | err = kvmppc_switch_mmu_to_radix(kvm); | |
5653 | else | |
5654 | err = kvmppc_switch_mmu_to_hpt(kvm); | |
5655 | if (err) | |
5656 | goto out_unlock; | |
5657 | } | |
5658 | ||
468808bd PM |
5659 | kvm->arch.process_table = cfg->process_table; |
5660 | kvmppc_setup_partition_table(kvm); | |
5661 | ||
5662 | lpcr = (cfg->flags & KVM_PPC_MMUV3_GTSE) ? LPCR_GTSE : 0; | |
5663 | kvmppc_update_lpcr(kvm, lpcr, LPCR_GTSE); | |
18c3640c | 5664 | err = 0; |
468808bd | 5665 | |
18c3640c | 5666 | out_unlock: |
0d4ee88d | 5667 | mutex_unlock(&kvm->arch.mmu_setup_lock); |
18c3640c | 5668 | return err; |
c9270132 PM |
5669 | } |
5670 | ||
aa069a99 PM |
5671 | static int kvmhv_enable_nested(struct kvm *kvm) |
5672 | { | |
5673 | if (!nested) | |
5674 | return -EPERM; | |
cbcff8b1 | 5675 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
aa069a99 | 5676 | return -ENODEV; |
0bf7e1b2 | 5677 | if (!radix_enabled()) |
aa069a99 PM |
5678 | return -ENODEV; |
5679 | ||
5680 | /* kvm == NULL means the caller is testing if the capability exists */ | |
5681 | if (kvm) | |
5682 | kvm->arch.nested_enable = true; | |
5683 | return 0; | |
5684 | } | |
5685 | ||
dceadcf9 SJS |
5686 | static int kvmhv_load_from_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, |
5687 | int size) | |
5688 | { | |
5689 | int rc = -EINVAL; | |
5690 | ||
5691 | if (kvmhv_vcpu_is_radix(vcpu)) { | |
5692 | rc = kvmhv_copy_from_guest_radix(vcpu, *eaddr, ptr, size); | |
5693 | ||
5694 | if (rc > 0) | |
5695 | rc = -EINVAL; | |
5696 | } | |
5697 | ||
5698 | /* For now quadrants are the only way to access nested guest memory */ | |
5699 | if (rc && vcpu->arch.nested) | |
5700 | rc = -EAGAIN; | |
5701 | ||
5702 | return rc; | |
5703 | } | |
5704 | ||
5705 | static int kvmhv_store_to_eaddr(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr, | |
5706 | int size) | |
5707 | { | |
5708 | int rc = -EINVAL; | |
5709 | ||
5710 | if (kvmhv_vcpu_is_radix(vcpu)) { | |
5711 | rc = kvmhv_copy_to_guest_radix(vcpu, *eaddr, ptr, size); | |
5712 | ||
5713 | if (rc > 0) | |
5714 | rc = -EINVAL; | |
5715 | } | |
5716 | ||
5717 | /* For now quadrants are the only way to access nested guest memory */ | |
5718 | if (rc && vcpu->arch.nested) | |
5719 | rc = -EAGAIN; | |
5720 | ||
5721 | return rc; | |
5722 | } | |
5723 | ||
22945688 BR |
5724 | static void unpin_vpa_reset(struct kvm *kvm, struct kvmppc_vpa *vpa) |
5725 | { | |
5726 | unpin_vpa(kvm, vpa); | |
5727 | vpa->gpa = 0; | |
5728 | vpa->pinned_addr = NULL; | |
5729 | vpa->dirty = false; | |
5730 | vpa->update_pending = 0; | |
5731 | } | |
5732 | ||
9a5788c6 PM |
5733 | /* |
5734 | * Enable a guest to become a secure VM, or test whether | |
5735 | * that could be enabled. | |
5736 | * Called when the KVM_CAP_PPC_SECURE_GUEST capability is | |
5737 | * tested (kvm == NULL) or enabled (kvm != NULL). | |
5738 | */ | |
5739 | static int kvmhv_enable_svm(struct kvm *kvm) | |
5740 | { | |
5741 | if (!kvmppc_uvmem_available()) | |
5742 | return -EINVAL; | |
5743 | if (kvm) | |
5744 | kvm->arch.svm_enabled = 1; | |
5745 | return 0; | |
5746 | } | |
5747 | ||
22945688 BR |
5748 | /* |
5749 | * IOCTL handler to turn off secure mode of guest | |
5750 | * | |
5751 | * - Release all device pages | |
5752 | * - Issue ucall to terminate the guest on the UV side | |
5753 | * - Unpin the VPA pages. | |
5754 | * - Reinit the partition scoped page tables | |
5755 | */ | |
5756 | static int kvmhv_svm_off(struct kvm *kvm) | |
5757 | { | |
5758 | struct kvm_vcpu *vcpu; | |
5759 | int mmu_was_ready; | |
5760 | int srcu_idx; | |
5761 | int ret = 0; | |
5762 | int i; | |
5763 | ||
5764 | if (!(kvm->arch.secure_guest & KVMPPC_SECURE_INIT_START)) | |
5765 | return ret; | |
5766 | ||
5767 | mutex_lock(&kvm->arch.mmu_setup_lock); | |
5768 | mmu_was_ready = kvm->arch.mmu_ready; | |
5769 | if (kvm->arch.mmu_ready) { | |
5770 | kvm->arch.mmu_ready = 0; | |
5771 | /* order mmu_ready vs. vcpus_running */ | |
5772 | smp_mb(); | |
5773 | if (atomic_read(&kvm->arch.vcpus_running)) { | |
5774 | kvm->arch.mmu_ready = 1; | |
5775 | ret = -EBUSY; | |
5776 | goto out; | |
5777 | } | |
5778 | } | |
5779 | ||
5780 | srcu_idx = srcu_read_lock(&kvm->srcu); | |
5781 | for (i = 0; i < KVM_ADDRESS_SPACE_NUM; i++) { | |
5782 | struct kvm_memory_slot *memslot; | |
5783 | struct kvm_memslots *slots = __kvm_memslots(kvm, i); | |
5784 | ||
5785 | if (!slots) | |
5786 | continue; | |
5787 | ||
5788 | kvm_for_each_memslot(memslot, slots) { | |
ce477a7a | 5789 | kvmppc_uvmem_drop_pages(memslot, kvm, true); |
22945688 BR |
5790 | uv_unregister_mem_slot(kvm->arch.lpid, memslot->id); |
5791 | } | |
5792 | } | |
5793 | srcu_read_unlock(&kvm->srcu, srcu_idx); | |
5794 | ||
5795 | ret = uv_svm_terminate(kvm->arch.lpid); | |
5796 | if (ret != U_SUCCESS) { | |
5797 | ret = -EINVAL; | |
5798 | goto out; | |
5799 | } | |
5800 | ||
5801 | /* | |
5802 | * When a secure guest is reset, all the guest pages are sent
5803 | * to UV via UV_PAGE_IN before the non-boot vcpus get a | |
5804 | * chance to run and unpin their VPA pages. Unpinning of all | |
5805 | * VPA pages is done here explicitly so that VPA pages | |
5806 | * can be migrated to the secure side. | |
5807 | * | |
5808 | * This is required for the secure SMP guest to reboot
5809 | * correctly. | |
5810 | */ | |
5811 | kvm_for_each_vcpu(i, vcpu, kvm) { | |
5812 | spin_lock(&vcpu->arch.vpa_update_lock); | |
5813 | unpin_vpa_reset(kvm, &vcpu->arch.dtl); | |
5814 | unpin_vpa_reset(kvm, &vcpu->arch.slb_shadow); | |
5815 | unpin_vpa_reset(kvm, &vcpu->arch.vpa); | |
5816 | spin_unlock(&vcpu->arch.vpa_update_lock); | |
5817 | } | |
5818 | ||
5819 | kvmppc_setup_partition_table(kvm); | |
5820 | kvm->arch.secure_guest = 0; | |
5821 | kvm->arch.mmu_ready = mmu_was_ready; | |
5822 | out: | |
5823 | mutex_unlock(&kvm->arch.mmu_setup_lock); | |
5824 | return ret; | |
5825 | } | |
5826 | ||
d9a47eda RB |
5827 | static int kvmhv_enable_dawr1(struct kvm *kvm) |
5828 | { | |
5829 | if (!cpu_has_feature(CPU_FTR_DAWR1)) | |
5830 | return -ENODEV; | |
5831 | ||
5832 | /* kvm == NULL means the caller is testing if the capability exists */ | |
5833 | if (kvm) | |
5834 | kvm->arch.dawr1_enabled = true; | |
5835 | return 0; | |
5836 | } | |
5837 | ||
a722076e FR |
5838 | static bool kvmppc_hash_v3_possible(void) |
5839 | { | |
fae5c9f3 | 5840 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
a722076e FR |
5841 | return false; |
5842 | ||
fae5c9f3 | 5843 | if (!cpu_has_feature(CPU_FTR_HVMODE)) |
a722076e FR |
5844 | return false; |
5845 | ||
fae5c9f3 NP |
5846 | /* |
5847 | * POWER9 chips before version 2.02 can't have some threads in | |
5848 | * HPT mode and some in radix mode on the same core. | |
5849 | */ | |
5850 | if (radix_enabled()) { | |
5851 | unsigned int pvr = mfspr(SPRN_PVR); | |
5852 | if ((pvr >> 16) == PVR_POWER9 && | |
5853 | (((pvr & 0xe000) == 0 && (pvr & 0xfff) < 0x202) || | |
5854 | ((pvr & 0xe000) == 0x2000 && (pvr & 0xfff) < 0x101))) | |
5855 | return false; | |
5856 | } | |
5857 | ||
5858 | return true; | |
a722076e FR |
5859 | } |
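/*
 * The PVR check above decodes the POWER9 revision: the 0xe000 field
 * selects the chip variant and the low 12 bits give the DD level, so
 * with radix enabled, DD levels below 2.02 (first variant) or 1.01
 * (second variant) are rejected for hash guests.
 */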
5860 | ||
cbbc58d4 | 5861 | static struct kvmppc_ops kvm_ops_hv = { |
3a167bea AK |
5862 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, |
5863 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, | |
5864 | .get_one_reg = kvmppc_get_one_reg_hv, | |
5865 | .set_one_reg = kvmppc_set_one_reg_hv, | |
5866 | .vcpu_load = kvmppc_core_vcpu_load_hv, | |
5867 | .vcpu_put = kvmppc_core_vcpu_put_hv, | |
87a45e07 | 5868 | .inject_interrupt = kvmppc_inject_interrupt_hv, |
3a167bea AK |
5869 | .set_msr = kvmppc_set_msr_hv, |
5870 | .vcpu_run = kvmppc_vcpu_run_hv, | |
5871 | .vcpu_create = kvmppc_core_vcpu_create_hv, | |
5872 | .vcpu_free = kvmppc_core_vcpu_free_hv, | |
5873 | .check_requests = kvmppc_core_check_requests_hv, | |
5874 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, | |
5875 | .flush_memslot = kvmppc_core_flush_memslot_hv, | |
5876 | .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, | |
5877 | .commit_memory_region = kvmppc_core_commit_memory_region_hv, | |
b1c5356e SC |
5878 | .unmap_gfn_range = kvm_unmap_gfn_range_hv, |
5879 | .age_gfn = kvm_age_gfn_hv, | |
5880 | .test_age_gfn = kvm_test_age_gfn_hv, | |
5881 | .set_spte_gfn = kvm_set_spte_gfn_hv, | |
3a167bea | 5882 | .free_memslot = kvmppc_core_free_memslot_hv, |
3a167bea AK |
5883 | .init_vm = kvmppc_core_init_vm_hv, |
5884 | .destroy_vm = kvmppc_core_destroy_vm_hv, | |
3a167bea AK |
5885 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, |
5886 | .emulate_op = kvmppc_core_emulate_op_hv, | |
5887 | .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, | |
5888 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, | |
5889 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, | |
5890 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, | |
ae2113a4 | 5891 | .hcall_implemented = kvmppc_hcall_impl_hv, |
c57875f5 SW |
5892 | #ifdef CONFIG_KVM_XICS |
5893 | .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv, | |
5894 | .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv, | |
5895 | #endif | |
c9270132 PM |
5896 | .configure_mmu = kvmhv_configure_mmu, |
5897 | .get_rmmu_info = kvmhv_get_rmmu_info, | |
3c313524 | 5898 | .set_smt_mode = kvmhv_set_smt_mode, |
aa069a99 | 5899 | .enable_nested = kvmhv_enable_nested, |
dceadcf9 SJS |
5900 | .load_from_eaddr = kvmhv_load_from_eaddr, |
5901 | .store_to_eaddr = kvmhv_store_to_eaddr, | |
9a5788c6 | 5902 | .enable_svm = kvmhv_enable_svm, |
22945688 | 5903 | .svm_off = kvmhv_svm_off, |
d9a47eda | 5904 | .enable_dawr1 = kvmhv_enable_dawr1, |
a722076e | 5905 | .hash_v3_possible = kvmppc_hash_v3_possible, |
3a167bea AK |
5906 | }; |
5907 | ||
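kvm_ops_hv is this module's entry in the book3s ops-table indirection: generic code never calls the functions above directly, it dispatches through the installed ops pointer. A simplified sketch of that pattern (the actual generic-code wrappers live in book3s.c and differ in detail):

/* Sketch of the dispatch: kvmppc_hv_ops is installed at module
 * init (below), and per-VM code reaches it via kvm->arch.kvm_ops.
 */
static int run_vcpu_via_ops(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.kvm_ops->vcpu_run(vcpu);
}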
fd7bacbc MS |
5908 | static int kvm_init_subcore_bitmap(void) |
5909 | { | |
5910 | int i, j; | |
5911 | int nr_cores = cpu_nr_cores(); | |
5912 | struct sibling_subcore_state *sibling_subcore_state; | |
5913 | ||
5914 | for (i = 0; i < nr_cores; i++) { | |
5915 | int first_cpu = i * threads_per_core; | |
5916 | int node = cpu_to_node(first_cpu); | |
5917 | ||
5918 | /* Ignore if it is already allocated. */ | |
d2e60075 | 5919 | if (paca_ptrs[first_cpu]->sibling_subcore_state) |
fd7bacbc MS |
5920 | continue; |
5921 | ||
5922 | sibling_subcore_state = | |
08434ab4 | 5923 | kzalloc_node(sizeof(struct sibling_subcore_state), |
fd7bacbc MS |
5924 | GFP_KERNEL, node); |
5925 | if (!sibling_subcore_state) | |
5926 | return -ENOMEM; | |
5927 | ||
fd7bacbc MS |
5928 | |
5929 | for (j = 0; j < threads_per_core; j++) { | |
5930 | int cpu = first_cpu + j; | |
5931 | ||
d2e60075 NP |
5932 | paca_ptrs[cpu]->sibling_subcore_state = |
5933 | sibling_subcore_state; | |
fd7bacbc MS |
5934 | } |
5935 | } | |
5936 | return 0; | |
5937 | } | |
5938 | ||
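Note that the allocation above is per core, not per thread: the inner loop points every sibling's paca at the same object. A sketch of the consequence (the helper is hypothetical, not part of this file):

/* Hypothetical helper: because all sibling threads were wired to
 * one sibling_subcore_state, any CPU can reach the shared per-core
 * state through its own paca.
 */
static struct sibling_subcore_state *subcore_state_of(int cpu)
{
	return paca_ptrs[cpu]->sibling_subcore_state;
}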
5a319350 PM |
5939 | static int kvmppc_radix_possible(void) |
5940 | { | |
5941 | return cpu_has_feature(CPU_FTR_ARCH_300) && radix_enabled(); | |
5942 | } | |
5943 | ||
3a167bea | 5944 | static int kvmppc_book3s_init_hv(void) |
de56a948 PM |
5945 | { |
5946 | int r; | |
2275d7b5 NP |
5947 | |
5948 | if (!tlbie_capable) { | |
5949 | pr_err("KVM-HV: Host does not support TLBIE\n"); | |
5950 | return -ENODEV; | |
5951 | } | |
5952 | ||
cbbc58d4 AK |
5953 | /* |
5954 | * FIXME!! Do we need to check on all cpus ? | |
5955 | */ | |
5956 | r = kvmppc_core_check_processor_compat_hv(); | |
5957 | if (r < 0) | |
739e2425 | 5958 | return -ENODEV; |
de56a948 | 5959 | |
8e3f5fc1 PM |
5960 | r = kvmhv_nested_init(); |
5961 | if (r) | |
5962 | return r; | |
5963 | ||
fd7bacbc MS |
5964 | r = kvm_init_subcore_bitmap(); |
5965 | if (r) | |
5966 | return r; | |
5967 | ||
f725758b PM |
5968 | /* |
5969 | * We need a way of accessing the XICS interrupt controller, | |
d2e60075 | 5970 | * either directly, via paca_ptrs[cpu]->kvm_hstate.xics_phys, or |
f725758b PM |
5971 | * indirectly, via OPAL. |
5972 | */ | |
5973 | #ifdef CONFIG_SMP | |
03f95332 | 5974 | if (!xics_on_xive() && !kvmhv_on_pseries() && |
f3c18e93 | 5975 | !local_paca->kvm_hstate.xics_phys) { |
f725758b PM |
5976 | struct device_node *np; |
5977 | ||
5978 | np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); | |
5979 | if (!np) { | |
5980 | pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); | |
5981 | return -ENODEV; | |
5982 | } | |
51eaa08f NMG |
5983 | /* presence of intc confirmed - node can be dropped again */ |
5984 | of_node_put(np); | |
f725758b PM |
5985 | } |
5986 | #endif | |
5987 | ||
cbbc58d4 AK |
5988 | kvm_ops_hv.owner = THIS_MODULE; |
5989 | kvmppc_hv_ops = &kvm_ops_hv; | |
de56a948 | 5990 | |
699a0ea0 PM |
5991 | init_default_hcalls(); |
5992 | ||
ec257165 PM |
5993 | init_vcore_lists(); |
5994 | ||
cbbc58d4 | 5995 | r = kvmppc_mmu_hv_init(); |
5a319350 PM |
5996 | if (r) |
5997 | return r; | |
5998 | ||
5999 | if (kvmppc_radix_possible()) | |
6000 | r = kvmppc_radix_init(); | |
00608e1f | 6001 | |
ca9f4942 BR |
6002 | r = kvmppc_uvmem_init(); |
6003 | if (r < 0) | |
6004 | pr_err("KVM-HV: kvmppc_uvmem_init failed %d\n", r); | |
6005 | ||
de56a948 PM |
6006 | return r; |
6007 | } | |
6008 | ||
3a167bea | 6009 | static void kvmppc_book3s_exit_hv(void) |
de56a948 | 6010 | { |
ca9f4942 | 6011 | kvmppc_uvmem_free(); |
79b6c247 | 6012 | kvmppc_free_host_rm_ops(); |
5a319350 PM |
6013 | if (kvmppc_radix_possible()) |
6014 | kvmppc_radix_exit(); | |
cbbc58d4 | 6015 | kvmppc_hv_ops = NULL; |
8e3f5fc1 | 6016 | kvmhv_nested_exit(); |
de56a948 PM |
6017 | } |
6018 | ||
3a167bea AK |
6019 | module_init(kvmppc_book3s_init_hv); |
6020 | module_exit(kvmppc_book3s_exit_hv); | |
2ba9f0d8 | 6021 | MODULE_LICENSE("GPL"); |
398a76c6 AG |
6022 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
6023 | MODULE_ALIAS("devname:kvm"); |
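The devname alias above lets kmod/udev load this module on first open of /dev/kvm. A minimal userspace sketch exercising that path (standard KVM API, error handling elided):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/kvm.h>

/* Opening /dev/kvm can auto-load the module via the alias above;
 * creating a VM then runs through the kvm_ops_hv callbacks.
 */
int main(void)
{
	int kvm_fd = open("/dev/kvm", O_RDWR | O_CLOEXEC);
	int vm_fd = ioctl(kvm_fd, KVM_CREATE_VM, 0);

	close(vm_fd);
	close(kvm_fd);
	return 0;
}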