/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Paul Mackerras <paulus@au1.ibm.com>
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *
 * Description: KVM functions specific to running on Book 3S
 * processors in hypervisor mode (specifically POWER7 and later).
 *
 * This file is derived from arch/powerpc/kvm/book3s.c,
 * by Alexander Graf <agraf@suse.de>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/err.h>
#include <linux/slab.h>
#include <linux/preempt.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/export.h>
#include <linux/fs.h>
#include <linux/anon_inodes.h>
#include <linux/cpu.h>
#include <linux/cpumask.h>
#include <linux/spinlock.h>
#include <linux/page-flags.h>
#include <linux/srcu.h>
#include <linux/miscdevice.h>
#include <linux/debugfs.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/lppaca.h>
#include <asm/processor.h>
#include <asm/cputhreads.h>
#include <asm/page.h>
#include <asm/hvcall.h>
#include <asm/switch_to.h>
#include <asm/smp.h>
#include <asm/dbell.h>
#include <asm/hmi.h>
#include <asm/pnv-pci.h>
#include <asm/mmu.h>
#include <asm/opal.h>
#include <asm/xics.h>
#include <linux/gfp.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/hugetlb.h>
#include <linux/kvm_irqfd.h>
#include <linux/irqbypass.h>
#include <linux/module.h>
#include <linux/compiler.h>
#include <linux/of.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_hv.h"

/* #define EXIT_DEBUG */
/* #define EXIT_DEBUG_SIMPLE */
/* #define EXIT_DEBUG_INT */

/* Used to indicate that a guest page fault needs to be handled */
#define RESUME_PAGE_FAULT	(RESUME_GUEST | RESUME_FLAG_ARCH1)
/* Used to indicate that a guest passthrough interrupt needs to be handled */
#define RESUME_PASSTHROUGH	(RESUME_GUEST | RESUME_FLAG_ARCH2)

/* Used as a "null" value for timebase values */
#define TB_NIL	(~(u64)0)

static DECLARE_BITMAP(default_enabled_hcalls, MAX_HCALL_OPCODE/4 + 1);

static int dynamic_mt_modes = 6;
module_param(dynamic_mt_modes, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(dynamic_mt_modes, "Set of allowed dynamic micro-threading modes: 0 (= none), 2, 4, or 6 (= 2 or 4)");
static int target_smt_mode;
module_param(target_smt_mode, int, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(target_smt_mode, "Target threads per core (0 = max)");

#ifdef CONFIG_KVM_XICS
static struct kernel_param_ops module_param_ops = {
	.set = param_set_int,
	.get = param_get_int,
};

module_param_cb(kvm_irq_bypass, &module_param_ops, &kvm_irq_bypass,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(kvm_irq_bypass, "Bypass passthrough interrupt optimization");

module_param_cb(h_ipi_redirect, &module_param_ops, &h_ipi_redirect,
		S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(h_ipi_redirect, "Redirect H_IPI wakeup to a free host core");
#endif

/* Maximum halt poll interval defaults to KVM_HALT_POLL_NS_DEFAULT */
static unsigned int halt_poll_max_ns = KVM_HALT_POLL_NS_DEFAULT;
module_param(halt_poll_max_ns, uint, S_IRUGO | S_IWUSR);
MODULE_PARM_DESC(halt_poll_max_ns, "Maximum halt poll time in ns");

/* Factor by which the vcore halt poll interval is grown, default is to double */
static unsigned int halt_poll_ns_grow = 2;
module_param(halt_poll_ns_grow, int, S_IRUGO);
MODULE_PARM_DESC(halt_poll_ns_grow, "Factor halt poll time is grown by");

/* Factor by which the vcore halt poll interval is shrunk, default is to reset */
static unsigned int halt_poll_ns_shrink;
module_param(halt_poll_ns_shrink, int, S_IRUGO);
MODULE_PARM_DESC(halt_poll_ns_shrink, "Factor halt poll time is shrunk by");

static void kvmppc_end_cede(struct kvm_vcpu *vcpu);
static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu);

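/*
 * Return the next runnable vcpu in vcore *vc after index *ip,
 * advancing *ip to that vcpu's index, or NULL if none remains.
 */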
static inline struct kvm_vcpu *next_runnable_thread(struct kvmppc_vcore *vc,
		int *ip)
{
	int i = *ip;
	struct kvm_vcpu *vcpu;

	while (++i < MAX_SMT_THREADS) {
		vcpu = READ_ONCE(vc->runnable_threads[i]);
		if (vcpu) {
			*ip = i;
			return vcpu;
		}
	}
	return NULL;
}

/* Used to traverse the list of runnable threads for a given vcore */
#define for_each_runnable_thread(i, vcpu, vc) \
	for (i = -1; (vcpu = next_runnable_thread(vc, &i)); )

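/*
 * Try to send an IPI to physical thread @cpu using a msgsnd doorbell
 * (any cpu on POWER9, same-core threads on POWER8), falling back to
 * the XICS/OPAL interrupt controller.  Returns false if the caller
 * must fall back to smp_send_reschedule().
 */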
static bool kvmppc_ipi_thread(int cpu)
{
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd to IPI any cpu */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		smp_mb();
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return true;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S)) {
		preempt_disable();
		if (cpu_first_thread_sibling(cpu) ==
		    cpu_first_thread_sibling(smp_processor_id())) {
			msg |= cpu_thread_in_core(cpu);
			smp_mb();
			__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
			preempt_enable();
			return true;
		}
		preempt_enable();
	}

#if defined(CONFIG_PPC_ICP_NATIVE) && defined(CONFIG_SMP)
	if (cpu >= 0 && cpu < nr_cpu_ids) {
		if (paca[cpu].kvm_hstate.xics_phys) {
			xics_wake_cpu(cpu);
			return true;
		}
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
		return true;
	}
#endif

	return false;
}

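/*
 * Wake a vcpu that is ceded or sleeping: rouse its wait queue, then
 * interrupt the physical thread it is running on so the wakeup is
 * noticed promptly.
 */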
static void kvmppc_fast_vcpu_kick_hv(struct kvm_vcpu *vcpu)
{
	int cpu;
	struct swait_queue_head *wqp;

	wqp = kvm_arch_vcpu_wq(vcpu);
	if (swait_active(wqp)) {
		swake_up(wqp);
		++vcpu->stat.halt_wakeup;
	}

	if (kvmppc_ipi_thread(vcpu->arch.thread_cpu))
		return;

	/* CPU points to the first thread of the core */
	cpu = vcpu->cpu;
	if (cpu >= 0 && cpu < nr_cpu_ids && cpu_online(cpu))
		smp_send_reschedule(cpu);
}

/*
 * We use the vcpu_load/put functions to measure stolen time.
 * Stolen time is counted as time when either the vcpu is able to
 * run as part of a virtual core, but the task running the vcore
 * is preempted or sleeping, or when the vcpu needs something done
 * in the kernel by the task running the vcpu, but that task is
 * preempted or sleeping.  Those two things have to be counted
 * separately, since one of the vcpu tasks will take on the job
 * of running the core, and the other vcpu tasks in the vcore will
 * sleep waiting for it to do that, but that sleep shouldn't count
 * as stolen time.
 *
 * Hence we accumulate stolen time when the vcpu can run as part of
 * a vcore using vc->stolen_tb, and the stolen time when the vcpu
 * needs its task to do other things in the kernel (for example,
 * service a page fault) in busy_stolen.  We don't accumulate
 * stolen time for a vcore when it is inactive, or for a vcpu
 * when it is in state RUNNING or NOTREADY.  NOTREADY is a bit of
 * a misnomer; it means that the vcpu task is not executing in
 * the KVM_VCPU_RUN ioctl, i.e. it is in userspace or elsewhere in
 * the kernel.  We don't have any way of dividing up that time
 * between time that the vcpu is genuinely stopped, time that
 * the task is actively working on behalf of the vcpu, and time
 * that the task is preempted, so we don't count any of it as
 * stolen.
 *
 * Updates to busy_stolen are protected by arch.tbacct_lock;
 * updates to vc->stolen_tb are protected by the vcore->stoltb_lock
 * lock.  The stolen times are measured in units of timebase ticks.
 * (Note that the != TB_NIL checks below are purely defensive;
 * they should never fail.)
 */

static void kvmppc_core_start_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	vc->preempt_tb = mftb();
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_end_stolen(struct kvmppc_vcore *vc)
{
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	if (vc->preempt_tb != TB_NIL) {
		vc->stolen_tb += mftb() - vc->preempt_tb;
		vc->preempt_tb = TB_NIL;
	}
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
}

static void kvmppc_core_vcpu_load_hv(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	/*
	 * We can test vc->runner without taking the vcore lock,
	 * because only this task ever sets vc->runner to this
	 * vcpu, and once it is set to this vcpu, only this task
	 * ever sets it to NULL.
	 */
	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_end_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST &&
	    vcpu->arch.busy_preempt != TB_NIL) {
		vcpu->arch.busy_stolen += mftb() - vcpu->arch.busy_preempt;
		vcpu->arch.busy_preempt = TB_NIL;
	}
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_core_vcpu_put_hv(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	unsigned long flags;

	if (vc->runner == vcpu && vc->vcore_state >= VCORE_SLEEPING)
		kvmppc_core_start_stolen(vc);

	spin_lock_irqsave(&vcpu->arch.tbacct_lock, flags);
	if (vcpu->arch.state == KVMPPC_VCPU_BUSY_IN_HOST)
		vcpu->arch.busy_preempt = mftb();
	spin_unlock_irqrestore(&vcpu->arch.tbacct_lock, flags);
}

static void kvmppc_set_msr_hv(struct kvm_vcpu *vcpu, u64 msr)
{
	/*
	 * Check for illegal transactional state bit combination
	 * and if we find it, force the TS field to a safe state.
	 */
	if ((msr & MSR_TS_MASK) == MSR_TS_MASK)
		msr &= ~MSR_TS_MASK;
	vcpu->arch.shregs.msr = msr;
	kvmppc_end_cede(vcpu);
}

static void kvmppc_set_pvr_hv(struct kvm_vcpu *vcpu, u32 pvr)
{
	vcpu->arch.pvr = pvr;
}

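/*
 * Set the guest's architecture compatibility level by computing the
 * Processor Compatibility Register (PCR) bits that limit the vcore
 * to the requested ISA level, rejecting levels that the host CPU
 * cannot emulate.
 */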
static int kvmppc_set_arch_compat(struct kvm_vcpu *vcpu, u32 arch_compat)
{
	unsigned long pcr = 0;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;

	if (arch_compat) {
		switch (arch_compat) {
		case PVR_ARCH_205:
			/*
			 * If an arch bit is set in PCR, all the defined
			 * higher-order arch bits also have to be set.
			 */
			pcr = PCR_ARCH_206 | PCR_ARCH_205;
			break;
		case PVR_ARCH_206:
		case PVR_ARCH_206p:
			pcr = PCR_ARCH_206;
			break;
		case PVR_ARCH_207:
			break;
		default:
			return -EINVAL;
		}

		if (!cpu_has_feature(CPU_FTR_ARCH_207S)) {
			/* POWER7 can't emulate POWER8 */
			if (!(pcr & PCR_ARCH_206))
				return -EINVAL;
			pcr &= ~PCR_ARCH_206;
		}
	}

	spin_lock(&vc->lock);
	vc->arch_compat = arch_compat;
	vc->pcr = pcr;
	spin_unlock(&vc->lock);

	return 0;
}

static void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc = %.16lx msr = %.16llx trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx lr = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x xer = %.16lx dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.vcore->lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}

static struct kvm_vcpu *kvmppc_find_vcpu(struct kvm *kvm, int id)
{
	struct kvm_vcpu *ret;

	mutex_lock(&kvm->lock);
	ret = kvm_get_vcpu_by_id(kvm, id);
	mutex_unlock(&kvm->lock);
	return ret;
}

static void init_vpa(struct kvm_vcpu *vcpu, struct lppaca *vpa)
{
	vpa->__old_status |= LPPACA_OLD_SHARED_PROC;
	vpa->yield_count = cpu_to_be32(1);
}

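/*
 * Record a pending registration for one of the vcpu's shared areas
 * (VPA, SLB shadow or DTL); the guest page is actually pinned later,
 * in kvmppc_update_vpas(), before the vcpu next runs.
 */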
static int set_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *v,
		   unsigned long addr, unsigned long len)
{
	/* check address is cacheline aligned */
	if (addr & (L1_CACHE_BYTES - 1))
		return -EINVAL;
	spin_lock(&vcpu->arch.vpa_update_lock);
	if (v->next_gpa != addr || v->len != len) {
		v->next_gpa = addr;
		v->len = addr ? len : 0;
		v->update_pending = 1;
	}
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return 0;
}

/* Length for a per-processor buffer is passed in at offset 4 in the buffer */
struct reg_vpa {
	u32 dummy;
	union {
		__be16 hword;
		__be32 word;
	} length;
};

static int vpa_is_registered(struct kvmppc_vpa *vpap)
{
	if (vpap->update_pending)
		return vpap->next_gpa != 0;
	return vpap->pinned_addr != NULL;
}

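/*
 * Handle the H_REGISTER_VPA hcall: validate the subfunction and the
 * buffer address/length, then mark the target vcpu's VPA, DTL or SLB
 * shadow buffer for (de)registration.  The VPA must be registered
 * before the DTL or SLB shadow, and may not be deregistered while
 * either of them is still registered.
 */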
static unsigned long do_h_register_vpa(struct kvm_vcpu *vcpu,
				       unsigned long flags,
				       unsigned long vcpuid, unsigned long vpa)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long len, nb;
	void *va;
	struct kvm_vcpu *tvcpu;
	int err;
	int subfunc;
	struct kvmppc_vpa *vpap;

	tvcpu = kvmppc_find_vcpu(kvm, vcpuid);
	if (!tvcpu)
		return H_PARAMETER;

	subfunc = (flags >> H_VPA_FUNC_SHIFT) & H_VPA_FUNC_MASK;
	if (subfunc == H_VPA_REG_VPA || subfunc == H_VPA_REG_DTL ||
	    subfunc == H_VPA_REG_SLB) {
		/* Registering new area - address must be cache-line aligned */
		if ((vpa & (L1_CACHE_BYTES - 1)) || !vpa)
			return H_PARAMETER;

		/* convert logical addr to kernel addr and read length */
		va = kvmppc_pin_guest_page(kvm, vpa, &nb);
		if (va == NULL)
			return H_PARAMETER;
		if (subfunc == H_VPA_REG_VPA)
			len = be16_to_cpu(((struct reg_vpa *)va)->length.hword);
		else
			len = be32_to_cpu(((struct reg_vpa *)va)->length.word);
		kvmppc_unpin_guest_page(kvm, va, vpa, false);

		/* Check length */
		if (len > nb || len < sizeof(struct reg_vpa))
			return H_PARAMETER;
	} else {
		vpa = 0;
		len = 0;
	}

	err = H_PARAMETER;
	vpap = NULL;
	spin_lock(&tvcpu->arch.vpa_update_lock);

	switch (subfunc) {
	case H_VPA_REG_VPA:		/* register VPA */
		if (len < sizeof(struct lppaca))
			break;
		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_REG_DTL:		/* register DTL */
		if (len < sizeof(struct dtl_entry))
			break;
		len -= len % sizeof(struct dtl_entry);

		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_REG_SLB:		/* register SLB shadow buffer */
		/* Check that they have previously registered a VPA */
		err = H_RESOURCE;
		if (!vpa_is_registered(&tvcpu->arch.vpa))
			break;

		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;

	case H_VPA_DEREG_VPA:		/* deregister VPA */
		/* Check they don't still have a DTL or SLB buf registered */
		err = H_RESOURCE;
		if (vpa_is_registered(&tvcpu->arch.dtl) ||
		    vpa_is_registered(&tvcpu->arch.slb_shadow))
			break;

		vpap = &tvcpu->arch.vpa;
		err = 0;
		break;

	case H_VPA_DEREG_DTL:		/* deregister DTL */
		vpap = &tvcpu->arch.dtl;
		err = 0;
		break;

	case H_VPA_DEREG_SLB:		/* deregister SLB shadow buffer */
		vpap = &tvcpu->arch.slb_shadow;
		err = 0;
		break;
	}

	if (vpap) {
		vpap->next_gpa = vpa;
		vpap->len = len;
		vpap->update_pending = 1;
	}

	spin_unlock(&tvcpu->arch.vpa_update_lock);

	return err;
}

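/*
 * Pin the guest page backing a newly registered area and release the
 * old one.  Called and returns with vcpu->arch.vpa_update_lock held,
 * though the lock is dropped around the pinning itself (see below).
 */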
static void kvmppc_update_vpa(struct kvm_vcpu *vcpu, struct kvmppc_vpa *vpap)
{
	struct kvm *kvm = vcpu->kvm;
	void *va;
	unsigned long nb;
	unsigned long gpa;

	/*
	 * We need to pin the page pointed to by vpap->next_gpa,
	 * but we can't call kvmppc_pin_guest_page under the lock
	 * as it does get_user_pages() and down_read().  So we
	 * have to drop the lock, pin the page, then get the lock
	 * again and check that a new area didn't get registered
	 * in the meantime.
	 */
	for (;;) {
		gpa = vpap->next_gpa;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		va = NULL;
		nb = 0;
		if (gpa)
			va = kvmppc_pin_guest_page(kvm, gpa, &nb);
		spin_lock(&vcpu->arch.vpa_update_lock);
		if (gpa == vpap->next_gpa)
			break;
		/* sigh... unpin that one and try again */
		if (va)
			kvmppc_unpin_guest_page(kvm, va, gpa, false);
	}

	vpap->update_pending = 0;
	if (va && nb < vpap->len) {
		/*
		 * If it's now too short, it must be that userspace
		 * has changed the mappings underlying guest memory,
		 * so unregister the region.
		 */
		kvmppc_unpin_guest_page(kvm, va, gpa, false);
		va = NULL;
	}
	if (vpap->pinned_addr)
		kvmppc_unpin_guest_page(kvm, vpap->pinned_addr, vpap->gpa,
					vpap->dirty);
	vpap->gpa = gpa;
	vpap->pinned_addr = va;
	vpap->dirty = false;
	if (va)
		vpap->pinned_end = va + vpap->len;
}

static void kvmppc_update_vpas(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.vpa.update_pending ||
	      vcpu->arch.slb_shadow.update_pending ||
	      vcpu->arch.dtl.update_pending))
		return;

	spin_lock(&vcpu->arch.vpa_update_lock);
	if (vcpu->arch.vpa.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.vpa);
		if (vcpu->arch.vpa.pinned_addr)
			init_vpa(vcpu, vcpu->arch.vpa.pinned_addr);
	}
	if (vcpu->arch.dtl.update_pending) {
		kvmppc_update_vpa(vcpu, &vcpu->arch.dtl);
		vcpu->arch.dtl_ptr = vcpu->arch.dtl.pinned_addr;
		vcpu->arch.dtl_index = 0;
	}
	if (vcpu->arch.slb_shadow.update_pending)
		kvmppc_update_vpa(vcpu, &vcpu->arch.slb_shadow);
	spin_unlock(&vcpu->arch.vpa_update_lock);
}

/*
 * Return the accumulated stolen time for the vcore up until `now'.
 * The caller should hold the vcore lock.
 */
static u64 vcore_stolen_time(struct kvmppc_vcore *vc, u64 now)
{
	u64 p;
	unsigned long flags;

	spin_lock_irqsave(&vc->stoltb_lock, flags);
	p = vc->stolen_tb;
	if (vc->vcore_state != VCORE_INACTIVE &&
	    vc->preempt_tb != TB_NIL)
		p += now - vc->preempt_tb;
	spin_unlock_irqrestore(&vc->stoltb_lock, flags);
	return p;
}

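/*
 * Write one entry into the guest's dispatch trace log, recording the
 * dispatch timebase and the stolen time accumulated since the vcpu
 * was last dispatched, then advance the ring-buffer index shared
 * with the guest through its VPA.
 */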
static void kvmppc_create_dtl_entry(struct kvm_vcpu *vcpu,
				    struct kvmppc_vcore *vc)
{
	struct dtl_entry *dt;
	struct lppaca *vpa;
	unsigned long stolen;
	unsigned long core_stolen;
	u64 now;

	dt = vcpu->arch.dtl_ptr;
	vpa = vcpu->arch.vpa.pinned_addr;
	now = mftb();
	core_stolen = vcore_stolen_time(vc, now);
	stolen = core_stolen - vcpu->arch.stolen_logged;
	vcpu->arch.stolen_logged = core_stolen;
	spin_lock_irq(&vcpu->arch.tbacct_lock);
	stolen += vcpu->arch.busy_stolen;
	vcpu->arch.busy_stolen = 0;
	spin_unlock_irq(&vcpu->arch.tbacct_lock);
	if (!dt || !vpa)
		return;
	memset(dt, 0, sizeof(struct dtl_entry));
	dt->dispatch_reason = 7;
	dt->processor_id = cpu_to_be16(vc->pcpu + vcpu->arch.ptid);
	dt->timebase = cpu_to_be64(now + vc->tb_offset);
	dt->enqueue_to_dispatch_time = cpu_to_be32(stolen);
	dt->srr0 = cpu_to_be64(kvmppc_get_pc(vcpu));
	dt->srr1 = cpu_to_be64(vcpu->arch.shregs.msr);
	++dt;
	if (dt == vcpu->arch.dtl.pinned_end)
		dt = vcpu->arch.dtl.pinned_addr;
	vcpu->arch.dtl_ptr = dt;
	/* order writing *dt vs. writing vpa->dtl_idx */
	smp_wmb();
	vpa->dtl_idx = cpu_to_be64(++vcpu->arch.dtl_index);
	vcpu->arch.dtl.dirty = true;
}

static bool kvmppc_power8_compatible(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.vcore->arch_compat >= PVR_ARCH_207)
		return true;
	if ((!vcpu->arch.vcore->arch_compat) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		return true;
	return false;
}

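/*
 * Handle H_SET_MODE: let the guest set the CIABR (instruction
 * breakpoint) and DAWR/DAWRX (data watchpoint) registers, refusing
 * values that would allow it to trap in hypervisor state.
 */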
static int kvmppc_h_set_mode(struct kvm_vcpu *vcpu, unsigned long mflags,
			     unsigned long resource, unsigned long value1,
			     unsigned long value2)
{
	switch (resource) {
	case H_SET_MODE_RESOURCE_SET_CIABR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (value2)
			return H_P4;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		/* Guests can't breakpoint the hypervisor */
		if ((value1 & CIABR_PRIV) == CIABR_PRIV_HYPER)
			return H_P3;
		vcpu->arch.ciabr = value1;
		return H_SUCCESS;
	case H_SET_MODE_RESOURCE_SET_DAWR:
		if (!kvmppc_power8_compatible(vcpu))
			return H_P2;
		if (mflags)
			return H_UNSUPPORTED_FLAG_START;
		if (value2 & DABRX_HYP)
			return H_P4;
		vcpu->arch.dawr = value1;
		vcpu->arch.dawrx = value2;
		return H_SUCCESS;
	default:
		return H_TOO_HARD;
	}
}

static int kvm_arch_vcpu_yield_to(struct kvm_vcpu *target)
{
	struct kvmppc_vcore *vcore = target->arch.vcore;

	/*
	 * We expect to have been called by the real mode handler
	 * (kvmppc_rm_h_confer()) which would have directly returned
	 * H_SUCCESS if the source vcore wasn't idle (e.g. if it may
	 * have useful work to do and should not confer) so we don't
	 * recheck that here.
	 */

	spin_lock(&vcore->lock);
	if (target->arch.state == KVMPPC_VCPU_RUNNABLE &&
	    vcore->vcore_state != VCORE_INACTIVE &&
	    vcore->runner)
		target = vcore->runner;
	spin_unlock(&vcore->lock);

	return kvm_vcpu_yield_to(target);
}

static int kvmppc_get_yield_count(struct kvm_vcpu *vcpu)
{
	int yield_count = 0;
	struct lppaca *lppaca;

	spin_lock(&vcpu->arch.vpa_update_lock);
	lppaca = (struct lppaca *)vcpu->arch.vpa.pinned_addr;
	if (lppaca)
		yield_count = be32_to_cpu(lppaca->yield_count);
	spin_unlock(&vcpu->arch.vpa_update_lock);
	return yield_count;
}

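/*
 * Dispatch a PAPR hypercall from the guest.  Hcalls completed here
 * return RESUME_GUEST with the status in GPR3; anything that neither
 * this handler nor the real-mode code can finish returns RESUME_HOST
 * so that the hcall is forwarded to userspace.
 */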
int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu)
{
	unsigned long req = kvmppc_get_gpr(vcpu, 3);
	unsigned long target, ret = H_SUCCESS;
	int yield_count;
	struct kvm_vcpu *tvcpu;
	int idx, rc;

	if (req <= MAX_HCALL_OPCODE &&
	    !test_bit(req/4, vcpu->kvm->arch.enabled_hcalls))
		return RESUME_HOST;

	switch (req) {
	case H_CEDE:
		break;
	case H_PROD:
		target = kvmppc_get_gpr(vcpu, 4);
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		tvcpu->arch.prodded = 1;
		smp_mb();
		if (vcpu->arch.ceded) {
			if (swait_active(&vcpu->wq)) {
				swake_up(&vcpu->wq);
				vcpu->stat.halt_wakeup++;
			}
		}
		break;
	case H_CONFER:
		target = kvmppc_get_gpr(vcpu, 4);
		if (target == -1)
			break;
		tvcpu = kvmppc_find_vcpu(vcpu->kvm, target);
		if (!tvcpu) {
			ret = H_PARAMETER;
			break;
		}
		yield_count = kvmppc_get_gpr(vcpu, 5);
		if (kvmppc_get_yield_count(tvcpu) != yield_count)
			break;
		kvm_arch_vcpu_yield_to(tvcpu);
		break;
	case H_REGISTER_VPA:
		ret = do_h_register_vpa(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6));
		break;
	case H_RTAS:
		if (list_empty(&vcpu->kvm->arch.rtas_tokens))
			return RESUME_HOST;

		idx = srcu_read_lock(&vcpu->kvm->srcu);
		rc = kvmppc_rtas_hcall(vcpu);
		srcu_read_unlock(&vcpu->kvm->srcu, idx);

		if (rc == -ENOENT)
			return RESUME_HOST;
		else if (rc == 0)
			break;

		/* Send the error out to userspace via KVM_RUN */
		return rc;
	case H_LOGICAL_CI_LOAD:
		ret = kvmppc_h_logical_ci_load(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_LOGICAL_CI_STORE:
		ret = kvmppc_h_logical_ci_store(vcpu);
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_SET_MODE:
		ret = kvmppc_h_set_mode(vcpu, kvmppc_get_gpr(vcpu, 4),
					kvmppc_get_gpr(vcpu, 5),
					kvmppc_get_gpr(vcpu, 6),
					kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
		if (kvmppc_xics_enabled(vcpu)) {
			ret = kvmppc_xics_hcall(vcpu, req);
			break;
		}
		return RESUME_HOST;
	case H_PUT_TCE:
		ret = kvmppc_h_put_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
				       kvmppc_get_gpr(vcpu, 5),
				       kvmppc_get_gpr(vcpu, 6));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_PUT_TCE_INDIRECT:
		ret = kvmppc_h_put_tce_indirect(vcpu, kvmppc_get_gpr(vcpu, 4),
						kvmppc_get_gpr(vcpu, 5),
						kvmppc_get_gpr(vcpu, 6),
						kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	case H_STUFF_TCE:
		ret = kvmppc_h_stuff_tce(vcpu, kvmppc_get_gpr(vcpu, 4),
					 kvmppc_get_gpr(vcpu, 5),
					 kvmppc_get_gpr(vcpu, 6),
					 kvmppc_get_gpr(vcpu, 7));
		if (ret == H_TOO_HARD)
			return RESUME_HOST;
		break;
	default:
		return RESUME_HOST;
	}
	kvmppc_set_gpr(vcpu, 3, ret);
	vcpu->arch.hcall_needed = 0;
	return RESUME_GUEST;
}

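/*
 * Return nonzero if the given hcall is handled in the kernel, either
 * here or in the real-mode handlers.
 */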
static int kvmppc_hcall_impl_hv(unsigned long cmd)
{
	switch (cmd) {
	case H_CEDE:
	case H_PROD:
	case H_CONFER:
	case H_REGISTER_VPA:
	case H_SET_MODE:
	case H_LOGICAL_CI_LOAD:
	case H_LOGICAL_CI_STORE:
#ifdef CONFIG_KVM_XICS
	case H_XIRR:
	case H_CPPR:
	case H_EOI:
	case H_IPI:
	case H_IPOLL:
	case H_XIRR_X:
#endif
		return 1;
	}

	/* See if it's in the real-mode table */
	return kvmppc_hcall_impl_hv_realmode(cmd);
}

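/*
 * An emulation-assist interrupt was taken with guest debugging
 * active: if the offending instruction is KVM's software breakpoint,
 * exit to userspace with KVM_EXIT_DEBUG; otherwise queue an
 * illegal-instruction program interrupt for the guest.
 */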
static int kvmppc_emulate_debug_inst(struct kvm_run *run,
				     struct kvm_vcpu *vcpu)
{
	u32 last_inst;

	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst) !=
			EMULATE_DONE) {
		/*
		 * Fetch failed, so return to guest and
		 * try executing it again.
		 */
		return RESUME_GUEST;
	}

	if (last_inst == KVMPPC_INST_SW_BREAKPOINT) {
		run->exit_reason = KVM_EXIT_DEBUG;
		run->debug.arch.address = kvmppc_get_pc(vcpu);
		return RESUME_HOST;
	} else {
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		return RESUME_GUEST;
	}
}

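/*
 * Handle an exit from the guest: deliver or forward the interrupt
 * that caused it and decide whether to resume the guest, handle a
 * page fault or passthrough interrupt in the host, or return to
 * userspace.
 */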
static int kvmppc_handle_exit_hv(struct kvm_run *run, struct kvm_vcpu *vcpu,
				 struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	/*
	 * This can happen if an interrupt occurs in the last stages
	 * of guest entry or the first stages of guest exit (i.e. after
	 * setting paca->kvm_hstate.in_guest to KVM_GUEST_MODE_GUEST_HV
	 * and before setting it to KVM_GUEST_MODE_HOST_HV).
	 * That can happen due to a bug, or due to a machine check
	 * occurring at just the wrong time.
	 */
	if (vcpu->arch.shregs.msr & MSR_HV) {
		printk(KERN_EMERG "KVM trap in HV mode!\n");
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		kvmppc_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		return RESUME_HOST;
	}
	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
	case BOOK3S_INTERRUPT_H_VIRT:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	/* HMI is hypervisor interrupt and host has handled it. Resume guest.*/
	case BOOK3S_INTERRUPT_HMI:
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		/*
		 * Deliver a machine check interrupt to the guest.
		 * We have to do this, even if the host has handled the
		 * machine check, because machine checks use SRR0/1 and
		 * the interrupt might have trashed guest state in them.
		 */
		kvmppc_book3s_queue_irqprio(vcpu,
					    BOOK3S_INTERRUPT_MACHINE_CHECK);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		/* hypercall with MSR_PR has already been handled in rmode,
		 * and never reaches here.
		 */

		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest accesses a page which it thinks
	 * it has mapped but which is not actually present, either because
	 * it is for an emulated I/O device or because the corresponding
	 * host page has been paged out.  Any other HDSI/HISI interrupts
	 * have been handled already.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		r = RESUME_PAGE_FAULT;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		vcpu->arch.fault_dar = kvmppc_get_pc(vcpu);
		vcpu->arch.fault_dsisr = 0;
		r = RESUME_PAGE_FAULT;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * If the guest debug is disabled, generate a program interrupt
	 * to the guest. If guest debug is enabled, we need to check
	 * whether the instruction is a software breakpoint instruction.
	 * Accordingly return to Guest or Host.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		if (vcpu->arch.emul_inst != KVM_INST_FETCH_FAILED)
			vcpu->arch.last_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.emul_inst) :
				vcpu->arch.emul_inst;
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) {
			r = kvmppc_emulate_debug_inst(run, vcpu);
		} else {
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			r = RESUME_GUEST;
		}
		break;
	/*
	 * This occurs if the guest (kernel or userspace), does something that
	 * is prohibited by HFSCR.  We just generate a program interrupt to
	 * the guest.
	 */
	case BOOK3S_INTERRUPT_H_FAC_UNAVAIL:
		kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_HV_RM_HARD:
		r = RESUME_PASSTHROUGH;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		run->hw.hardware_exit_reason = vcpu->arch.trap;
		r = RESUME_HOST;
		break;
	}

	return r;
}

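/*
 * Get/set register state for the KVM_GET_SREGS/KVM_SET_SREGS ioctls:
 * the PVR and the guest's SLB entries.
 */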
static int kvm_arch_vcpu_ioctl_get_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i;

	memset(sregs, 0, sizeof(struct kvm_sregs));
	sregs->pvr = vcpu->arch.pvr;
	for (i = 0; i < vcpu->arch.slb_max; i++) {
		sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige;
		sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_hv(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	int i, j;

	/* Only accept the same PVR as the host's, since we can't spoof it */
	if (sregs->pvr != vcpu->arch.pvr)
		return -EINVAL;

	j = 0;
	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (sregs->u.s.ppc64.slb[i].slbe & SLB_ESID_V) {
			vcpu->arch.slb[j].orige = sregs->u.s.ppc64.slb[i].slbe;
			vcpu->arch.slb[j].origv = sregs->u.s.ppc64.slb[i].slbv;
			++j;
		}
	}
	vcpu->arch.slb_max = j;

	return 0;
}

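/*
 * Update the vcore's guest-visible LPCR.  Userspace may only change
 * a small set of bits (DPFD, ILE, TC, and AIL on POWER8); when
 * preserve_top32 is set (for the 32-bit KVM_REG_PPC_LPCR id), the
 * upper 32 bits are left untouched.
 */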
static void kvmppc_set_lpcr(struct kvm_vcpu *vcpu, u64 new_lpcr,
		bool preserve_top32)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	u64 mask;

	mutex_lock(&kvm->lock);
	spin_lock(&vc->lock);
	/*
	 * If ILE (interrupt little-endian) has changed, update the
	 * MSR_LE bit in the intr_msr for each vcpu in this vcore.
	 */
	if ((new_lpcr & LPCR_ILE) != (vc->lpcr & LPCR_ILE)) {
		struct kvm_vcpu *vcpu;
		int i;

		kvm_for_each_vcpu(i, vcpu, kvm) {
			if (vcpu->arch.vcore != vc)
				continue;
			if (new_lpcr & LPCR_ILE)
				vcpu->arch.intr_msr |= MSR_LE;
			else
				vcpu->arch.intr_msr &= ~MSR_LE;
		}
	}

	/*
	 * Userspace can only modify DPFD (default prefetch depth),
	 * ILE (interrupt little-endian) and TC (translation control).
	 * On POWER8 userspace can also modify AIL (alt. interrupt loc.)
	 */
	mask = LPCR_DPFD | LPCR_ILE | LPCR_TC;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		mask |= LPCR_AIL;

	/* Broken 32-bit version of LPCR must not clear top bits */
	if (preserve_top32)
		mask &= 0xFFFFFFFF;
	vc->lpcr = (vc->lpcr & ~mask) | (new_lpcr & mask);
	spin_unlock(&vc->lock);
	mutex_unlock(&kvm->lock);
}

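/*
 * Read one register for the KVM_GET_ONE_REG ioctl, mapping the
 * KVM_REG_PPC_* id onto the corresponding field of the vcpu or its
 * vcore.
 */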
static int kvmppc_get_one_reg_hv(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;
	long int i;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, 0);
		break;
	case KVM_REG_PPC_DABR:
		*val = get_reg_val(id, vcpu->arch.dabr);
		break;
	case KVM_REG_PPC_DABRX:
		*val = get_reg_val(id, vcpu->arch.dabrx);
		break;
	case KVM_REG_PPC_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr);
		break;
	case KVM_REG_PPC_PURR:
		*val = get_reg_val(id, vcpu->arch.purr);
		break;
	case KVM_REG_PPC_SPURR:
		*val = get_reg_val(id, vcpu->arch.spurr);
		break;
	case KVM_REG_PPC_AMR:
		*val = get_reg_val(id, vcpu->arch.amr);
		break;
	case KVM_REG_PPC_UAMOR:
		*val = get_reg_val(id, vcpu->arch.uamor);
		break;
	case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS:
		i = id - KVM_REG_PPC_MMCR0;
		*val = get_reg_val(id, vcpu->arch.mmcr[i]);
		break;
	case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8:
		i = id - KVM_REG_PPC_PMC1;
		*val = get_reg_val(id, vcpu->arch.pmc[i]);
		break;
	case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2:
		i = id - KVM_REG_PPC_SPMC1;
		*val = get_reg_val(id, vcpu->arch.spmc[i]);
		break;
	case KVM_REG_PPC_SIAR:
		*val = get_reg_val(id, vcpu->arch.siar);
		break;
	case KVM_REG_PPC_SDAR:
		*val = get_reg_val(id, vcpu->arch.sdar);
		break;
	case KVM_REG_PPC_SIER:
		*val = get_reg_val(id, vcpu->arch.sier);
		break;
	case KVM_REG_PPC_IAMR:
		*val = get_reg_val(id, vcpu->arch.iamr);
		break;
	case KVM_REG_PPC_PSPB:
		*val = get_reg_val(id, vcpu->arch.pspb);
		break;
	case KVM_REG_PPC_DPDES:
		*val = get_reg_val(id, vcpu->arch.vcore->dpdes);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, vcpu->arch.vcore->vtb);
		break;
	case KVM_REG_PPC_DAWR:
		*val = get_reg_val(id, vcpu->arch.dawr);
		break;
	case KVM_REG_PPC_DAWRX:
		*val = get_reg_val(id, vcpu->arch.dawrx);
		break;
	case KVM_REG_PPC_CIABR:
		*val = get_reg_val(id, vcpu->arch.ciabr);
		break;
	case KVM_REG_PPC_CSIGR:
		*val = get_reg_val(id, vcpu->arch.csigr);
		break;
	case KVM_REG_PPC_TACR:
		*val = get_reg_val(id, vcpu->arch.tacr);
		break;
	case KVM_REG_PPC_TCSCR:
		*val = get_reg_val(id, vcpu->arch.tcscr);
		break;
	case KVM_REG_PPC_PID:
		*val = get_reg_val(id, vcpu->arch.pid);
		break;
	case KVM_REG_PPC_ACOP:
		*val = get_reg_val(id, vcpu->arch.acop);
		break;
	case KVM_REG_PPC_WORT:
		*val = get_reg_val(id, vcpu->arch.wort);
		break;
	case KVM_REG_PPC_TIDR:
		*val = get_reg_val(id, vcpu->arch.tid);
		break;
	case KVM_REG_PPC_PSSCR:
		*val = get_reg_val(id, vcpu->arch.psscr);
		break;
	case KVM_REG_PPC_VPA_ADDR:
		spin_lock(&vcpu->arch.vpa_update_lock);
		*val = get_reg_val(id, vcpu->arch.vpa.next_gpa);
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_SLB:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.slb_shadow.next_gpa;
		val->vpaval.length = vcpu->arch.slb_shadow.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_VPA_DTL:
		spin_lock(&vcpu->arch.vpa_update_lock);
		val->vpaval.addr = vcpu->arch.dtl.next_gpa;
		val->vpaval.length = vcpu->arch.dtl.len;
		spin_unlock(&vcpu->arch.vpa_update_lock);
		break;
	case KVM_REG_PPC_TB_OFFSET:
		*val = get_reg_val(id, vcpu->arch.vcore->tb_offset);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		*val = get_reg_val(id, vcpu->arch.vcore->lpcr);
		break;
	case KVM_REG_PPC_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		i = id - KVM_REG_PPC_TM_GPR0;
		*val = get_reg_val(id, vcpu->arch.gpr_tm[i]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int j;
		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	case KVM_REG_PPC_ARCH_COMPAT:
		*val = get_reg_val(id, vcpu->arch.vcore->arch_compat);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

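/*
 * Write one register for the KVM_SET_ONE_REG ioctl.  A few ids need
 * extra care: the VPA registers enforce registration ordering,
 * DABRX/DAWRX/CIABR are sanitized so the guest cannot target
 * hypervisor state, and the timebase offset is rounded to a
 * multiple of 2^24.
 */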
3a167bea AK |
1360 | static int kvmppc_set_one_reg_hv(struct kvm_vcpu *vcpu, u64 id, |
1361 | union kvmppc_one_reg *val) | |
31f3438e | 1362 | { |
a136a8bd PM |
1363 | int r = 0; |
1364 | long int i; | |
55b665b0 | 1365 | unsigned long addr, len; |
31f3438e | 1366 | |
a136a8bd | 1367 | switch (id) { |
31f3438e | 1368 | case KVM_REG_PPC_HIOR: |
31f3438e | 1369 | /* Only allow this to be set to zero */ |
a136a8bd | 1370 | if (set_reg_val(id, *val)) |
31f3438e PM |
1371 | r = -EINVAL; |
1372 | break; | |
a136a8bd PM |
1373 | case KVM_REG_PPC_DABR: |
1374 | vcpu->arch.dabr = set_reg_val(id, *val); | |
1375 | break; | |
8563bf52 PM |
1376 | case KVM_REG_PPC_DABRX: |
1377 | vcpu->arch.dabrx = set_reg_val(id, *val) & ~DABRX_HYP; | |
1378 | break; | |
a136a8bd PM |
1379 | case KVM_REG_PPC_DSCR: |
1380 | vcpu->arch.dscr = set_reg_val(id, *val); | |
1381 | break; | |
1382 | case KVM_REG_PPC_PURR: | |
1383 | vcpu->arch.purr = set_reg_val(id, *val); | |
1384 | break; | |
1385 | case KVM_REG_PPC_SPURR: | |
1386 | vcpu->arch.spurr = set_reg_val(id, *val); | |
1387 | break; | |
1388 | case KVM_REG_PPC_AMR: | |
1389 | vcpu->arch.amr = set_reg_val(id, *val); | |
1390 | break; | |
1391 | case KVM_REG_PPC_UAMOR: | |
1392 | vcpu->arch.uamor = set_reg_val(id, *val); | |
1393 | break; | |
b005255e | 1394 | case KVM_REG_PPC_MMCR0 ... KVM_REG_PPC_MMCRS: |
a136a8bd PM |
1395 | i = id - KVM_REG_PPC_MMCR0; |
1396 | vcpu->arch.mmcr[i] = set_reg_val(id, *val); | |
1397 | break; | |
1398 | case KVM_REG_PPC_PMC1 ... KVM_REG_PPC_PMC8: | |
1399 | i = id - KVM_REG_PPC_PMC1; | |
1400 | vcpu->arch.pmc[i] = set_reg_val(id, *val); | |
1401 | break; | |
b005255e MN |
1402 | case KVM_REG_PPC_SPMC1 ... KVM_REG_PPC_SPMC2: |
1403 | i = id - KVM_REG_PPC_SPMC1; | |
1404 | vcpu->arch.spmc[i] = set_reg_val(id, *val); | |
1405 | break; | |
14941789 PM |
1406 | case KVM_REG_PPC_SIAR: |
1407 | vcpu->arch.siar = set_reg_val(id, *val); | |
1408 | break; | |
1409 | case KVM_REG_PPC_SDAR: | |
1410 | vcpu->arch.sdar = set_reg_val(id, *val); | |
1411 | break; | |
b005255e MN |
1412 | case KVM_REG_PPC_SIER: |
1413 | vcpu->arch.sier = set_reg_val(id, *val); | |
a8bd19ef | 1414 | break; |
b005255e MN |
1415 | case KVM_REG_PPC_IAMR: |
1416 | vcpu->arch.iamr = set_reg_val(id, *val); | |
1417 | break; | |
b005255e MN |
1418 | case KVM_REG_PPC_PSPB: |
1419 | vcpu->arch.pspb = set_reg_val(id, *val); | |
1420 | break; | |
b005255e MN |
1421 | case KVM_REG_PPC_DPDES: |
1422 | vcpu->arch.vcore->dpdes = set_reg_val(id, *val); | |
1423 | break; | |
88b02cf9 PM |
1424 | case KVM_REG_PPC_VTB: |
1425 | vcpu->arch.vcore->vtb = set_reg_val(id, *val); | |
1426 | break; | |
b005255e MN |
1427 | case KVM_REG_PPC_DAWR: |
1428 | vcpu->arch.dawr = set_reg_val(id, *val); | |
1429 | break; | |
1430 | case KVM_REG_PPC_DAWRX: | |
1431 | vcpu->arch.dawrx = set_reg_val(id, *val) & ~DAWRX_HYP; | |
1432 | break; | |
1433 | case KVM_REG_PPC_CIABR: | |
1434 | vcpu->arch.ciabr = set_reg_val(id, *val); | |
1435 | /* Don't allow setting breakpoints in hypervisor code */ | |
1436 | if ((vcpu->arch.ciabr & CIABR_PRIV) == CIABR_PRIV_HYPER) | |
1437 | vcpu->arch.ciabr &= ~CIABR_PRIV; /* disable */ | |
1438 | break; | |
b005255e MN |
1439 | case KVM_REG_PPC_CSIGR: |
1440 | vcpu->arch.csigr = set_reg_val(id, *val); | |
1441 | break; | |
1442 | case KVM_REG_PPC_TACR: | |
1443 | vcpu->arch.tacr = set_reg_val(id, *val); | |
1444 | break; | |
1445 | case KVM_REG_PPC_TCSCR: | |
1446 | vcpu->arch.tcscr = set_reg_val(id, *val); | |
1447 | break; | |
1448 | case KVM_REG_PPC_PID: | |
1449 | vcpu->arch.pid = set_reg_val(id, *val); | |
1450 | break; | |
1451 | case KVM_REG_PPC_ACOP: | |
1452 | vcpu->arch.acop = set_reg_val(id, *val); | |
1453 | break; | |
1454 | case KVM_REG_PPC_WORT: | |
1455 | vcpu->arch.wort = set_reg_val(id, *val); | |
a8bd19ef | 1456 | break; |
e9cf1e08 PM |
1457 | case KVM_REG_PPC_TIDR: |
1458 | vcpu->arch.tid = set_reg_val(id, *val); | |
1459 | break; | |
1460 | case KVM_REG_PPC_PSSCR: | |
1461 | vcpu->arch.psscr = set_reg_val(id, *val) & PSSCR_GUEST_VIS; | |
1462 | break; | |
55b665b0 PM |
1463 | case KVM_REG_PPC_VPA_ADDR: |
1464 | addr = set_reg_val(id, *val); | |
1465 | r = -EINVAL; | |
1466 | if (!addr && (vcpu->arch.slb_shadow.next_gpa || | |
1467 | vcpu->arch.dtl.next_gpa)) | |
1468 | break; | |
1469 | r = set_vpa(vcpu, &vcpu->arch.vpa, addr, sizeof(struct lppaca)); | |
1470 | break; | |
1471 | case KVM_REG_PPC_VPA_SLB: | |
1472 | addr = val->vpaval.addr; | |
1473 | len = val->vpaval.length; | |
1474 | r = -EINVAL; | |
1475 | if (addr && !vcpu->arch.vpa.next_gpa) | |
1476 | break; | |
1477 | r = set_vpa(vcpu, &vcpu->arch.slb_shadow, addr, len); | |
1478 | break; | |
1479 | case KVM_REG_PPC_VPA_DTL: | |
1480 | addr = val->vpaval.addr; | |
1481 | len = val->vpaval.length; | |
1482 | r = -EINVAL; | |
9f8c8c78 PM |
1483 | if (addr && (len < sizeof(struct dtl_entry) || |
1484 | !vcpu->arch.vpa.next_gpa)) | |
55b665b0 PM |
1485 | break; |
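| /* trim the DTL buffer length to a whole number of dispatch trace entries */ | |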
1486 | len -= len % sizeof(struct dtl_entry); | |
1487 | r = set_vpa(vcpu, &vcpu->arch.dtl, addr, len); | |
1488 | break; | |
93b0f4dc PM |
1489 | case KVM_REG_PPC_TB_OFFSET: |
1490 | /* round up to multiple of 2^24 */ | |
1491 | vcpu->arch.vcore->tb_offset = | |
1492 | ALIGN(set_reg_val(id, *val), 1UL << 24); | |
1493 | break; | |
a0144e2a | 1494 | case KVM_REG_PPC_LPCR: |
a0840240 AK |
1495 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), true); |
1496 | break; | |
1497 | case KVM_REG_PPC_LPCR_64: | |
1498 | kvmppc_set_lpcr(vcpu, set_reg_val(id, *val), false); | |
a0144e2a | 1499 | break; |
4b8473c9 PM |
1500 | case KVM_REG_PPC_PPR: |
1501 | vcpu->arch.ppr = set_reg_val(id, *val); | |
1502 | break; | |
a7d80d01 MN |
1503 | #ifdef CONFIG_PPC_TRANSACTIONAL_MEM |
1504 | case KVM_REG_PPC_TFHAR: | |
1505 | vcpu->arch.tfhar = set_reg_val(id, *val); | |
1506 | break; | |
1507 | case KVM_REG_PPC_TFIAR: | |
1508 | vcpu->arch.tfiar = set_reg_val(id, *val); | |
1509 | break; | |
1510 | case KVM_REG_PPC_TEXASR: | |
1511 | vcpu->arch.texasr = set_reg_val(id, *val); | |
1512 | break; | |
1513 | case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31: | |
1514 | i = id - KVM_REG_PPC_TM_GPR0; | |
1515 | vcpu->arch.gpr_tm[i] = set_reg_val(id, *val); | |
1516 | break; | |
1517 | case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63: | |
1518 | { | |
1519 | int j; | |
1520 | i = id - KVM_REG_PPC_TM_VSR0; | |
1521 | if (i < 32) | |
1522 | for (j = 0; j < TS_FPRWIDTH; j++) | |
1523 | vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j]; | |
1524 | else | |
1525 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1526 | vcpu->arch.vr_tm.vr[i-32] = val->vval; | |
1527 | else | |
1528 | r = -ENXIO; | |
1529 | break; | |
1530 | } | |
1531 | case KVM_REG_PPC_TM_CR: | |
1532 | vcpu->arch.cr_tm = set_reg_val(id, *val); | |
1533 | break; | |
0d808df0 PM |
1534 | case KVM_REG_PPC_TM_XER: |
1535 | vcpu->arch.xer_tm = set_reg_val(id, *val); | |
1536 | break; | |
a7d80d01 MN |
1537 | case KVM_REG_PPC_TM_LR: |
1538 | vcpu->arch.lr_tm = set_reg_val(id, *val); | |
1539 | break; | |
1540 | case KVM_REG_PPC_TM_CTR: | |
1541 | vcpu->arch.ctr_tm = set_reg_val(id, *val); | |
1542 | break; | |
1543 | case KVM_REG_PPC_TM_FPSCR: | |
1544 | vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val); | |
1545 | break; | |
1546 | case KVM_REG_PPC_TM_AMR: | |
1547 | vcpu->arch.amr_tm = set_reg_val(id, *val); | |
1548 | break; | |
1549 | case KVM_REG_PPC_TM_PPR: | |
1550 | vcpu->arch.ppr_tm = set_reg_val(id, *val); | |
1551 | break; | |
1552 | case KVM_REG_PPC_TM_VRSAVE: | |
1553 | vcpu->arch.vrsave_tm = set_reg_val(id, *val); | |
1554 | break; | |
1555 | case KVM_REG_PPC_TM_VSCR: | |
1556 | if (cpu_has_feature(CPU_FTR_ALTIVEC)) | |
1557 | vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val); | |
1558 | else | |
1559 | r = -ENXIO; | |
1560 | break; | |
1561 | case KVM_REG_PPC_TM_DSCR: | |
1562 | vcpu->arch.dscr_tm = set_reg_val(id, *val); | |
1563 | break; | |
1564 | case KVM_REG_PPC_TM_TAR: | |
1565 | vcpu->arch.tar_tm = set_reg_val(id, *val); | |
1566 | break; | |
1567 | #endif | |
388cc6e1 PM |
1568 | case KVM_REG_PPC_ARCH_COMPAT: |
1569 | r = kvmppc_set_arch_compat(vcpu, set_reg_val(id, *val)); | |
1570 | break; | |
31f3438e | 1571 | default: |
a136a8bd | 1572 | r = -EINVAL; |
31f3438e PM |
1573 | break; |
1574 | } | |
1575 | ||
1576 | return r; | |
1577 | } | |
1578 | ||
de9bdd1a SS |
1579 | static struct kvmppc_vcore *kvmppc_vcore_create(struct kvm *kvm, int core) |
1580 | { | |
1581 | struct kvmppc_vcore *vcore; | |
1582 | ||
1583 | vcore = kzalloc(sizeof(struct kvmppc_vcore), GFP_KERNEL); | |
1584 | ||
1585 | if (vcore == NULL) | |
1586 | return NULL; | |
1587 | ||
de9bdd1a | 1588 | spin_lock_init(&vcore->lock); |
2711e248 | 1589 | spin_lock_init(&vcore->stoltb_lock); |
8577370f | 1590 | init_swait_queue_head(&vcore->wq); |
de9bdd1a SS |
1591 | vcore->preempt_tb = TB_NIL; |
1592 | vcore->lpcr = kvm->arch.lpcr; | |
1593 | vcore->first_vcpuid = core * threads_per_subcore; | |
1594 | vcore->kvm = kvm; | |
ec257165 | 1595 | INIT_LIST_HEAD(&vcore->preempt_list); |
de9bdd1a SS |
1596 | |
1597 | return vcore; | |
1598 | } | |
1599 | ||
b6c295df PM |
1600 | #ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING |
1601 | static struct debugfs_timings_element { | |
1602 | const char *name; | |
1603 | size_t offset; | |
1604 | } timings[] = { | |
1605 | {"rm_entry", offsetof(struct kvm_vcpu, arch.rm_entry)}, | |
1606 | {"rm_intr", offsetof(struct kvm_vcpu, arch.rm_intr)}, | |
1607 | {"rm_exit", offsetof(struct kvm_vcpu, arch.rm_exit)}, | |
1608 | {"guest", offsetof(struct kvm_vcpu, arch.guest_time)}, | |
1609 | {"cede", offsetof(struct kvm_vcpu, arch.cede_time)}, | |
1610 | }; | |
1611 | ||
1612 | #define N_TIMINGS ARRAY_SIZE(timings) | |
1613 | ||
1614 | struct debugfs_timings_state { | |
1615 | struct kvm_vcpu *vcpu; | |
1616 | unsigned int buflen; | |
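| /* room for one formatted line of at most 100 bytes per timing counter */ | |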
1617 | char buf[N_TIMINGS * 100]; | |
1618 | }; | |
1619 | ||
1620 | static int debugfs_timings_open(struct inode *inode, struct file *file) | |
1621 | { | |
1622 | struct kvm_vcpu *vcpu = inode->i_private; | |
1623 | struct debugfs_timings_state *p; | |
1624 | ||
1625 | p = kzalloc(sizeof(*p), GFP_KERNEL); | |
1626 | if (!p) | |
1627 | return -ENOMEM; | |
1628 | ||
1629 | kvm_get_kvm(vcpu->kvm); | |
1630 | p->vcpu = vcpu; | |
1631 | file->private_data = p; | |
1632 | ||
1633 | return nonseekable_open(inode, file); | |
1634 | } | |
1635 | ||
1636 | static int debugfs_timings_release(struct inode *inode, struct file *file) | |
1637 | { | |
1638 | struct debugfs_timings_state *p = file->private_data; | |
1639 | ||
1640 | kvm_put_kvm(p->vcpu->kvm); | |
1641 | kfree(p); | |
1642 | return 0; | |
1643 | } | |
1644 | ||
1645 | static ssize_t debugfs_timings_read(struct file *file, char __user *buf, | |
1646 | size_t len, loff_t *ppos) | |
1647 | { | |
1648 | struct debugfs_timings_state *p = file->private_data; | |
1649 | struct kvm_vcpu *vcpu = p->vcpu; | |
1650 | char *s, *buf_end; | |
1651 | struct kvmhv_tb_accumulator tb; | |
1652 | u64 count; | |
1653 | loff_t pos; | |
1654 | ssize_t n; | |
1655 | int i, loops; | |
1656 | bool ok; | |
1657 | ||
1658 | if (!p->buflen) { | |
1659 | s = p->buf; | |
1660 | buf_end = s + sizeof(p->buf); | |
1661 | for (i = 0; i < N_TIMINGS; ++i) { | |
1662 | struct kvmhv_tb_accumulator *acc; | |
1663 | ||
1664 | acc = (struct kvmhv_tb_accumulator *) | |
1665 | ((unsigned long)vcpu + timings[i].offset); | |
1666 | ok = false; | |
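| /* Seqlock-style snapshot: an even, unchanged seqcount around | |
| * the copy means no writer raced with us; otherwise retry. */ | |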
1667 | for (loops = 0; loops < 1000; ++loops) { | |
1668 | count = acc->seqcount; | |
1669 | if (!(count & 1)) { | |
1670 | smp_rmb(); | |
1671 | tb = *acc; | |
1672 | smp_rmb(); | |
1673 | if (count == acc->seqcount) { | |
1674 | ok = true; | |
1675 | break; | |
1676 | } | |
1677 | } | |
1678 | udelay(1); | |
1679 | } | |
1680 | if (!ok) | |
1681 | snprintf(s, buf_end - s, "%s: stuck\n", | |
1682 | timings[i].name); | |
1683 | else | |
1684 | snprintf(s, buf_end - s, | |
1685 | "%s: %llu %llu %llu %llu\n", | |
1686 | timings[i].name, count / 2, | |
1687 | tb_to_ns(tb.tb_total), | |
1688 | tb_to_ns(tb.tb_min), | |
1689 | tb_to_ns(tb.tb_max)); | |
1690 | s += strlen(s); | |
1691 | } | |
1692 | p->buflen = s - p->buf; | |
1693 | } | |
1694 | ||
1695 | pos = *ppos; | |
1696 | if (pos >= p->buflen) | |
1697 | return 0; | |
1698 | if (len > p->buflen - pos) | |
1699 | len = p->buflen - pos; | |
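| /* copy_to_user() returns the number of bytes it could NOT copy */ | |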
1700 | n = copy_to_user(buf, p->buf + pos, len); | |
1701 | if (n) { | |
1702 | if (n == len) | |
1703 | return -EFAULT; | |
1704 | len -= n; | |
1705 | } | |
1706 | *ppos = pos + len; | |
1707 | return len; | |
1708 | } | |
1709 | ||
1710 | static ssize_t debugfs_timings_write(struct file *file, const char __user *buf, | |
1711 | size_t len, loff_t *ppos) | |
1712 | { | |
1713 | return -EACCES; | |
1714 | } | |
1715 | ||
1716 | static const struct file_operations debugfs_timings_ops = { | |
1717 | .owner = THIS_MODULE, | |
1718 | .open = debugfs_timings_open, | |
1719 | .release = debugfs_timings_release, | |
1720 | .read = debugfs_timings_read, | |
1721 | .write = debugfs_timings_write, | |
1722 | .llseek = generic_file_llseek, | |
1723 | }; | |
1724 | ||
1725 | /* Create a debugfs directory for the vcpu */ | |
1726 | static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) | |
1727 | { | |
1728 | char buf[16]; | |
1729 | struct kvm *kvm = vcpu->kvm; | |
1730 | ||
1731 | snprintf(buf, sizeof(buf), "vcpu%u", id); | |
1732 | if (IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) | |
1733 | return; | |
1734 | vcpu->arch.debugfs_dir = debugfs_create_dir(buf, kvm->arch.debugfs_dir); | |
1735 | if (IS_ERR_OR_NULL(vcpu->arch.debugfs_dir)) | |
1736 | return; | |
1737 | vcpu->arch.debugfs_timings = | |
1738 | debugfs_create_file("timings", 0444, vcpu->arch.debugfs_dir, | |
1739 | vcpu, &debugfs_timings_ops); | |
1740 | } | |
1741 | ||
1742 | #else /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ | |
1743 | static void debugfs_vcpu_init(struct kvm_vcpu *vcpu, unsigned int id) | |
1744 | { | |
1745 | } | |
1746 | #endif /* CONFIG_KVM_BOOK3S_HV_EXIT_TIMING */ | |
1747 | ||
3a167bea AK |
1748 | static struct kvm_vcpu *kvmppc_core_vcpu_create_hv(struct kvm *kvm, |
1749 | unsigned int id) | |
de56a948 PM |
1750 | { |
1751 | struct kvm_vcpu *vcpu; | |
371fefd6 PM |
1752 | int err = -EINVAL; |
1753 | int core; | |
1754 | struct kvmppc_vcore *vcore; | |
de56a948 | 1755 | |
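| /* vcpu IDs are grouped into virtual cores, threads_per_subcore at a time */ | |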
3102f784 | 1756 | core = id / threads_per_subcore; |
371fefd6 PM |
1757 | if (core >= KVM_MAX_VCORES) |
1758 | goto out; | |
1759 | ||
1760 | err = -ENOMEM; | |
6b75e6bf | 1761 | vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL); |
de56a948 PM |
1762 | if (!vcpu) |
1763 | goto out; | |
1764 | ||
1765 | err = kvm_vcpu_init(vcpu, kvm, id); | |
1766 | if (err) | |
1767 | goto free_vcpu; | |
1768 | ||
1769 | vcpu->arch.shared = &vcpu->arch.shregs; | |
5deb8e7a AG |
1770 | #ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE |
1771 | /* | |
1772 | * The shared struct is never shared on HV, | |
1773 | * so we can always use host endianness | |
1774 | */ | |
1775 | #ifdef __BIG_ENDIAN__ | |
1776 | vcpu->arch.shared_big_endian = true; | |
1777 | #else | |
1778 | vcpu->arch.shared_big_endian = false; | |
1779 | #endif | |
1780 | #endif | |
de56a948 PM |
1781 | vcpu->arch.mmcr[0] = MMCR0_FC; |
1782 | vcpu->arch.ctrl = CTRL_RUNLATCH; | |
1783 | /* default to host PVR, since we can't spoof it */ | |
3a167bea | 1784 | kvmppc_set_pvr_hv(vcpu, mfspr(SPRN_PVR)); |
2e25aa5f | 1785 | spin_lock_init(&vcpu->arch.vpa_update_lock); |
c7b67670 PM |
1786 | spin_lock_init(&vcpu->arch.tbacct_lock); |
1787 | vcpu->arch.busy_preempt = TB_NIL; | |
d682916a | 1788 | vcpu->arch.intr_msr = MSR_SF | MSR_ME; |
de56a948 | 1789 | |
de56a948 PM |
1790 | kvmppc_mmu_book3s_hv_init(vcpu); |
1791 | ||
8455d79e | 1792 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
371fefd6 PM |
1793 | |
1794 | init_waitqueue_head(&vcpu->arch.cpu_run); | |
1795 | ||
1796 | mutex_lock(&kvm->lock); | |
1797 | vcore = kvm->arch.vcores[core]; | |
1798 | if (!vcore) { | |
de9bdd1a | 1799 | vcore = kvmppc_vcore_create(kvm, core); |
371fefd6 | 1800 | kvm->arch.vcores[core] = vcore; |
1b400ba0 | 1801 | kvm->arch.online_vcores++; |
371fefd6 PM |
1802 | } |
1803 | mutex_unlock(&kvm->lock); | |
1804 | ||
| err = -ENOMEM; | |
1805 | if (!vcore) | |
1806 | goto uninit_vcpu; | |
1807 | ||
1808 | spin_lock(&vcore->lock); | |
1809 | ++vcore->num_threads; | |
371fefd6 PM |
1810 | spin_unlock(&vcore->lock); |
1811 | vcpu->arch.vcore = vcore; | |
e0b7ec05 | 1812 | vcpu->arch.ptid = vcpu->vcpu_id - vcore->first_vcpuid; |
ec257165 | 1813 | vcpu->arch.thread_cpu = -1; |
371fefd6 | 1814 | |
af8f38b3 AG |
1815 | vcpu->arch.cpu_type = KVM_CPU_3S_64; |
1816 | kvmppc_sanity_check(vcpu); | |
1817 | ||
b6c295df PM |
1818 | debugfs_vcpu_init(vcpu, id); |
1819 | ||
de56a948 PM |
1820 | return vcpu; |
1821 | ||
| uninit_vcpu: | |
| 	kvm_vcpu_uninit(vcpu); | |
1822 | free_vcpu:
6b75e6bf | 1823 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
de56a948 PM |
1824 | out: |
1825 | return ERR_PTR(err); | |
1826 | } | |
1827 | ||
c35635ef PM |
1828 | static void unpin_vpa(struct kvm *kvm, struct kvmppc_vpa *vpa) |
1829 | { | |
1830 | if (vpa->pinned_addr) | |
1831 | kvmppc_unpin_guest_page(kvm, vpa->pinned_addr, vpa->gpa, | |
1832 | vpa->dirty); | |
1833 | } | |
1834 | ||
3a167bea | 1835 | static void kvmppc_core_vcpu_free_hv(struct kvm_vcpu *vcpu) |
de56a948 | 1836 | { |
2e25aa5f | 1837 | spin_lock(&vcpu->arch.vpa_update_lock); |
c35635ef PM |
1838 | unpin_vpa(vcpu->kvm, &vcpu->arch.dtl); |
1839 | unpin_vpa(vcpu->kvm, &vcpu->arch.slb_shadow); | |
1840 | unpin_vpa(vcpu->kvm, &vcpu->arch.vpa); | |
2e25aa5f | 1841 | spin_unlock(&vcpu->arch.vpa_update_lock); |
de56a948 | 1842 | kvm_vcpu_uninit(vcpu); |
6b75e6bf | 1843 | kmem_cache_free(kvm_vcpu_cache, vcpu); |
de56a948 PM |
1844 | } |
1845 | ||
3a167bea AK |
1846 | static int kvmppc_core_check_requests_hv(struct kvm_vcpu *vcpu) |
1847 | { | |
1848 | /* Indicate we want to get back into the guest */ | |
1849 | return 1; | |
1850 | } | |
1851 | ||
19ccb76a | 1852 | static void kvmppc_set_timer(struct kvm_vcpu *vcpu) |
371fefd6 | 1853 | { |
19ccb76a | 1854 | unsigned long dec_nsec, now; |
371fefd6 | 1855 | |
19ccb76a PM |
1856 | now = get_tb(); |
1857 | if (now > vcpu->arch.dec_expires) { | |
1858 | /* decrementer has already gone negative */ | |
1859 | kvmppc_core_queue_dec(vcpu); | |
7e28e60e | 1860 | kvmppc_core_prepare_to_enter(vcpu); |
19ccb76a | 1861 | return; |
371fefd6 | 1862 | } |
19ccb76a PM |
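| /* convert the remaining timebase ticks to ns for the hrtimer */ | |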
1863 | dec_nsec = (vcpu->arch.dec_expires - now) * NSEC_PER_SEC |
1864 | / tb_ticks_per_sec; | |
1865 | hrtimer_start(&vcpu->arch.dec_timer, ktime_set(0, dec_nsec), | |
1866 | HRTIMER_MODE_REL); | |
1867 | vcpu->arch.timer_running = 1; | |
371fefd6 PM |
1868 | } |
1869 | ||
19ccb76a | 1870 | static void kvmppc_end_cede(struct kvm_vcpu *vcpu) |
371fefd6 | 1871 | { |
19ccb76a PM |
1872 | vcpu->arch.ceded = 0; |
1873 | if (vcpu->arch.timer_running) { | |
1874 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | |
1875 | vcpu->arch.timer_running = 0; | |
1876 | } | |
371fefd6 PM |
1877 | } |
1878 | ||
e0b7ec05 | 1879 | extern void __kvmppc_vcore_entry(void); |
de56a948 | 1880 | |
371fefd6 PM |
1881 | static void kvmppc_remove_runnable(struct kvmppc_vcore *vc, |
1882 | struct kvm_vcpu *vcpu) | |
de56a948 | 1883 | { |
c7b67670 PM |
1884 | u64 now; |
1885 | ||
371fefd6 PM |
1886 | if (vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
1887 | return; | |
bf3d32e1 | 1888 | spin_lock_irq(&vcpu->arch.tbacct_lock); |
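| /* fold stolen time accrued while runnable into this vcpu's busy_stolen */ | |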
c7b67670 PM |
1889 | now = mftb(); |
1890 | vcpu->arch.busy_stolen += vcore_stolen_time(vc, now) - | |
1891 | vcpu->arch.stolen_logged; | |
1892 | vcpu->arch.busy_preempt = now; | |
1893 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; | |
bf3d32e1 | 1894 | spin_unlock_irq(&vcpu->arch.tbacct_lock); |
371fefd6 | 1895 | --vc->n_runnable; |
7b5f8272 | 1896 | WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], NULL); |
371fefd6 PM |
1897 | } |
1898 | ||
f0888f70 PM |
1899 | static int kvmppc_grab_hwthread(int cpu) |
1900 | { | |
1901 | struct paca_struct *tpaca; | |
b754c739 | 1902 | long timeout = 10000; |
f0888f70 PM |
1903 | |
1904 | tpaca = &paca[cpu]; | |
1905 | ||
1906 | /* Ensure the thread won't go into the kernel if it wakes */ | |
7b444c67 | 1907 | tpaca->kvm_hstate.kvm_vcpu = NULL; |
b4deba5c | 1908 | tpaca->kvm_hstate.kvm_vcore = NULL; |
5d5b99cd PM |
1909 | tpaca->kvm_hstate.napping = 0; |
1910 | smp_wmb(); | |
1911 | tpaca->kvm_hstate.hwthread_req = 1; | |
f0888f70 PM |
1912 | |
1913 | /* | |
1914 | * If the thread is already executing in the kernel (e.g. handling | |
1915 | * a stray interrupt), wait for it to get back to nap mode. | |
1916 | * The smp_mb() is to ensure that our setting of hwthread_req | |
1917 | * is visible before we look at hwthread_state, so if this | |
1918 | * races with the code at system_reset_pSeries and the thread | |
1919 | * misses our setting of hwthread_req, we are sure to see its | |
1920 | * setting of hwthread_state, and vice versa. | |
1921 | */ | |
1922 | smp_mb(); | |
1923 | while (tpaca->kvm_hstate.hwthread_state == KVM_HWTHREAD_IN_KERNEL) { | |
1924 | if (--timeout <= 0) { | |
1925 | pr_err("KVM: couldn't grab cpu %d\n", cpu); | |
1926 | return -EBUSY; | |
1927 | } | |
1928 | udelay(1); | |
1929 | } | |
1930 | return 0; | |
1931 | } | |
1932 | ||
1933 | static void kvmppc_release_hwthread(int cpu) | |
1934 | { | |
1935 | struct paca_struct *tpaca; | |
1936 | ||
1937 | tpaca = &paca[cpu]; | |
1938 | tpaca->kvm_hstate.hwthread_req = 0; | |
1939 | tpaca->kvm_hstate.kvm_vcpu = NULL; | |
b4deba5c PM |
1940 | tpaca->kvm_hstate.kvm_vcore = NULL; |
1941 | tpaca->kvm_hstate.kvm_split_mode = NULL; | |
f0888f70 PM |
1942 | } |
1943 | ||
b4deba5c | 1944 | static void kvmppc_start_thread(struct kvm_vcpu *vcpu, struct kvmppc_vcore *vc) |
371fefd6 PM |
1945 | { |
1946 | int cpu; | |
1947 | struct paca_struct *tpaca; | |
ec257165 | 1948 | struct kvmppc_vcore *mvc = vc->master_vcore; |
371fefd6 | 1949 | |
b4deba5c PM |
1950 | cpu = vc->pcpu; |
1951 | if (vcpu) { | |
1952 | if (vcpu->arch.timer_running) { | |
1953 | hrtimer_try_to_cancel(&vcpu->arch.dec_timer); | |
1954 | vcpu->arch.timer_running = 0; | |
1955 | } | |
1956 | cpu += vcpu->arch.ptid; | |
1957 | vcpu->cpu = mvc->pcpu; | |
1958 | vcpu->arch.thread_cpu = cpu; | |
19ccb76a | 1959 | } |
371fefd6 | 1960 | tpaca = &paca[cpu]; |
5d5b99cd | 1961 | tpaca->kvm_hstate.kvm_vcpu = vcpu; |
ec257165 | 1962 | tpaca->kvm_hstate.ptid = cpu - mvc->pcpu; |
ec257165 | 1963 | /* Order stores to hstate.kvm_vcpu etc. before store to kvm_vcore */ |
371fefd6 | 1964 | smp_wmb(); |
b4deba5c | 1965 | tpaca->kvm_hstate.kvm_vcore = mvc; |
5d5b99cd | 1966 | if (cpu != smp_processor_id()) |
66feed61 | 1967 | kvmppc_ipi_thread(cpu); |
371fefd6 | 1968 | } |
de56a948 | 1969 | |
5d5b99cd | 1970 | static void kvmppc_wait_for_nap(void) |
371fefd6 | 1971 | { |
5d5b99cd PM |
1972 | int cpu = smp_processor_id(); |
1973 | int i, loops; | |
371fefd6 | 1974 | |
5d5b99cd PM |
1975 | for (loops = 0; loops < 1000000; ++loops) { |
1976 | /* | |
1977 | * Check if all threads are finished. | |
b4deba5c | 1978 | * We set the vcore pointer when starting a thread |
5d5b99cd | 1979 | * and the thread clears it when finished, so we look |
b4deba5c | 1980 | * for any threads that still have a non-NULL vcore ptr. |
5d5b99cd PM |
1981 | */ |
1982 | for (i = 1; i < threads_per_subcore; ++i) | |
b4deba5c | 1983 | if (paca[cpu + i].kvm_hstate.kvm_vcore) |
5d5b99cd PM |
1984 | break; |
1985 | if (i == threads_per_subcore) { | |
1986 | HMT_medium(); | |
1987 | return; | |
371fefd6 | 1988 | } |
5d5b99cd | 1989 | HMT_low(); |
371fefd6 PM |
1990 | } |
1991 | HMT_medium(); | |
5d5b99cd | 1992 | for (i = 1; i < threads_per_subcore; ++i) |
b4deba5c | 1993 | if (paca[cpu + i].kvm_hstate.kvm_vcore) |
5d5b99cd | 1994 | pr_err("KVM: CPU %d seems to be stuck\n", cpu + i); |
371fefd6 PM |
1995 | } |
1996 | ||
1997 | /* | |
1998 | * Check that we are on thread 0 and that any other threads in | |
7b444c67 PM |
1999 | * this core are off-line. Then grab the threads so they can't |
2000 | * enter the kernel. | |
371fefd6 PM |
2001 | */ |
2002 | static int on_primary_thread(void) | |
2003 | { | |
2004 | int cpu = smp_processor_id(); | |
3102f784 | 2005 | int thr; |
371fefd6 | 2006 | |
3102f784 ME |
2007 | /* Bail unless we are the primary (first) thread of a subcore */ | |
2008 | if (cpu_thread_in_subcore(cpu)) | |
371fefd6 | 2009 | return 0; |
3102f784 ME |
2010 | |
2011 | thr = 0; | |
2012 | while (++thr < threads_per_subcore) | |
371fefd6 PM |
2013 | if (cpu_online(cpu + thr)) |
2014 | return 0; | |
7b444c67 PM |
2015 | |
2016 | /* Grab all hw threads so they can't go into the kernel */ | |
3102f784 | 2017 | for (thr = 1; thr < threads_per_subcore; ++thr) { |
7b444c67 PM |
2018 | if (kvmppc_grab_hwthread(cpu + thr)) { |
2019 | /* Couldn't grab one; let the others go */ | |
2020 | do { | |
2021 | kvmppc_release_hwthread(cpu + thr); | |
2022 | } while (--thr > 0); | |
2023 | return 0; | |
2024 | } | |
2025 | } | |
371fefd6 PM |
2026 | return 1; |
2027 | } | |
2028 | ||
ec257165 PM |
2029 | /* |
2030 | * A list of virtual cores for each physical CPU. | |
2031 | * These are vcores that could run but their runner VCPU tasks are | |
2032 | * (or may be) preempted. | |
2033 | */ | |
2034 | struct preempted_vcore_list { | |
2035 | struct list_head list; | |
2036 | spinlock_t lock; | |
2037 | }; | |
2038 | ||
2039 | static DEFINE_PER_CPU(struct preempted_vcore_list, preempted_vcores); | |
2040 | ||
2041 | static void init_vcore_lists(void) | |
2042 | { | |
2043 | int cpu; | |
2044 | ||
2045 | for_each_possible_cpu(cpu) { | |
2046 | struct preempted_vcore_list *lp = &per_cpu(preempted_vcores, cpu); | |
2047 | spin_lock_init(&lp->lock); | |
2048 | INIT_LIST_HEAD(&lp->list); | |
2049 | } | |
2050 | } | |
2051 | ||
2052 | static void kvmppc_vcore_preempt(struct kvmppc_vcore *vc) | |
2053 | { | |
2054 | struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); | |
2055 | ||
2056 | vc->vcore_state = VCORE_PREEMPT; | |
2057 | vc->pcpu = smp_processor_id(); | |
2058 | if (vc->num_threads < threads_per_subcore) { | |
2059 | spin_lock(&lp->lock); | |
2060 | list_add_tail(&vc->preempt_list, &lp->list); | |
2061 | spin_unlock(&lp->lock); | |
2062 | } | |
2063 | ||
2064 | /* Start accumulating stolen time */ | |
2065 | kvmppc_core_start_stolen(vc); | |
2066 | } | |
2067 | ||
2068 | static void kvmppc_vcore_end_preempt(struct kvmppc_vcore *vc) | |
2069 | { | |
402813fe | 2070 | struct preempted_vcore_list *lp; |
ec257165 PM |
2071 | |
2072 | kvmppc_core_end_stolen(vc); | |
2073 | if (!list_empty(&vc->preempt_list)) { | |
402813fe | 2074 | lp = &per_cpu(preempted_vcores, vc->pcpu); |
ec257165 PM |
2075 | spin_lock(&lp->lock); |
2076 | list_del_init(&vc->preempt_list); | |
2077 | spin_unlock(&lp->lock); | |
2078 | } | |
2079 | vc->vcore_state = VCORE_INACTIVE; | |
2080 | } | |
2081 | ||
b4deba5c PM |
2082 | /* |
2083 | * This stores information about the virtual cores currently | |
2084 | * assigned to a physical core. | |
2085 | */ | |
ec257165 | 2086 | struct core_info { |
b4deba5c PM |
2087 | int n_subcores; |
2088 | int max_subcore_threads; | |
ec257165 | 2089 | int total_threads; |
b4deba5c PM |
2090 | int subcore_threads[MAX_SUBCORES]; |
2091 | struct kvm *subcore_vm[MAX_SUBCORES]; | |
2092 | struct list_head vcs[MAX_SUBCORES]; | |
ec257165 PM |
2093 | }; |
2094 | ||
b4deba5c PM |
2095 | /* |
2096 | * This mapping means subcores 0 and 1 can use threads 0-3 and 4-7 | |
2097 | * respectively in 2-way micro-threading (split-core) mode. | |
2098 | */ | |
2099 | static int subcore_thread_map[MAX_SUBCORES] = { 0, 4, 2, 6 }; | |
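| /* In 4-way mode each subcore gets two threads, so subcores 0-3 | |
| * start at threads 0, 4, 2 and 6 respectively. */ | |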
2100 | ||
ec257165 PM |
2101 | static void init_core_info(struct core_info *cip, struct kvmppc_vcore *vc) |
2102 | { | |
b4deba5c PM |
2103 | int sub; |
2104 | ||
ec257165 | 2105 | memset(cip, 0, sizeof(*cip)); |
b4deba5c PM |
2106 | cip->n_subcores = 1; |
2107 | cip->max_subcore_threads = vc->num_threads; | |
ec257165 | 2108 | cip->total_threads = vc->num_threads; |
b4deba5c PM |
2109 | cip->subcore_threads[0] = vc->num_threads; |
2110 | cip->subcore_vm[0] = vc->kvm; | |
2111 | for (sub = 0; sub < MAX_SUBCORES; ++sub) | |
2112 | INIT_LIST_HEAD(&cip->vcs[sub]); | |
2113 | list_add_tail(&vc->preempt_list, &cip->vcs[0]); | |
2114 | } | |
2115 | ||
2116 | static bool subcore_config_ok(int n_subcores, int n_threads) | |
2117 | { | |
2118 | /* Can only dynamically split if unsplit to begin with */ | |
2119 | if (n_subcores > 1 && threads_per_subcore < MAX_SMT_THREADS) | |
2120 | return false; | |
2121 | if (n_subcores > MAX_SUBCORES) | |
2122 | return false; | |
2123 | if (n_subcores > 1) { | |
2124 | if (!(dynamic_mt_modes & 2)) | |
2125 | n_subcores = 4; | |
2126 | if (n_subcores > 2 && !(dynamic_mt_modes & 4)) | |
2127 | return false; | |
2128 | } | |
2129 | ||
2130 | return n_subcores * roundup_pow_of_two(n_threads) <= MAX_SMT_THREADS; | |
ec257165 PM |
2131 | } |
2132 | ||
2133 | static void init_master_vcore(struct kvmppc_vcore *vc) | |
2134 | { | |
2135 | vc->master_vcore = vc; | |
2136 | vc->entry_exit_map = 0; | |
2137 | vc->in_guest = 0; | |
2138 | vc->napping_threads = 0; | |
2139 | vc->conferring_threads = 0; | |
2140 | } | |
2141 | ||
b4deba5c PM |
2142 | static bool can_dynamic_split(struct kvmppc_vcore *vc, struct core_info *cip) |
2143 | { | |
2144 | int n_threads = vc->num_threads; | |
2145 | int sub; | |
2146 | ||
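| /* dynamic micro-threading (split-core) needs a POWER8-class (ISA 2.07) core */ | |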
2147 | if (!cpu_has_feature(CPU_FTR_ARCH_207S)) | |
2148 | return false; | |
2149 | ||
2150 | if (n_threads < cip->max_subcore_threads) | |
2151 | n_threads = cip->max_subcore_threads; | |
b009031f | 2152 | if (!subcore_config_ok(cip->n_subcores + 1, n_threads)) |
b4deba5c | 2153 | return false; |
b009031f | 2154 | cip->max_subcore_threads = n_threads; |
b4deba5c PM |
2155 | |
2156 | sub = cip->n_subcores; | |
2157 | ++cip->n_subcores; | |
2158 | cip->total_threads += vc->num_threads; | |
2159 | cip->subcore_threads[sub] = vc->num_threads; | |
2160 | cip->subcore_vm[sub] = vc->kvm; | |
2161 | init_master_vcore(vc); | |
28d057c8 | 2162 | list_move_tail(&vc->preempt_list, &cip->vcs[sub]); |
b4deba5c PM |
2163 | |
2164 | return true; | |
2165 | } | |
2166 | ||
b4deba5c PM |
2167 | /* |
2168 | * Work out whether it is possible to piggyback the execution of | |
2169 | * vcore *pvc onto the execution of the other vcores described in *cip. | |
2170 | */ | |
2171 | static bool can_piggyback(struct kvmppc_vcore *pvc, struct core_info *cip, | |
2172 | int target_threads) | |
2173 | { | |
b4deba5c PM |
2174 | if (cip->total_threads + pvc->num_threads > target_threads) |
2175 | return false; | |
b4deba5c | 2176 | |
b009031f | 2177 | return can_dynamic_split(pvc, cip); |
b4deba5c PM |
2178 | } |
2179 | ||
d911f0be PM |
2180 | static void prepare_threads(struct kvmppc_vcore *vc) |
2181 | { | |
7b5f8272 SJS |
2182 | int i; |
2183 | struct kvm_vcpu *vcpu; | |
d911f0be | 2184 | |
7b5f8272 | 2185 | for_each_runnable_thread(i, vcpu, vc) { |
d911f0be PM |
2186 | if (signal_pending(vcpu->arch.run_task)) |
2187 | vcpu->arch.ret = -EINTR; | |
2188 | else if (vcpu->arch.vpa.update_pending || | |
2189 | vcpu->arch.slb_shadow.update_pending || | |
2190 | vcpu->arch.dtl.update_pending) | |
2191 | vcpu->arch.ret = RESUME_GUEST; | |
2192 | else | |
2193 | continue; | |
2194 | kvmppc_remove_runnable(vc, vcpu); | |
2195 | wake_up(&vcpu->arch.cpu_run); | |
2196 | } | |
2197 | } | |
2198 | ||
ec257165 PM |
2199 | static void collect_piggybacks(struct core_info *cip, int target_threads) |
2200 | { | |
2201 | struct preempted_vcore_list *lp = this_cpu_ptr(&preempted_vcores); | |
2202 | struct kvmppc_vcore *pvc, *vcnext; | |
2203 | ||
2204 | spin_lock(&lp->lock); | |
2205 | list_for_each_entry_safe(pvc, vcnext, &lp->list, preempt_list) { | |
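| /* trylock so that a busy vcore is skipped rather than waited on */ | |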
2206 | if (!spin_trylock(&pvc->lock)) | |
2207 | continue; | |
2208 | prepare_threads(pvc); | |
2209 | if (!pvc->n_runnable) { | |
2210 | list_del_init(&pvc->preempt_list); | |
2211 | if (pvc->runner == NULL) { | |
2212 | pvc->vcore_state = VCORE_INACTIVE; | |
2213 | kvmppc_core_end_stolen(pvc); | |
2214 | } | |
2215 | spin_unlock(&pvc->lock); | |
2216 | continue; | |
2217 | } | |
2218 | if (!can_piggyback(pvc, cip, target_threads)) { | |
2219 | spin_unlock(&pvc->lock); | |
2220 | continue; | |
2221 | } | |
2222 | kvmppc_core_end_stolen(pvc); | |
2223 | pvc->vcore_state = VCORE_PIGGYBACK; | |
2224 | if (cip->total_threads >= target_threads) | |
2225 | break; | |
2226 | } | |
2227 | spin_unlock(&lp->lock); | |
2228 | } | |
2229 | ||
2230 | static void post_guest_process(struct kvmppc_vcore *vc, bool is_master) | |
25fedfca | 2231 | { |
7b5f8272 | 2232 | int still_running = 0, i; |
25fedfca PM |
2233 | u64 now; |
2234 | long ret; | |
7b5f8272 | 2235 | struct kvm_vcpu *vcpu; |
25fedfca | 2236 | |
ec257165 | 2237 | spin_lock(&vc->lock); |
25fedfca | 2238 | now = get_tb(); |
7b5f8272 | 2239 | for_each_runnable_thread(i, vcpu, vc) { |
25fedfca PM |
2240 | /* cancel pending dec exception if dec is positive */ |
2241 | if (now < vcpu->arch.dec_expires && | |
2242 | kvmppc_core_pending_dec(vcpu)) | |
2243 | kvmppc_core_dequeue_dec(vcpu); | |
2244 | ||
2245 | trace_kvm_guest_exit(vcpu); | |
2246 | ||
2247 | ret = RESUME_GUEST; | |
2248 | if (vcpu->arch.trap) | |
2249 | ret = kvmppc_handle_exit_hv(vcpu->arch.kvm_run, vcpu, | |
2250 | vcpu->arch.run_task); | |
2251 | ||
2252 | vcpu->arch.ret = ret; | |
2253 | vcpu->arch.trap = 0; | |
2254 | ||
ec257165 PM |
2255 | if (is_kvmppc_resume_guest(vcpu->arch.ret)) { |
2256 | if (vcpu->arch.pending_exceptions) | |
2257 | kvmppc_core_prepare_to_enter(vcpu); | |
2258 | if (vcpu->arch.ceded) | |
25fedfca | 2259 | kvmppc_set_timer(vcpu); |
ec257165 PM |
2260 | else |
2261 | ++still_running; | |
2262 | } else { | |
25fedfca PM |
2263 | kvmppc_remove_runnable(vc, vcpu); |
2264 | wake_up(&vcpu->arch.cpu_run); | |
2265 | } | |
2266 | } | |
ec257165 PM |
2267 | list_del_init(&vc->preempt_list); |
2268 | if (!is_master) { | |
563a1e93 | 2269 | if (still_running > 0) { |
ec257165 | 2270 | kvmppc_vcore_preempt(vc); |
563a1e93 PM |
2271 | } else if (vc->runner) { |
2272 | vc->vcore_state = VCORE_PREEMPT; | |
2273 | kvmppc_core_start_stolen(vc); | |
2274 | } else { | |
2275 | vc->vcore_state = VCORE_INACTIVE; | |
2276 | } | |
ec257165 PM |
2277 | if (vc->n_runnable > 0 && vc->runner == NULL) { |
2278 | /* make sure there's a candidate runner awake */ | |
7b5f8272 SJS |
2279 | i = -1; |
2280 | vcpu = next_runnable_thread(vc, &i); | |
ec257165 PM |
2281 | wake_up(&vcpu->arch.cpu_run); |
2282 | } | |
2283 | } | |
2284 | spin_unlock(&vc->lock); | |
25fedfca PM |
2285 | } |
2286 | ||
b8e6a87c SW |
2287 | /* |
2288 | * Clear core from the list of active host cores as we are about to | |
2289 | * enter the guest. Only do this if it is the primary thread of the | |
2290 | * core (not if a subcore) that is entering the guest. | |
2291 | */ | |
2292 | static inline void kvmppc_clear_host_core(int cpu) | |
2293 | { | |
2294 | int core; | |
2295 | ||
2296 | if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) | |
2297 | return; | |
2298 | /* | |
2299 | * Memory barrier can be omitted here as we will do a smp_wmb() | |
2300 | * later in kvmppc_start_thread and we need to ensure that state is | |
2301 | * visible to other CPUs only after we enter guest. | |
2302 | */ | |
2303 | core = cpu >> threads_shift; | |
2304 | kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 0; | |
2305 | } | |
2306 | ||
2307 | /* | |
2308 | * Advertise this core as an active host core, since we exited the guest. | |
2309 | * Only need to do this if it is the primary thread of the core that is | |
2310 | * exiting. | |
2311 | */ | |
2312 | static inline void kvmppc_set_host_core(int cpu) | |
2313 | { | |
2314 | int core; | |
2315 | ||
2316 | if (!kvmppc_host_rm_ops_hv || cpu_thread_in_core(cpu)) | |
2317 | return; | |
2318 | ||
2319 | /* | |
2320 | * Memory barrier can be omitted here because we do a spin_unlock | |
2321 | * immediately after this which provides the memory barrier. | |
2322 | */ | |
2323 | core = cpu >> threads_shift; | |
2324 | kvmppc_host_rm_ops_hv->rm_core[core].rm_state.in_host = 1; | |
2325 | } | |
2326 | ||
371fefd6 PM |
2327 | /* |
2328 | * Run a set of guest threads on a physical core. | |
2329 | * Called with vc->lock held. | |
2330 | */ | |
66feed61 | 2331 | static noinline void kvmppc_run_core(struct kvmppc_vcore *vc) |
371fefd6 | 2332 | { |
7b5f8272 | 2333 | struct kvm_vcpu *vcpu; |
d911f0be | 2334 | int i; |
2c9097e4 | 2335 | int srcu_idx; |
ec257165 PM |
2336 | struct core_info core_info; |
2337 | struct kvmppc_vcore *pvc, *vcnext; | |
b4deba5c PM |
2338 | struct kvm_split_mode split_info, *sip; |
2339 | int split, subcore_size, active; | |
2340 | int sub; | |
2341 | bool thr0_done; | |
2342 | unsigned long cmd_bit, stat_bit; | |
ec257165 PM |
2343 | int pcpu, thr; |
2344 | int target_threads; | |
371fefd6 | 2345 | |
d911f0be PM |
2346 | /* |
2347 | * Remove from the list any threads that have a signal pending | |
2348 | * or need a VPA update done | |
2349 | */ | |
2350 | prepare_threads(vc); | |
2351 | ||
2352 | /* if the runner is no longer runnable, let the caller pick a new one */ | |
2353 | if (vc->runner->arch.state != KVMPPC_VCPU_RUNNABLE) | |
2354 | return; | |
081f323b PM |
2355 | |
2356 | /* | |
d911f0be | 2357 | * Initialize *vc. |
081f323b | 2358 | */ |
ec257165 | 2359 | init_master_vcore(vc); |
2711e248 | 2360 | vc->preempt_tb = TB_NIL; |
081f323b | 2361 | |
7b444c67 | 2362 | /* |
3102f784 ME |
2363 | * Make sure we are running on primary threads, and that secondary |
2364 | * threads are offline. Also check if the number of threads in this | |
2365 | * guest is greater than the current system threads per guest. | |
7b444c67 | 2366 | */ |
3102f784 ME |
2367 | if ((threads_per_core > 1) && |
2368 | ((vc->num_threads > threads_per_subcore) || !on_primary_thread())) { | |
7b5f8272 | 2369 | for_each_runnable_thread(i, vcpu, vc) { |
7b444c67 | 2370 | vcpu->arch.ret = -EBUSY; |
25fedfca PM |
2371 | kvmppc_remove_runnable(vc, vcpu); |
2372 | wake_up(&vcpu->arch.cpu_run); | |
2373 | } | |
7b444c67 PM |
2374 | goto out; |
2375 | } | |
2376 | ||
ec257165 PM |
2377 | /* |
2378 | * See if we could run any other vcores on the physical core | |
2379 | * along with this one. | |
2380 | */ | |
2381 | init_core_info(&core_info, vc); | |
2382 | pcpu = smp_processor_id(); | |
2383 | target_threads = threads_per_subcore; | |
2384 | if (target_smt_mode && target_smt_mode < target_threads) | |
2385 | target_threads = target_smt_mode; | |
2386 | if (vc->num_threads < target_threads) | |
2387 | collect_piggybacks(&core_info, target_threads); | |
3102f784 | 2388 | |
b4deba5c PM |
2389 | /* Decide on micro-threading (split-core) mode */ |
2390 | subcore_size = threads_per_subcore; | |
2391 | cmd_bit = stat_bit = 0; | |
2392 | split = core_info.n_subcores; | |
2393 | sip = NULL; | |
2394 | if (split > 1) { | |
2395 | /* threads_per_subcore must be MAX_SMT_THREADS (8) here */ | |
2396 | if (split == 2 && (dynamic_mt_modes & 2)) { | |
2397 | cmd_bit = HID0_POWER8_1TO2LPAR; | |
2398 | stat_bit = HID0_POWER8_2LPARMODE; | |
2399 | } else { | |
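| /* 3 or 4 subcores, or 2-way disallowed: use a 4-way split */ | |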
2400 | split = 4; | |
2401 | cmd_bit = HID0_POWER8_1TO4LPAR; | |
2402 | stat_bit = HID0_POWER8_4LPARMODE; | |
2403 | } | |
2404 | subcore_size = MAX_SMT_THREADS / split; | |
2405 | sip = &split_info; | |
2406 | memset(&split_info, 0, sizeof(split_info)); | |
2407 | split_info.rpr = mfspr(SPRN_RPR); | |
2408 | split_info.pmmar = mfspr(SPRN_PMMAR); | |
2409 | split_info.ldbar = mfspr(SPRN_LDBAR); | |
2410 | split_info.subcore_size = subcore_size; | |
2411 | for (sub = 0; sub < core_info.n_subcores; ++sub) | |
2412 | split_info.master_vcs[sub] = | |
2413 | list_first_entry(&core_info.vcs[sub], | |
2414 | struct kvmppc_vcore, preempt_list); | |
2415 | /* order writes to split_info before kvm_split_mode pointer */ | |
2416 | smp_wmb(); | |
2417 | } | |
2418 | pcpu = smp_processor_id(); | |
2419 | for (thr = 0; thr < threads_per_subcore; ++thr) | |
2420 | paca[pcpu + thr].kvm_hstate.kvm_split_mode = sip; | |
2421 | ||
2422 | /* Initiate micro-threading (split-core) if required */ | |
2423 | if (cmd_bit) { | |
2424 | unsigned long hid0 = mfspr(SPRN_HID0); | |
2425 | ||
2426 | hid0 |= cmd_bit | HID0_POWER8_DYNLPARDIS; | |
2427 | mb(); | |
2428 | mtspr(SPRN_HID0, hid0); | |
2429 | isync(); | |
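| /* wait for the hardware to report the new split mode in HID0 */ | |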
2430 | for (;;) { | |
2431 | hid0 = mfspr(SPRN_HID0); | |
2432 | if (hid0 & stat_bit) | |
2433 | break; | |
2434 | cpu_relax(); | |
ec257165 | 2435 | } |
2e25aa5f | 2436 | } |
3102f784 | 2437 | |
b8e6a87c SW |
2438 | kvmppc_clear_host_core(pcpu); |
2439 | ||
b4deba5c PM |
2440 | /* Start all the threads */ |
2441 | active = 0; | |
2442 | for (sub = 0; sub < core_info.n_subcores; ++sub) { | |
2443 | thr = subcore_thread_map[sub]; | |
2444 | thr0_done = false; | |
2445 | active |= 1 << thr; | |
2446 | list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) { | |
2447 | pvc->pcpu = pcpu + thr; | |
7b5f8272 | 2448 | for_each_runnable_thread(i, vcpu, pvc) { |
b4deba5c PM |
2449 | kvmppc_start_thread(vcpu, pvc); |
2450 | kvmppc_create_dtl_entry(vcpu, pvc); | |
2451 | trace_kvm_guest_enter(vcpu); | |
2452 | if (!vcpu->arch.ptid) | |
2453 | thr0_done = true; | |
2454 | active |= 1 << (thr + vcpu->arch.ptid); | |
2455 | } | |
2456 | /* | |
2457 | * We need to start the first thread of each subcore | |
2458 | * even if it doesn't have a vcpu. | |
2459 | */ | |
2460 | if (pvc->master_vcore == pvc && !thr0_done) | |
2461 | kvmppc_start_thread(NULL, pvc); | |
2462 | thr += pvc->num_threads; | |
2463 | } | |
2e25aa5f | 2464 | } |
371fefd6 | 2465 | |
7f235328 GS |
2466 | /* |
2467 | * Ensure that split_info.do_nap is set after setting | |
2468 | * the vcore pointer in the PACA of the secondaries. | |
2469 | */ | |
2470 | smp_mb(); | |
2471 | if (cmd_bit) | |
2472 | split_info.do_nap = 1; /* ask secondaries to nap when done */ | |
2473 | ||
b4deba5c PM |
2474 | /* |
2475 | * When doing micro-threading, poke the inactive threads as well. | |
2476 | * This gets them to the nap instruction after kvm_do_nap, | |
2477 | * which reduces the time taken to unsplit later. | |
2478 | */ | |
2479 | if (split > 1) | |
2480 | for (thr = 1; thr < threads_per_subcore; ++thr) | |
2481 | if (!(active & (1 << thr))) | |
2482 | kvmppc_ipi_thread(pcpu + thr); | |
e0b7ec05 | 2483 | |
2f12f034 | 2484 | vc->vcore_state = VCORE_RUNNING; |
19ccb76a | 2485 | preempt_disable(); |
3c78f78a SW |
2486 | |
2487 | trace_kvmppc_run_core(vc, 0); | |
2488 | ||
b4deba5c PM |
2489 | for (sub = 0; sub < core_info.n_subcores; ++sub) |
2490 | list_for_each_entry(pvc, &core_info.vcs[sub], preempt_list) | |
2491 | spin_unlock(&pvc->lock); | |
de56a948 | 2492 | |
6edaa530 | 2493 | guest_enter(); |
2c9097e4 | 2494 | |
e0b7ec05 | 2495 | srcu_idx = srcu_read_lock(&vc->kvm->srcu); |
2c9097e4 | 2496 | |
e0b7ec05 | 2497 | __kvmppc_vcore_entry(); |
de56a948 | 2498 | |
ec257165 PM |
2499 | srcu_read_unlock(&vc->kvm->srcu, srcu_idx); |
2500 | ||
2501 | spin_lock(&vc->lock); | |
371fefd6 | 2502 | /* prevent other vcpu threads from doing kvmppc_start_thread() now */ |
19ccb76a | 2503 | vc->vcore_state = VCORE_EXITING; |
371fefd6 | 2504 | |
19ccb76a | 2505 | /* wait for secondary threads to finish writing their state to memory */ |
5d5b99cd | 2506 | kvmppc_wait_for_nap(); |
b4deba5c PM |
2507 | |
2508 | /* Return to whole-core mode if we split the core earlier */ | |
2509 | if (split > 1) { | |
2510 | unsigned long hid0 = mfspr(SPRN_HID0); | |
2511 | unsigned long loops = 0; | |
2512 | ||
2513 | hid0 &= ~HID0_POWER8_DYNLPARDIS; | |
2514 | stat_bit = HID0_POWER8_2LPARMODE | HID0_POWER8_4LPARMODE; | |
2515 | mb(); | |
2516 | mtspr(SPRN_HID0, hid0); | |
2517 | isync(); | |
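| /* poll until both LPAR-mode status bits clear, i.e. whole-core mode */ | |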
2518 | for (;;) { | |
2519 | hid0 = mfspr(SPRN_HID0); | |
2520 | if (!(hid0 & stat_bit)) | |
2521 | break; | |
2522 | cpu_relax(); | |
2523 | ++loops; | |
2524 | } | |
2525 | split_info.do_nap = 0; | |
2526 | } | |
2527 | ||
2528 | /* Let secondaries go back to the offline loop */ | |
2529 | for (i = 0; i < threads_per_subcore; ++i) { | |
2530 | kvmppc_release_hwthread(pcpu + i); | |
2531 | if (sip && sip->napped[i]) | |
2532 | kvmppc_ipi_thread(pcpu + i); | |
2533 | } | |
2534 | ||
b8e6a87c SW |
2535 | kvmppc_set_host_core(pcpu); |
2536 | ||
371fefd6 | 2537 | spin_unlock(&vc->lock); |
2c9097e4 | 2538 | |
371fefd6 PM |
2539 | /* make sure updates to secondary vcpu structs are visible now */ |
2540 | smp_mb(); | |
6edaa530 | 2541 | guest_exit(); |
de56a948 | 2542 | |
b4deba5c PM |
2543 | for (sub = 0; sub < core_info.n_subcores; ++sub) |
2544 | list_for_each_entry_safe(pvc, vcnext, &core_info.vcs[sub], | |
2545 | preempt_list) | |
2546 | post_guest_process(pvc, pvc == vc); | |
de56a948 | 2547 | |
913d3ff9 | 2548 | spin_lock(&vc->lock); |
ec257165 | 2549 | preempt_enable(); |
de56a948 PM |
2550 | |
2551 | out: | |
19ccb76a | 2552 | vc->vcore_state = VCORE_INACTIVE; |
3c78f78a | 2553 | trace_kvmppc_run_core(vc, 1); |
371fefd6 PM |
2554 | } |
2555 | ||
19ccb76a PM |
2556 | /* |
2557 | * Wait for some other vcpu thread to execute us, and | |
2558 | * wake us up when we need to handle something in the host. | |
2559 | */ | |
ec257165 PM |
2560 | static void kvmppc_wait_for_exec(struct kvmppc_vcore *vc, |
2561 | struct kvm_vcpu *vcpu, int wait_state) | |
371fefd6 | 2562 | { |
371fefd6 PM |
2563 | DEFINE_WAIT(wait); |
2564 | ||
19ccb76a | 2565 | prepare_to_wait(&vcpu->arch.cpu_run, &wait, wait_state); |
ec257165 PM |
2566 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { |
2567 | spin_unlock(&vc->lock); | |
19ccb76a | 2568 | schedule(); |
ec257165 PM |
2569 | spin_lock(&vc->lock); |
2570 | } | |
19ccb76a PM |
2571 | finish_wait(&vcpu->arch.cpu_run, &wait); |
2572 | } | |
2573 | ||
0cda69dd SJS |
2574 | static void grow_halt_poll_ns(struct kvmppc_vcore *vc) |
2575 | { | |
2576 | /* 10us base */ | |
2577 | if (vc->halt_poll_ns == 0 && halt_poll_ns_grow) | |
2578 | vc->halt_poll_ns = 10000; | |
2579 | else | |
2580 | vc->halt_poll_ns *= halt_poll_ns_grow; | |
2581 | ||
2582 | if (vc->halt_poll_ns > halt_poll_max_ns) | |
2583 | vc->halt_poll_ns = halt_poll_max_ns; | |
2584 | } | |
2585 | ||
2586 | static void shrink_halt_poll_ns(struct kvmppc_vcore *vc) | |
2587 | { | |
2588 | if (halt_poll_ns_shrink == 0) | |
2589 | vc->halt_poll_ns = 0; | |
2590 | else | |
2591 | vc->halt_poll_ns /= halt_poll_ns_shrink; | |
2592 | } | |
2593 | ||
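| /* | |
| * Worked example, with hypothetical parameter values: if | |
| * halt_poll_ns_grow = 2 and halt_poll_max_ns = 160000, repeated grows | |
| * take the poll window 0 -> 10000 -> 20000 -> 40000 -> 80000 -> | |
| * 160000 ns, while with halt_poll_ns_shrink = 0 a single shrink | |
| * resets it straight back to 0. | |
| */ | |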
2594 | /* Check to see if any of the runnable vcpus on the vcore have pending | |
2595 | * exceptions or are no longer ceded | |
2596 | */ | |
2597 | static int kvmppc_vcore_check_block(struct kvmppc_vcore *vc) | |
2598 | { | |
2599 | struct kvm_vcpu *vcpu; | |
2600 | int i; | |
2601 | ||
2602 | for_each_runnable_thread(i, vcpu, vc) { | |
2603 | if (vcpu->arch.pending_exceptions || !vcpu->arch.ceded) | |
2604 | return 1; | |
2605 | } | |
2606 | ||
2607 | return 0; | |
2608 | } | |
2609 | ||
19ccb76a PM |
2610 | /* |
2611 | * All the vcpus in this vcore are idle, so wait for a decrementer | |
2612 | * or external interrupt to one of the vcpus. vc->lock is held. | |
2613 | */ | |
2614 | static void kvmppc_vcore_blocked(struct kvmppc_vcore *vc) | |
2615 | { | |
2a27f514 | 2616 | ktime_t cur, start_poll, start_wait; |
0cda69dd | 2617 | int do_sleep = 1; |
0cda69dd | 2618 | u64 block_ns; |
8577370f | 2619 | DECLARE_SWAITQUEUE(wait); |
1bc5d59c | 2620 | |
0cda69dd | 2621 | /* Poll for pending exceptions and ceded state */ |
2a27f514 | 2622 | cur = start_poll = ktime_get(); |
0cda69dd | 2623 | if (vc->halt_poll_ns) { |
2a27f514 SJS |
2624 | ktime_t stop = ktime_add_ns(start_poll, vc->halt_poll_ns); |
2625 | ++vc->runner->stat.halt_attempted_poll; | |
1bc5d59c | 2626 | |
0cda69dd SJS |
2627 | vc->vcore_state = VCORE_POLLING; |
2628 | spin_unlock(&vc->lock); | |
2629 | ||
2630 | do { | |
2631 | if (kvmppc_vcore_check_block(vc)) { | |
2632 | do_sleep = 0; | |
2633 | break; | |
2634 | } | |
2635 | cur = ktime_get(); | |
2636 | } while (single_task_running() && ktime_before(cur, stop)); | |
2637 | ||
2638 | spin_lock(&vc->lock); | |
2639 | vc->vcore_state = VCORE_INACTIVE; | |
2640 | ||
2a27f514 SJS |
2641 | if (!do_sleep) { |
2642 | ++vc->runner->stat.halt_successful_poll; | |
0cda69dd | 2643 | goto out; |
2a27f514 | 2644 | } |
1bc5d59c SW |
2645 | } |
2646 | ||
0cda69dd SJS |
2647 | prepare_to_swait(&vc->wq, &wait, TASK_INTERRUPTIBLE); |
2648 | ||
2649 | if (kvmppc_vcore_check_block(vc)) { | |
8577370f | 2650 | finish_swait(&vc->wq, &wait); |
0cda69dd | 2651 | do_sleep = 0; |
2a27f514 SJS |
2652 | /* If we polled, count this as a successful poll */ |
2653 | if (vc->halt_poll_ns) | |
2654 | ++vc->runner->stat.halt_successful_poll; | |
0cda69dd | 2655 | goto out; |
1bc5d59c SW |
2656 | } |
2657 | ||
2a27f514 SJS |
2658 | start_wait = ktime_get(); |
2659 | ||
19ccb76a | 2660 | vc->vcore_state = VCORE_SLEEPING; |
3c78f78a | 2661 | trace_kvmppc_vcore_blocked(vc, 0); |
19ccb76a | 2662 | spin_unlock(&vc->lock); |
913d3ff9 | 2663 | schedule(); |
8577370f | 2664 | finish_swait(&vc->wq, &wait); |
19ccb76a PM |
2665 | spin_lock(&vc->lock); |
2666 | vc->vcore_state = VCORE_INACTIVE; | |
3c78f78a | 2667 | trace_kvmppc_vcore_blocked(vc, 1); |
2a27f514 | 2668 | ++vc->runner->stat.halt_successful_wait; |
0cda69dd SJS |
2669 | |
2670 | cur = ktime_get(); | |
2671 | ||
2672 | out: | |
2a27f514 SJS |
2673 | block_ns = ktime_to_ns(cur) - ktime_to_ns(start_poll); |
2674 | ||
2675 | /* Attribute wait time */ | |
2676 | if (do_sleep) { | |
2677 | vc->runner->stat.halt_wait_ns += | |
2678 | ktime_to_ns(cur) - ktime_to_ns(start_wait); | |
2679 | /* Attribute failed poll time */ | |
2680 | if (vc->halt_poll_ns) | |
2681 | vc->runner->stat.halt_poll_fail_ns += | |
2682 | ktime_to_ns(start_wait) - | |
2683 | ktime_to_ns(start_poll); | |
2684 | } else { | |
2685 | /* Attribute successful poll time */ | |
2686 | if (vc->halt_poll_ns) | |
2687 | vc->runner->stat.halt_poll_success_ns += | |
2688 | ktime_to_ns(cur) - | |
2689 | ktime_to_ns(start_poll); | |
2690 | } | |
0cda69dd SJS |
2691 | |
2692 | /* Adjust poll time */ | |
2693 | if (halt_poll_max_ns) { | |
2694 | if (block_ns <= vc->halt_poll_ns) | |
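| /* we woke within the current poll window: leave halt_poll_ns as is */ | |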
2695 | ; | |
2696 | /* We slept and blocked for longer than the max halt time */ | |
2697 | else if (vc->halt_poll_ns && block_ns > halt_poll_max_ns) | |
2698 | shrink_halt_poll_ns(vc); | |
2699 | /* We slept and our poll time is too small */ | |
2700 | else if (vc->halt_poll_ns < halt_poll_max_ns && | |
2701 | block_ns < halt_poll_max_ns) | |
2702 | grow_halt_poll_ns(vc); | |
2703 | } else | |
2704 | vc->halt_poll_ns = 0; | |
2705 | ||
2706 | trace_kvmppc_vcore_wakeup(do_sleep, block_ns); | |
19ccb76a | 2707 | } |
371fefd6 | 2708 | |
19ccb76a PM |
2709 | static int kvmppc_run_vcpu(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu) |
2710 | { | |
7b5f8272 | 2711 | int n_ceded, i; |
19ccb76a | 2712 | struct kvmppc_vcore *vc; |
7b5f8272 | 2713 | struct kvm_vcpu *v; |
9e368f29 | 2714 | |
3c78f78a SW |
2715 | trace_kvmppc_run_vcpu_enter(vcpu); |
2716 | ||
371fefd6 PM |
2717 | kvm_run->exit_reason = 0; |
2718 | vcpu->arch.ret = RESUME_GUEST; | |
2719 | vcpu->arch.trap = 0; | |
2f12f034 | 2720 | kvmppc_update_vpas(vcpu); |
371fefd6 | 2721 | |
371fefd6 PM |
2722 | /* |
2723 | * Synchronize with other threads in this virtual core | |
2724 | */ | |
2725 | vc = vcpu->arch.vcore; | |
2726 | spin_lock(&vc->lock); | |
19ccb76a | 2727 | vcpu->arch.ceded = 0; |
371fefd6 PM |
2728 | vcpu->arch.run_task = current; |
2729 | vcpu->arch.kvm_run = kvm_run; | |
c7b67670 | 2730 | vcpu->arch.stolen_logged = vcore_stolen_time(vc, mftb()); |
19ccb76a | 2731 | vcpu->arch.state = KVMPPC_VCPU_RUNNABLE; |
c7b67670 | 2732 | vcpu->arch.busy_preempt = TB_NIL; |
7b5f8272 | 2733 | WRITE_ONCE(vc->runnable_threads[vcpu->arch.ptid], vcpu); |
371fefd6 PM |
2734 | ++vc->n_runnable; |
2735 | ||
19ccb76a PM |
2736 | /* |
2737 | * This happens the first time this is called for a vcpu. | |
2738 | * If the vcore is already running, we may be able to start | |
2739 | * this thread straight away and have it join in. | |
2740 | */ | |
8455d79e | 2741 | if (!signal_pending(current)) { |
ec257165 PM |
2742 | if (vc->vcore_state == VCORE_PIGGYBACK) { |
2743 | struct kvmppc_vcore *mvc = vc->master_vcore; | |
2744 | if (spin_trylock(&mvc->lock)) { | |
2745 | if (mvc->vcore_state == VCORE_RUNNING && | |
2746 | !VCORE_IS_EXITING(mvc)) { | |
2747 | kvmppc_create_dtl_entry(vcpu, vc); | |
b4deba5c | 2748 | kvmppc_start_thread(vcpu, vc); |
ec257165 PM |
2749 | trace_kvm_guest_enter(vcpu); |
2750 | } | |
2751 | spin_unlock(&mvc->lock); | |
2752 | } | |
2753 | } else if (vc->vcore_state == VCORE_RUNNING && | |
2754 | !VCORE_IS_EXITING(vc)) { | |
2f12f034 | 2755 | kvmppc_create_dtl_entry(vcpu, vc); |
b4deba5c | 2756 | kvmppc_start_thread(vcpu, vc); |
3c78f78a | 2757 | trace_kvm_guest_enter(vcpu); |
8455d79e | 2758 | } else if (vc->vcore_state == VCORE_SLEEPING) { |
8577370f | 2759 | swake_up(&vc->wq); |
371fefd6 PM |
2760 | } |
2761 | ||
8455d79e | 2762 | } |
371fefd6 | 2763 | |
19ccb76a PM |
2764 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
2765 | !signal_pending(current)) { | |
ec257165 PM |
2766 | if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) |
2767 | kvmppc_vcore_end_preempt(vc); | |
2768 | ||
8455d79e | 2769 | if (vc->vcore_state != VCORE_INACTIVE) { |
ec257165 | 2770 | kvmppc_wait_for_exec(vc, vcpu, TASK_INTERRUPTIBLE); |
19ccb76a PM |
2771 | continue; |
2772 | } | |
7b5f8272 | 2773 | for_each_runnable_thread(i, v, vc) { |
7e28e60e | 2774 | kvmppc_core_prepare_to_enter(v); |
19ccb76a PM |
2775 | if (signal_pending(v->arch.run_task)) { |
2776 | kvmppc_remove_runnable(vc, v); | |
2777 | v->stat.signal_exits++; | |
2778 | v->arch.kvm_run->exit_reason = KVM_EXIT_INTR; | |
2779 | v->arch.ret = -EINTR; | |
2780 | wake_up(&v->arch.cpu_run); | |
2781 | } | |
2782 | } | |
8455d79e PM |
2783 | if (!vc->n_runnable || vcpu->arch.state != KVMPPC_VCPU_RUNNABLE) |
2784 | break; | |
8455d79e | 2785 | n_ceded = 0; |
7b5f8272 | 2786 | for_each_runnable_thread(i, v, vc) { |
8455d79e PM |
2787 | if (!v->arch.pending_exceptions) |
2788 | n_ceded += v->arch.ceded; | |
4619ac88 PM |
2789 | else |
2790 | v->arch.ceded = 0; | |
2791 | } | |
25fedfca PM |
2792 | vc->runner = vcpu; |
2793 | if (n_ceded == vc->n_runnable) { | |
8455d79e | 2794 | kvmppc_vcore_blocked(vc); |
c56dadf3 | 2795 | } else if (need_resched()) { |
ec257165 | 2796 | kvmppc_vcore_preempt(vc); |
25fedfca PM |
2797 | /* Let something else run */ |
2798 | cond_resched_lock(&vc->lock); | |
ec257165 PM |
2799 | if (vc->vcore_state == VCORE_PREEMPT) |
2800 | kvmppc_vcore_end_preempt(vc); | |
25fedfca | 2801 | } else { |
8455d79e | 2802 | kvmppc_run_core(vc); |
25fedfca | 2803 | } |
0456ec4f | 2804 | vc->runner = NULL; |
19ccb76a | 2805 | } |
371fefd6 | 2806 | |
8455d79e PM |
2807 | while (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE && |
2808 | (vc->vcore_state == VCORE_RUNNING || | |
5fc3e64f PM |
2809 | vc->vcore_state == VCORE_EXITING || |
2810 | vc->vcore_state == VCORE_PIGGYBACK)) | |
ec257165 | 2811 | kvmppc_wait_for_exec(vc, vcpu, TASK_UNINTERRUPTIBLE); |
8455d79e | 2812 | |
5fc3e64f PM |
2813 | if (vc->vcore_state == VCORE_PREEMPT && vc->runner == NULL) |
2814 | kvmppc_vcore_end_preempt(vc); | |
2815 | ||
8455d79e PM |
2816 | if (vcpu->arch.state == KVMPPC_VCPU_RUNNABLE) { |
2817 | kvmppc_remove_runnable(vc, vcpu); | |
2818 | vcpu->stat.signal_exits++; | |
2819 | kvm_run->exit_reason = KVM_EXIT_INTR; | |
2820 | vcpu->arch.ret = -EINTR; | |
2821 | } | |
2822 | ||
2823 | if (vc->n_runnable && vc->vcore_state == VCORE_INACTIVE) { | |
2824 | /* Wake up some vcpu to run the core */ | |
7b5f8272 SJS |
2825 | i = -1; |
2826 | v = next_runnable_thread(vc, &i); | |
8455d79e | 2827 | wake_up(&v->arch.cpu_run); |
371fefd6 PM |
2828 | } |
2829 | ||
3c78f78a | 2830 | trace_kvmppc_run_vcpu_exit(vcpu, kvm_run); |
371fefd6 | 2831 | spin_unlock(&vc->lock); |
371fefd6 | 2832 | return vcpu->arch.ret; |
de56a948 PM |
2833 | } |
2834 | ||
3a167bea | 2835 | static int kvmppc_vcpu_run_hv(struct kvm_run *run, struct kvm_vcpu *vcpu) |
a8606e20 PM |
2836 | { |
2837 | int r; | |
913d3ff9 | 2838 | int srcu_idx; |
a8606e20 | 2839 | |
af8f38b3 AG |
2840 | if (!vcpu->arch.sane) { |
2841 | run->exit_reason = KVM_EXIT_INTERNAL_ERROR; | |
2842 | return -EINVAL; | |
2843 | } | |
2844 | ||
25051b5a SW |
2845 | kvmppc_core_prepare_to_enter(vcpu); |
2846 | ||
19ccb76a PM |
2847 | /* No need to go into the guest when all we'll do is come back out */ |
2848 | if (signal_pending(current)) { | |
2849 | run->exit_reason = KVM_EXIT_INTR; | |
2850 | return -EINTR; | |
2851 | } | |
2852 | ||
32fad281 | 2853 | atomic_inc(&vcpu->kvm->arch.vcpus_running); |
31037eca | 2854 | /* Order vcpus_running vs. hpte_setup_done, see kvmppc_alloc_reset_hpt */ |
32fad281 PM |
2855 | smp_mb(); |
2856 | ||
c17b98cf | 2857 | /* On the first time here, set up HTAB and VRMA */ |
31037eca | 2858 | if (!vcpu->kvm->arch.hpte_setup_done) { |
32fad281 | 2859 | r = kvmppc_hv_setup_htab_rma(vcpu); |
c77162de | 2860 | if (r) |
32fad281 | 2861 | goto out; |
c77162de | 2862 | } |
19ccb76a | 2863 | |
579e633e AB |
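| /* flush FP/VMX/VSX state to the thread struct before loading guest values */ | |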
2864 | flush_all_to_thread(current); |
2865 | ||
19ccb76a | 2866 | vcpu->arch.wqp = &vcpu->arch.vcore->wq; |
342d3db7 | 2867 | vcpu->arch.pgdir = current->mm->pgd; |
c7b67670 | 2868 | vcpu->arch.state = KVMPPC_VCPU_BUSY_IN_HOST; |
19ccb76a | 2869 | |
a8606e20 PM |
2870 | do { |
2871 | r = kvmppc_run_vcpu(run, vcpu); | |
2872 | ||
2873 | if (run->exit_reason == KVM_EXIT_PAPR_HCALL && | |
2874 | !(vcpu->arch.shregs.msr & MSR_PR)) { | |
3c78f78a | 2875 | trace_kvm_hcall_enter(vcpu); |
a8606e20 | 2876 | r = kvmppc_pseries_do_hcall(vcpu); |
3c78f78a | 2877 | trace_kvm_hcall_exit(vcpu, r); |
7e28e60e | 2878 | kvmppc_core_prepare_to_enter(vcpu); |
913d3ff9 PM |
2879 | } else if (r == RESUME_PAGE_FAULT) { |
2880 | srcu_idx = srcu_read_lock(&vcpu->kvm->srcu); | |
2881 | r = kvmppc_book3s_hv_page_fault(run, vcpu, | |
2882 | vcpu->arch.fault_dar, vcpu->arch.fault_dsisr); | |
2883 | srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx); | |
f7af5209 SW |
2884 | } else if (r == RESUME_PASSTHROUGH) |
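| /* finish a passthrough interrupt the real-mode XICS code couldn't complete */ | |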
2885 | r = kvmppc_xics_rm_complete(vcpu, 0); | |
e59d24e6 | 2886 | } while (is_kvmppc_resume_guest(r)); |
32fad281 PM |
2887 | |
2888 | out: | |
c7b67670 | 2889 | vcpu->arch.state = KVMPPC_VCPU_NOTREADY; |
32fad281 | 2890 | atomic_dec(&vcpu->kvm->arch.vcpus_running); |
a8606e20 PM |
2891 | return r; |
2892 | } | |
2893 | ||
5b74716e BH |
2894 | static void kvmppc_add_seg_page_size(struct kvm_ppc_one_seg_page_size **sps, |
2895 | int linux_psize) | |
2896 | { | |
2897 | struct mmu_psize_def *def = &mmu_psize_defs[linux_psize]; | |
2898 | ||
2899 | if (!def->shift) | |
2900 | return; | |
2901 | (*sps)->page_shift = def->shift; | |
2902 | (*sps)->slb_enc = def->sllp; | |
2903 | (*sps)->enc[0].page_shift = def->shift; | |
b1022fbd | 2904 | (*sps)->enc[0].pte_enc = def->penc[linux_psize]; |
1f365bb0 AK |
2905 | /* |
2906 | * Add 16MB MPSS support if host supports it | |
2907 | */ | |
2908 | if (linux_psize != MMU_PAGE_16M && def->penc[MMU_PAGE_16M] != -1) { | |
2909 | (*sps)->enc[1].page_shift = 24; | |
2910 | (*sps)->enc[1].pte_enc = def->penc[MMU_PAGE_16M]; | |
2911 | } | |
5b74716e BH |
2912 | (*sps)++; |
2913 | } | |
2914 | ||
3a167bea AK |
2915 | static int kvm_vm_ioctl_get_smmu_info_hv(struct kvm *kvm, |
2916 | struct kvm_ppc_smmu_info *info) | |
5b74716e BH |
2917 | { |
2918 | struct kvm_ppc_one_seg_page_size *sps; | |
2919 | ||
2920 | info->flags = KVM_PPC_PAGE_SIZES_REAL; | |
2921 | if (mmu_has_feature(MMU_FTR_1T_SEGMENT)) | |
2922 | info->flags |= KVM_PPC_1T_SEGMENTS; | |
2923 | info->slb_size = mmu_slb_size; | |
2924 | ||
2925 | /* We only support these sizes for now, and no multi-size segments */ | |
2926 | sps = &info->sps[0]; | |
2927 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_4K); | |
2928 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_64K); | |
2929 | kvmppc_add_seg_page_size(&sps, MMU_PAGE_16M); | |
2930 | ||
2931 | return 0; | |
2932 | } | |
2933 | ||
82ed3616 PM |
2934 | /* |
2935 | * Get (and clear) the dirty memory log for a memory slot. | |
2936 | */ | |
3a167bea AK |
2937 | static int kvm_vm_ioctl_get_dirty_log_hv(struct kvm *kvm, |
2938 | struct kvm_dirty_log *log) | |
82ed3616 | 2939 | { |
9f6b8029 | 2940 | struct kvm_memslots *slots; |
82ed3616 PM |
2941 | struct kvm_memory_slot *memslot; |
2942 | int r; | |
2943 | unsigned long n; | |
2944 | ||
2945 | mutex_lock(&kvm->slots_lock); | |
2946 | ||
2947 | r = -EINVAL; | |
bbacc0c1 | 2948 | if (log->slot >= KVM_USER_MEM_SLOTS) |
82ed3616 PM |
2949 | goto out; |
2950 | ||
9f6b8029 PB |
2951 | slots = kvm_memslots(kvm); |
2952 | memslot = id_to_memslot(slots, log->slot); | |
82ed3616 PM |
2953 | r = -ENOENT; |
2954 | if (!memslot->dirty_bitmap) | |
2955 | goto out; | |
2956 | ||
2957 | n = kvm_dirty_bitmap_bytes(memslot); | |
2958 | memset(memslot->dirty_bitmap, 0, n); | |
2959 | ||
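| /* harvest dirty bits from the HPT rmap arrays into the cleared bitmap */ | |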
dfe49dbd | 2960 | r = kvmppc_hv_get_dirty_log(kvm, memslot, memslot->dirty_bitmap); |
82ed3616 PM |
2961 | if (r) |
2962 | goto out; | |
2963 | ||
2964 | r = -EFAULT; | |
2965 | if (copy_to_user(log->dirty_bitmap, memslot->dirty_bitmap, n)) | |
2966 | goto out; | |
2967 | ||
2968 | r = 0; | |
2969 | out: | |
2970 | mutex_unlock(&kvm->slots_lock); | |
2971 | return r; | |
2972 | } | |
2973 | ||
3a167bea AK |
2974 | static void kvmppc_core_free_memslot_hv(struct kvm_memory_slot *free, |
2975 | struct kvm_memory_slot *dont) | |
a66b48c3 PM |
2976 | { |
2977 | if (!dont || free->arch.rmap != dont->arch.rmap) { | |
2978 | vfree(free->arch.rmap); | |
2979 | free->arch.rmap = NULL; | |
b2b2f165 | 2980 | } |
a66b48c3 PM |
2981 | } |
2982 | ||
3a167bea AK |
2983 | static int kvmppc_core_create_memslot_hv(struct kvm_memory_slot *slot, |
2984 | unsigned long npages) | |
a66b48c3 PM |
2985 | { |
2986 | slot->arch.rmap = vzalloc(npages * sizeof(*slot->arch.rmap)); | |
2987 | if (!slot->arch.rmap) | |
2988 | return -ENOMEM; | |
aa04b4cc | 2989 | |
c77162de PM |
2990 | return 0; |
2991 | } | |
aa04b4cc | 2992 | |
3a167bea AK |
2993 | static int kvmppc_core_prepare_memory_region_hv(struct kvm *kvm, |
2994 | struct kvm_memory_slot *memslot, | |
09170a49 | 2995 | const struct kvm_userspace_memory_region *mem) |
c77162de | 2996 | { |
a66b48c3 | 2997 | return 0; |
c77162de PM |
2998 | } |
2999 | ||
3a167bea | 3000 | static void kvmppc_core_commit_memory_region_hv(struct kvm *kvm, |
09170a49 | 3001 | const struct kvm_userspace_memory_region *mem, |
f36f3f28 PB |
3002 | const struct kvm_memory_slot *old, |
3003 | const struct kvm_memory_slot *new) | |
c77162de | 3004 | { |
dfe49dbd | 3005 | unsigned long npages = mem->memory_size >> PAGE_SHIFT; |
9f6b8029 | 3006 | struct kvm_memslots *slots; |
dfe49dbd PM |
3007 | struct kvm_memory_slot *memslot; |
3008 | ||
a56ee9f8 YX |
3009 | /* |
3010 | * If we are creating a new memslot, an address that was |
3011 | * previously cached as emulated MMIO may no longer be |
3012 | * emulated MMIO, so invalidate all caches of emulated |
3013 | * MMIO translations. |
3014 | */ | |
3015 | if (npages) | |
3016 | atomic64_inc(&kvm->arch.mmio_update); | |
3017 | ||
8482644a | 3018 | if (npages && old->npages) { |
dfe49dbd PM |
3019 | /* |
3020 | * If modifying a memslot, reset all the rmap dirty bits. | |
3021 | * If this is a new memslot, we don't need to do anything | |
3022 | * since the rmap array starts out as all zeroes, | |
3023 | * i.e. no pages are dirty. | |
3024 | */ | |
9f6b8029 PB |
3025 | slots = kvm_memslots(kvm); |
3026 | memslot = id_to_memslot(slots, mem->slot); | |
dfe49dbd PM |
3027 | kvmppc_hv_get_dirty_log(kvm, memslot, NULL); |
3028 | } | |
c77162de PM |
3029 | } |
3030 | ||
a0144e2a PM |
3031 | /* |
3032 | * Update LPCR values in kvm->arch and in vcores. | |
3033 | * Caller must hold kvm->lock. | |
3034 | */ | |
3035 | void kvmppc_update_lpcr(struct kvm *kvm, unsigned long lpcr, unsigned long mask) | |
3036 | { | |
3037 | long int i; | |
3038 | u32 cores_done = 0; | |
3039 | ||
3040 | if ((kvm->arch.lpcr & mask) == lpcr) | |
3041 | return; | |
3042 | ||
3043 | kvm->arch.lpcr = (kvm->arch.lpcr & ~mask) | lpcr; | |
3044 | ||
3045 | for (i = 0; i < KVM_MAX_VCORES; ++i) { | |
3046 | struct kvmppc_vcore *vc = kvm->arch.vcores[i]; | |
3047 | if (!vc) | |
3048 | continue; | |
3049 | spin_lock(&vc->lock); | |
3050 | vc->lpcr = (vc->lpcr & ~mask) | lpcr; | |
3051 | spin_unlock(&vc->lock); | |
3052 | if (++cores_done >= kvm->arch.online_vcores) | |
3053 | break; | |
3054 | } | |
3055 | } | |
3056 | ||
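/*
 * Usage sketch (illustrative, not from the original file): callers
 * update one LPCR field at a time by passing the new value together
 * with a mask covering just that field, with kvm->lock held, e.g.
 * to set the interrupt-little-endian bit:
 *
 *	mutex_lock(&kvm->lock);
 *	kvmppc_update_lpcr(kvm, LPCR_ILE, LPCR_ILE);
 *	mutex_unlock(&kvm->lock);
 *
 * The LPCR_VRMASD update in kvmppc_hv_setup_htab_rma() below is an
 * in-tree instance of the same pattern.
 */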
3a167bea AK |
3057 | static void kvmppc_mmu_destroy_hv(struct kvm_vcpu *vcpu) |
3058 | { | |
3059 | return; | |
3060 | } | |
3061 | ||
7a84084c PM |
3062 | static void kvmppc_setup_partition_table(struct kvm *kvm) |
3063 | { | |
3064 | unsigned long dw0, dw1; | |
3065 | ||
3066 | /* PS field - page size for VRMA */ | |
3067 | dw0 = ((kvm->arch.vrma_slb_v & SLB_VSID_L) >> 1) | | |
3068 | ((kvm->arch.vrma_slb_v & SLB_VSID_LP) << 1); | |
3069 | /* HTABSIZE and HTABORG fields */ | |
3070 | dw0 |= kvm->arch.sdr1; | |
3071 | ||
3072 | /* Second dword has GR=0; other fields are unused since UPRT=0 */ | |
3073 | dw1 = 0; | |
3074 | ||
3075 | mmu_partition_table_set_entry(kvm->arch.lpid, dw0, dw1); | |
3076 | } | |
3077 | ||
32fad281 | 3078 | static int kvmppc_hv_setup_htab_rma(struct kvm_vcpu *vcpu) |
c77162de PM |
3079 | { |
3080 | int err = 0; | |
3081 | struct kvm *kvm = vcpu->kvm; | |
c77162de PM |
3082 | unsigned long hva; |
3083 | struct kvm_memory_slot *memslot; | |
3084 | struct vm_area_struct *vma; | |
a0144e2a | 3085 | unsigned long lpcr = 0, senc; |
c77162de | 3086 | unsigned long psize, porder; |
2c9097e4 | 3087 | int srcu_idx; |
c77162de PM |
3088 | |
3089 | mutex_lock(&kvm->lock); | |
31037eca | 3090 | if (kvm->arch.hpte_setup_done) |
c77162de | 3091 | goto out; /* another vcpu beat us to it */ |
aa04b4cc | 3092 | |
32fad281 PM |
3093 | /* Allocate hashed page table (if not done already) and reset it */ |
3094 | if (!kvm->arch.hpt_virt) { | |
3095 | err = kvmppc_alloc_hpt(kvm, NULL); | |
3096 | if (err) { | |
3097 | pr_err("KVM: Couldn't alloc HPT\n"); | |
3098 | goto out; | |
3099 | } | |
3100 | } | |
3101 | ||
c77162de | 3102 | /* Look up the memslot for guest physical address 0 */ |
2c9097e4 | 3103 | srcu_idx = srcu_read_lock(&kvm->srcu); |
c77162de | 3104 | memslot = gfn_to_memslot(kvm, 0); |
aa04b4cc | 3105 | |
c77162de PM |
3106 | /* We must have some memory at 0 by now */ |
3107 | err = -EINVAL; | |
3108 | if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID)) | |
2c9097e4 | 3109 | goto out_srcu; |
c77162de PM |
3110 | |
3111 | /* Look up the VMA for the start of this memory slot */ | |
3112 | hva = memslot->userspace_addr; | |
3113 | down_read(&current->mm->mmap_sem); |
3114 | vma = find_vma(current->mm, hva); | |
3115 | if (!vma || vma->vm_start > hva || (vma->vm_flags & VM_IO)) | |
3116 | goto up_out; | |
3117 | ||
3118 | psize = vma_kernel_pagesize(vma); | |
da9d1d7f | 3119 | porder = __ilog2(psize); |
c77162de | 3120 | |
c77162de PM |
3121 | up_read(&current->mm->mmap_sem); |
3122 | ||
c17b98cf PM |
3123 | /* We can handle 4k, 64k or 16M pages in the VRMA */ |
3124 | err = -EINVAL; | |
3125 | if (!(psize == 0x1000 || psize == 0x10000 || | |
3126 | psize == 0x1000000)) | |
3127 | goto out_srcu; | |
c77162de | 3128 | |
c17b98cf PM |
3129 | senc = slb_pgsize_encoding(psize); |
3130 | kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T | | |
3131 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
c17b98cf PM |
3132 | /* Create HPTEs in the hash page table for the VRMA */ |
3133 | kvmppc_map_vrma(vcpu, memslot, porder); | |
aa04b4cc | 3134 | |
7a84084c PM |
3135 | /* Update VRMASD field in the LPCR */ |
3136 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) { | |
3137 | /* the -4 is to account for senc values starting at 0x10 */ | |
3138 | lpcr = senc << (LPCR_VRMASD_SH - 4); | |
3139 | kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD); | |
3140 | } else { | |
3141 | kvmppc_setup_partition_table(kvm); | |
3142 | } | |
a0144e2a | 3143 | |
31037eca | 3144 | /* Order updates to kvm->arch.lpcr etc. vs. hpte_setup_done */ |
c77162de | 3145 | smp_wmb(); |
31037eca | 3146 | kvm->arch.hpte_setup_done = 1; |
c77162de | 3147 | err = 0; |
2c9097e4 PM |
3148 | out_srcu: |
3149 | srcu_read_unlock(&kvm->srcu, srcu_idx); | |
c77162de PM |
3150 | out: |
3151 | mutex_unlock(&kvm->lock); | |
3152 | return err; | |
b2b2f165 | 3153 | |
c77162de PM |
3154 | up_out: |
3155 | up_read(&current->mm->mmap_sem); |
505d6421 | 3156 | goto out_srcu; |
de56a948 PM |
3157 | } |
3158 | ||
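/*
 * Ordering note (descriptive; the matching reader is outside this
 * excerpt): the smp_wmb() above orders the HPT, VRMA and LPCR
 * updates before the store to hpte_setup_done, so a vcpu whose run
 * path observes hpte_setup_done == 1 without taking kvm->lock can
 * assume those updates are visible.
 */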
79b6c247 | 3159 | #ifdef CONFIG_KVM_XICS |
6f3bb809 SW |
3160 | static int kvmppc_cpu_notify(struct notifier_block *self, unsigned long action, |
3161 | void *hcpu) | |
3162 | { | |
3163 | unsigned long cpu = (long)hcpu; | |
3164 | ||
3165 | switch (action) { | |
3166 | case CPU_UP_PREPARE: | |
3167 | case CPU_UP_PREPARE_FROZEN: | |
3168 | kvmppc_set_host_core(cpu); | |
3169 | break; | |
3170 | ||
3171 | #ifdef CONFIG_HOTPLUG_CPU | |
3172 | case CPU_DEAD: | |
3173 | case CPU_DEAD_FROZEN: | |
3174 | case CPU_UP_CANCELED: | |
3175 | case CPU_UP_CANCELED_FROZEN: | |
3176 | kvmppc_clear_host_core(cpu); | |
3177 | break; | |
3178 | #endif | |
3179 | default: | |
3180 | break; | |
3181 | } | |
3182 | ||
3183 | return NOTIFY_OK; | |
3184 | } | |
3185 | ||
3186 | static struct notifier_block kvmppc_cpu_notifier = { | |
3187 | .notifier_call = kvmppc_cpu_notify, | |
3188 | }; | |
3189 | ||
79b6c247 SW |
3190 | /* |
3191 | * Allocate a per-core structure for managing state about which cores are | |
3192 | * running in the host versus the guest and for exchanging data between | |
3193 | * real-mode KVM and CPUs running in the host. |
3194 | * This is only done for the first VM. | |
3195 | * The allocated structure stays even if all VMs have stopped. | |
3196 | * It is only freed when the kvm-hv module is unloaded. | |
3197 | * It's OK for this routine to fail; we just won't support host |
3198 | * core operations like redirecting H_IPI wakeups. | |
3199 | */ | |
3200 | void kvmppc_alloc_host_rm_ops(void) | |
3201 | { | |
3202 | struct kvmppc_host_rm_ops *ops; | |
3203 | unsigned long l_ops; | |
3204 | int cpu, core; | |
3205 | int size; | |
3206 | ||
3207 | /* Not the first time here? */ |
3208 | if (kvmppc_host_rm_ops_hv != NULL) | |
3209 | return; | |
3210 | ||
3211 | ops = kzalloc(sizeof(struct kvmppc_host_rm_ops), GFP_KERNEL); | |
3212 | if (!ops) | |
3213 | return; | |
3214 | ||
3215 | size = cpu_nr_cores() * sizeof(struct kvmppc_host_rm_core); | |
3216 | ops->rm_core = kzalloc(size, GFP_KERNEL); | |
3217 | ||
3218 | if (!ops->rm_core) { | |
3219 | kfree(ops); | |
3220 | return; | |
3221 | } | |
3222 | ||
6f3bb809 SW |
3223 | get_online_cpus(); |
3224 | ||
79b6c247 SW |
3225 | for (cpu = 0; cpu < nr_cpu_ids; cpu += threads_per_core) { |
3226 | if (!cpu_online(cpu)) | |
3227 | continue; | |
3228 | ||
3229 | core = cpu >> threads_shift; | |
3230 | ops->rm_core[core].rm_state.in_host = 1; | |
3231 | } | |
3232 | ||
0c2a6606 SW |
3233 | ops->vcpu_kick = kvmppc_fast_vcpu_kick_hv; |
3234 | ||
79b6c247 SW |
3235 | /* |
3236 | * Make the contents of the kvmppc_host_rm_ops structure visible | |
3237 | * to other CPUs before we assign it to the global variable. | |
3238 | * Do an atomic assignment (no locks used here), but if someone | |
3239 | * beats us to it, just free our copy and return. | |
3240 | */ | |
3241 | smp_wmb(); | |
3242 | l_ops = (unsigned long) ops; | |
3243 | ||
3244 | if (cmpxchg64((unsigned long *)&kvmppc_host_rm_ops_hv, 0, l_ops)) { | |
6f3bb809 | 3245 | put_online_cpus(); |
79b6c247 SW |
3246 | kfree(ops->rm_core); |
3247 | kfree(ops); | |
6f3bb809 | 3248 | return; |
79b6c247 | 3249 | } |
6f3bb809 SW |
3250 | |
3251 | register_cpu_notifier(&kvmppc_cpu_notifier); | |
3252 | ||
3253 | put_online_cpus(); | |
79b6c247 SW |
3254 | } |
3255 | ||
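/*
 * The allocation above uses a lock-free publish-once idiom; in
 * generic form (a sketch, not part of the original file):
 *
 *	obj = alloc_and_init();
 *	smp_wmb();	(order initialization before publication)
 *	if (cmpxchg(&global_ptr, NULL, obj) != NULL)
 *		free_and_cleanup(obj);	(lost the race; keep the winner's)
 *
 * Any reader that sees a non-NULL global_ptr therefore sees a fully
 * initialized structure.
 */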
3256 | void kvmppc_free_host_rm_ops(void) | |
3257 | { | |
3258 | if (kvmppc_host_rm_ops_hv) { | |
6f3bb809 | 3259 | unregister_cpu_notifier(&kvmppc_cpu_notifier); |
79b6c247 SW |
3260 | kfree(kvmppc_host_rm_ops_hv->rm_core); |
3261 | kfree(kvmppc_host_rm_ops_hv); | |
3262 | kvmppc_host_rm_ops_hv = NULL; | |
3263 | } | |
3264 | } | |
3265 | #endif | |
3266 | ||
3a167bea | 3267 | static int kvmppc_core_init_vm_hv(struct kvm *kvm) |
de56a948 | 3268 | { |
32fad281 | 3269 | unsigned long lpcr, lpid; |
e23a808b | 3270 | char buf[32]; |
de56a948 | 3271 | |
32fad281 PM |
3272 | /* Allocate the guest's logical partition ID */ |
3273 | ||
3274 | lpid = kvmppc_alloc_lpid(); | |
5d226ae5 | 3275 | if ((long)lpid < 0) |
32fad281 PM |
3276 | return -ENOMEM; |
3277 | kvm->arch.lpid = lpid; | |
de56a948 | 3278 | |
79b6c247 SW |
3279 | kvmppc_alloc_host_rm_ops(); |
3280 | ||
1b400ba0 PM |
3281 | /* |
3282 | * Since we don't flush the TLB when tearing down a VM, | |
3283 | * and this lpid might have previously been used, | |
3284 | * make sure we flush on each core before running the new VM. | |
7c5b06ca PM |
3285 | * On POWER9, the tlbie in mmu_partition_table_set_entry() |
3286 | * does this flush for us. | |
1b400ba0 | 3287 | */ |
7c5b06ca PM |
3288 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
3289 | cpumask_setall(&kvm->arch.need_tlb_flush); | |
1b400ba0 | 3290 | |
699a0ea0 PM |
3291 | /* Start out with the default set of hcalls enabled */ |
3292 | memcpy(kvm->arch.enabled_hcalls, default_enabled_hcalls, | |
3293 | sizeof(kvm->arch.enabled_hcalls)); | |
3294 | ||
7a84084c PM |
3295 | if (!cpu_has_feature(CPU_FTR_ARCH_300)) |
3296 | kvm->arch.host_sdr1 = mfspr(SPRN_SDR1); | |
aa04b4cc | 3297 | |
c17b98cf PM |
3298 | /* Init LPCR for virtual RMA mode */ |
3299 | kvm->arch.host_lpid = mfspr(SPRN_LPID); | |
3300 | kvm->arch.host_lpcr = lpcr = mfspr(SPRN_LPCR); | |
3301 | lpcr &= LPCR_PECE | LPCR_LPES; | |
3302 | lpcr |= (4UL << LPCR_DPFD_SH) | LPCR_HDICE | | |
3303 | LPCR_VPM0 | LPCR_VPM1; | |
3304 | kvm->arch.vrma_slb_v = SLB_VSID_B_1T | | |
3305 | (VRMA_VSID << SLB_VSID_SHIFT_1T); | |
3306 | /* On POWER8 turn on online bit to enable PURR/SPURR */ | |
3307 | if (cpu_has_feature(CPU_FTR_ARCH_207S)) | |
3308 | lpcr |= LPCR_ONL; | |
84f7139c PM |
3309 | /* |
3310 | * On POWER9, the VPM0 bit is reserved (VPM0=1 behaviour is assumed). |
3311 | * Set HVICE bit to enable hypervisor virtualization interrupts. | |
3312 | */ | |
3313 | if (cpu_has_feature(CPU_FTR_ARCH_300)) { | |
7a84084c | 3314 | lpcr &= ~LPCR_VPM0; |
84f7139c PM |
3315 | lpcr |= LPCR_HVICE; |
3316 | } | |
3317 | ||
9e368f29 | 3318 | kvm->arch.lpcr = lpcr; |
aa04b4cc | 3319 | |
7c5b06ca PM |
3320 | /* |
3321 | * Work out how many sets the TLB has, for the use of | |
3322 | * the TLB invalidation loop in book3s_hv_rmhandlers.S. | |
3323 | */ | |
3324 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
3325 | kvm->arch.tlb_sets = POWER9_TLB_SETS_HASH; /* 256 */ | |
3326 | else if (cpu_has_feature(CPU_FTR_ARCH_207S)) | |
3327 | kvm->arch.tlb_sets = POWER8_TLB_SETS; /* 512 */ | |
3328 | else | |
3329 | kvm->arch.tlb_sets = POWER7_TLB_SETS; /* 128 */ | |
3330 | ||
512691d4 | 3331 | /* |
441c19c8 ME |
3332 | * Track that we now have an HV mode VM active. This blocks secondary |
3333 | * CPU threads from coming online. | |
512691d4 | 3334 | */ |
441c19c8 | 3335 | kvm_hv_vm_activated(); |
512691d4 | 3336 | |
e23a808b PM |
3337 | /* |
3338 | * Create a debugfs directory for the VM | |
3339 | */ | |
3340 | snprintf(buf, sizeof(buf), "vm%d", current->pid); | |
3341 | kvm->arch.debugfs_dir = debugfs_create_dir(buf, kvm_debugfs_dir); | |
3342 | if (!IS_ERR_OR_NULL(kvm->arch.debugfs_dir)) | |
3343 | kvmppc_mmu_debugfs_init(kvm); | |
3344 | ||
54738c09 | 3345 | return 0; |
de56a948 PM |
3346 | } |
3347 | ||
f1378b1c PM |
3348 | static void kvmppc_free_vcores(struct kvm *kvm) |
3349 | { | |
3350 | long int i; | |
3351 | ||
23316316 | 3352 | for (i = 0; i < KVM_MAX_VCORES; ++i) |
f1378b1c PM |
3353 | kfree(kvm->arch.vcores[i]); |
3354 | kvm->arch.online_vcores = 0; | |
3355 | } | |
3356 | ||
3a167bea | 3357 | static void kvmppc_core_destroy_vm_hv(struct kvm *kvm) |
de56a948 | 3358 | { |
e23a808b PM |
3359 | debugfs_remove_recursive(kvm->arch.debugfs_dir); |
3360 | ||
441c19c8 | 3361 | kvm_hv_vm_deactivated(); |
512691d4 | 3362 | |
f1378b1c | 3363 | kvmppc_free_vcores(kvm); |
aa04b4cc | 3364 | |
de56a948 | 3365 | kvmppc_free_hpt(kvm); |
c57875f5 SW |
3366 | |
3367 | kvmppc_free_pimap(kvm); | |
de56a948 PM |
3368 | } |
3369 | ||
3a167bea AK |
3370 | /* We don't need to emulate any privileged instructions or dcbz */ |
3371 | static int kvmppc_core_emulate_op_hv(struct kvm_run *run, struct kvm_vcpu *vcpu, | |
3372 | unsigned int inst, int *advance) | |
de56a948 | 3373 | { |
3a167bea | 3374 | return EMULATE_FAIL; |
de56a948 PM |
3375 | } |
3376 | ||
3a167bea AK |
3377 | static int kvmppc_core_emulate_mtspr_hv(struct kvm_vcpu *vcpu, int sprn, |
3378 | ulong spr_val) | |
de56a948 PM |
3379 | { |
3380 | return EMULATE_FAIL; | |
3381 | } | |
3382 | ||
3a167bea AK |
3383 | static int kvmppc_core_emulate_mfspr_hv(struct kvm_vcpu *vcpu, int sprn, |
3384 | ulong *spr_val) | |
de56a948 PM |
3385 | { |
3386 | return EMULATE_FAIL; | |
3387 | } | |
3388 | ||
3a167bea | 3389 | static int kvmppc_core_check_processor_compat_hv(void) |
de56a948 | 3390 | { |
c17b98cf PM |
3391 | if (!cpu_has_feature(CPU_FTR_HVMODE) || |
3392 | !cpu_has_feature(CPU_FTR_ARCH_206)) | |
3a167bea | 3393 | return -EIO; |
50de596d AK |
3394 | /* |
3395 | * Disable KVM for POWER9 until the required bits are merged. |
3396 | */ | |
3397 | if (cpu_has_feature(CPU_FTR_ARCH_300)) | |
3398 | return -EIO; | |
3399 | ||
3a167bea | 3400 | return 0; |
de56a948 PM |
3401 | } |
3402 | ||
8daaafc8 SW |
3403 | #ifdef CONFIG_KVM_XICS |
3404 | ||
3405 | void kvmppc_free_pimap(struct kvm *kvm) | |
3406 | { | |
3407 | kfree(kvm->arch.pimap); | |
3408 | } | |
3409 | ||
c57875f5 | 3410 | static struct kvmppc_passthru_irqmap *kvmppc_alloc_pimap(void) |
8daaafc8 SW |
3411 | { |
3412 | return kzalloc(sizeof(struct kvmppc_passthru_irqmap), GFP_KERNEL); | |
3413 | } | |
c57875f5 SW |
3414 | |
3415 | static int kvmppc_set_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) | |
3416 | { | |
3417 | struct irq_desc *desc; | |
3418 | struct kvmppc_irq_map *irq_map; | |
3419 | struct kvmppc_passthru_irqmap *pimap; | |
3420 | struct irq_chip *chip; | |
3421 | int i; | |
3422 | ||
644abbb2 SW |
3423 | if (!kvm_irq_bypass) |
3424 | return 1; | |
3425 | ||
c57875f5 SW |
3426 | desc = irq_to_desc(host_irq); |
3427 | if (!desc) | |
3428 | return -EIO; | |
3429 | ||
3430 | mutex_lock(&kvm->lock); | |
3431 | ||
3432 | pimap = kvm->arch.pimap; | |
3433 | if (pimap == NULL) { | |
3434 | /* First call, allocate structure to hold IRQ map */ | |
3435 | pimap = kvmppc_alloc_pimap(); | |
3436 | if (pimap == NULL) { | |
3437 | mutex_unlock(&kvm->lock); | |
3438 | return -ENOMEM; | |
3439 | } | |
3440 | kvm->arch.pimap = pimap; | |
3441 | } | |
3442 | ||
3443 | /* | |
3444 | * For now, we only support interrupts for which the EOI operation | |
3445 | * is an OPAL call followed by a write to XIRR, since that's | |
3446 | * what our real-mode EOI code does. | |
3447 | */ | |
3448 | chip = irq_data_get_irq_chip(&desc->irq_data); | |
3449 | if (!chip || !is_pnv_opal_msi(chip)) { | |
3450 | pr_warn("kvmppc_set_passthru_irq_hv: Could not assign IRQ map for (%d,%d)\n", | |
3451 | host_irq, guest_gsi); | |
3452 | mutex_unlock(&kvm->lock); | |
3453 | return -ENOENT; | |
3454 | } | |
3455 | ||
3456 | /* | |
3457 | * See if we already have an entry for this guest IRQ number. | |
3458 | * If it's mapped to a hardware IRQ number, that's an error; |
3459 | * otherwise re-use this entry. | |
3460 | */ | |
3461 | for (i = 0; i < pimap->n_mapped; i++) { | |
3462 | if (guest_gsi == pimap->mapped[i].v_hwirq) { | |
3463 | if (pimap->mapped[i].r_hwirq) { | |
3464 | mutex_unlock(&kvm->lock); | |
3465 | return -EINVAL; | |
3466 | } | |
3467 | break; | |
3468 | } | |
3469 | } | |
3470 | ||
3471 | if (i == KVMPPC_PIRQ_MAPPED) { | |
3472 | mutex_unlock(&kvm->lock); | |
3473 | return -EAGAIN; /* table is full */ | |
3474 | } | |
3475 | ||
3476 | irq_map = &pimap->mapped[i]; | |
3477 | ||
3478 | irq_map->v_hwirq = guest_gsi; | |
c57875f5 SW |
3479 | irq_map->desc = desc; |
3480 | ||
e3c13e56 SW |
3481 | /* |
3482 | * Order the above two stores before the next to serialize with | |
3483 | * the KVM real mode handler. | |
3484 | */ | |
3485 | smp_wmb(); | |
3486 | irq_map->r_hwirq = desc->irq_data.hwirq; | |
3487 | ||
c57875f5 SW |
3488 | if (i == pimap->n_mapped) |
3489 | pimap->n_mapped++; | |
3490 | ||
5d375199 PM |
3491 | kvmppc_xics_set_mapped(kvm, guest_gsi, desc->irq_data.hwirq); |
3492 | ||
c57875f5 SW |
3493 | mutex_unlock(&kvm->lock); |
3494 | ||
3495 | return 0; | |
3496 | } | |
3497 | ||
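/*
 * Read-side sketch (an assumption about the pairing, not code from
 * the original file): the smp_wmb() in kvmppc_set_passthru_irq()
 * lets a consumer such as the real-mode handler treat a non-zero
 * r_hwirq as the "entry valid" flag, roughly:
 *
 *	if (READ_ONCE(map->r_hwirq) == hwirq) {
 *		smp_rmb();
 *		handle(map->v_hwirq, map->desc);
 *	}
 *
 * handle() is a placeholder; the real consumer lives in the
 * real-mode interrupt code.
 */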
3498 | static int kvmppc_clr_passthru_irq(struct kvm *kvm, int host_irq, int guest_gsi) | |
3499 | { | |
3500 | struct irq_desc *desc; | |
3501 | struct kvmppc_passthru_irqmap *pimap; | |
3502 | int i; | |
3503 | ||
644abbb2 SW |
3504 | if (!kvm_irq_bypass) |
3505 | return 0; | |
3506 | ||
c57875f5 SW |
3507 | desc = irq_to_desc(host_irq); |
3508 | if (!desc) | |
3509 | return -EIO; | |
3510 | ||
3511 | mutex_lock(&kvm->lock); | |
3512 | ||
3513 | if (kvm->arch.pimap == NULL) { | |
3514 | mutex_unlock(&kvm->lock); | |
3515 | return 0; | |
3516 | } | |
3517 | pimap = kvm->arch.pimap; | |
3518 | ||
3519 | for (i = 0; i < pimap->n_mapped; i++) { | |
3520 | if (guest_gsi == pimap->mapped[i].v_hwirq) | |
3521 | break; | |
3522 | } | |
3523 | ||
3524 | if (i == pimap->n_mapped) { | |
3525 | mutex_unlock(&kvm->lock); | |
3526 | return -ENODEV; | |
3527 | } | |
3528 | ||
5d375199 PM |
3529 | kvmppc_xics_clr_mapped(kvm, guest_gsi, pimap->mapped[i].r_hwirq); |
3530 | ||
c57875f5 SW |
3531 | /* invalidate the entry */ |
3532 | pimap->mapped[i].r_hwirq = 0; | |
3533 | ||
3534 | /* | |
3535 | * We don't free this structure even when the count goes to | |
3536 | * zero. The structure is freed when we destroy the VM. | |
3537 | */ | |
3538 | ||
3539 | mutex_unlock(&kvm->lock); | |
3540 | return 0; | |
3541 | } | |
3542 | ||
3543 | static int kvmppc_irq_bypass_add_producer_hv(struct irq_bypass_consumer *cons, | |
3544 | struct irq_bypass_producer *prod) | |
3545 | { | |
3546 | int ret = 0; | |
3547 | struct kvm_kernel_irqfd *irqfd = | |
3548 | container_of(cons, struct kvm_kernel_irqfd, consumer); | |
3549 | ||
3550 | irqfd->producer = prod; | |
3551 | ||
3552 | ret = kvmppc_set_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); | |
3553 | if (ret) | |
3554 | pr_info("kvmppc_set_passthru_irq (irq %d, gsi %d) fails: %d\n", | |
3555 | prod->irq, irqfd->gsi, ret); | |
3556 | ||
3557 | return ret; | |
3558 | } | |
3559 | ||
3560 | static void kvmppc_irq_bypass_del_producer_hv(struct irq_bypass_consumer *cons, | |
3561 | struct irq_bypass_producer *prod) | |
3562 | { | |
3563 | int ret; | |
3564 | struct kvm_kernel_irqfd *irqfd = | |
3565 | container_of(cons, struct kvm_kernel_irqfd, consumer); | |
3566 | ||
3567 | irqfd->producer = NULL; | |
3568 | ||
3569 | /* | |
3570 | * When the producer for this consumer is unregistered, we change |
3571 | * back to the default external interrupt handling mode - the KVM |
3572 | * real-mode handler will switch back to the host. |
3573 | */ | |
3574 | ret = kvmppc_clr_passthru_irq(irqfd->kvm, prod->irq, irqfd->gsi); | |
3575 | if (ret) | |
3576 | pr_warn("kvmppc_clr_passthru_irq (irq %d, gsi %d) fails: %d\n", | |
3577 | prod->irq, irqfd->gsi, ret); | |
3578 | } | |
8daaafc8 SW |
3579 | #endif |
3580 | ||
3a167bea AK |
3581 | static long kvm_arch_vm_ioctl_hv(struct file *filp, |
3582 | unsigned int ioctl, unsigned long arg) | |
3583 | { | |
3584 | struct kvm *kvm __maybe_unused = filp->private_data; | |
3585 | void __user *argp = (void __user *)arg; | |
3586 | long r; | |
3587 | ||
3588 | switch (ioctl) { | |
3589 | ||
3a167bea AK |
3590 | case KVM_PPC_ALLOCATE_HTAB: { |
3591 | u32 htab_order; | |
3592 | ||
3593 | r = -EFAULT; | |
3594 | if (get_user(htab_order, (u32 __user *)argp)) | |
3595 | break; | |
3596 | r = kvmppc_alloc_reset_hpt(kvm, &htab_order); | |
3597 | if (r) | |
3598 | break; | |
3599 | r = -EFAULT; | |
3600 | if (put_user(htab_order, (u32 __user *)argp)) | |
3601 | break; | |
3602 | r = 0; | |
3603 | break; | |
3604 | } | |
3605 | ||
3606 | case KVM_PPC_GET_HTAB_FD: { | |
3607 | struct kvm_get_htab_fd ghf; | |
3608 | ||
3609 | r = -EFAULT; | |
3610 | if (copy_from_user(&ghf, argp, sizeof(ghf))) | |
3611 | break; | |
3612 | r = kvm_vm_ioctl_get_htab_fd(kvm, &ghf); | |
3613 | break; | |
3614 | } | |
3615 | ||
3616 | default: | |
3617 | r = -ENOTTY; | |
3618 | } | |
3619 | ||
3620 | return r; | |
3621 | } | |
3622 | ||
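/*
 * Userspace sketch for the KVM_PPC_ALLOCATE_HTAB case above
 * (illustrative, not part of the original file).  Per the KVM API
 * documentation, the u32 passed in is the desired HPT order (log2
 * of the size in bytes) and is updated on return with the order
 * actually allocated; 26 would request a 64 MiB hash table.
 *
 *	u32 order = 26;
 *
 *	if (ioctl(vm_fd, KVM_PPC_ALLOCATE_HTAB, &order) == 0)
 *		printf("allocated HPT of order %u\n", order);
 */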
699a0ea0 PM |
3623 | /* |
3624 | * List of hcall numbers to enable by default. | |
3625 | * For compatibility with old userspace, we enable by default | |
3626 | * all hcalls that were implemented before the hcall-enabling | |
3627 | * facility was added. Note this list should not include H_RTAS. | |
3628 | */ | |
3629 | static unsigned int default_hcall_list[] = { | |
3630 | H_REMOVE, | |
3631 | H_ENTER, | |
3632 | H_READ, | |
3633 | H_PROTECT, | |
3634 | H_BULK_REMOVE, | |
3635 | H_GET_TCE, | |
3636 | H_PUT_TCE, | |
3637 | H_SET_DABR, | |
3638 | H_SET_XDABR, | |
3639 | H_CEDE, | |
3640 | H_PROD, | |
3641 | H_CONFER, | |
3642 | H_REGISTER_VPA, | |
3643 | #ifdef CONFIG_KVM_XICS | |
3644 | H_EOI, | |
3645 | H_CPPR, | |
3646 | H_IPI, | |
3647 | H_IPOLL, | |
3648 | H_XIRR, | |
3649 | H_XIRR_X, | |
3650 | #endif | |
3651 | 0 | |
3652 | }; | |
3653 | ||
3654 | static void init_default_hcalls(void) | |
3655 | { | |
3656 | int i; | |
ae2113a4 | 3657 | unsigned int hcall; |
699a0ea0 | 3658 | |
ae2113a4 PM |
3659 | for (i = 0; default_hcall_list[i]; ++i) { |
3660 | hcall = default_hcall_list[i]; | |
3661 | WARN_ON(!kvmppc_hcall_impl_hv(hcall)); | |
3662 | __set_bit(hcall / 4, default_enabled_hcalls); | |
3663 | } | |
699a0ea0 PM |
3664 | } |
3665 | ||
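/*
 * Worked example for the bit indexing above (hcall values as defined
 * in asm/hvcall.h): PAPR hcall numbers are multiples of 4, so
 * __set_bit(hcall / 4, ...) yields a dense bitmap.  H_REMOVE (0x04)
 * maps to bit 1, H_CEDE (0xE0) to bit 56.
 */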
cbbc58d4 | 3666 | static struct kvmppc_ops kvm_ops_hv = { |
3a167bea AK |
3667 | .get_sregs = kvm_arch_vcpu_ioctl_get_sregs_hv, |
3668 | .set_sregs = kvm_arch_vcpu_ioctl_set_sregs_hv, | |
3669 | .get_one_reg = kvmppc_get_one_reg_hv, | |
3670 | .set_one_reg = kvmppc_set_one_reg_hv, | |
3671 | .vcpu_load = kvmppc_core_vcpu_load_hv, | |
3672 | .vcpu_put = kvmppc_core_vcpu_put_hv, | |
3673 | .set_msr = kvmppc_set_msr_hv, | |
3674 | .vcpu_run = kvmppc_vcpu_run_hv, | |
3675 | .vcpu_create = kvmppc_core_vcpu_create_hv, | |
3676 | .vcpu_free = kvmppc_core_vcpu_free_hv, | |
3677 | .check_requests = kvmppc_core_check_requests_hv, | |
3678 | .get_dirty_log = kvm_vm_ioctl_get_dirty_log_hv, | |
3679 | .flush_memslot = kvmppc_core_flush_memslot_hv, | |
3680 | .prepare_memory_region = kvmppc_core_prepare_memory_region_hv, | |
3681 | .commit_memory_region = kvmppc_core_commit_memory_region_hv, | |
3682 | .unmap_hva = kvm_unmap_hva_hv, | |
3683 | .unmap_hva_range = kvm_unmap_hva_range_hv, | |
3684 | .age_hva = kvm_age_hva_hv, | |
3685 | .test_age_hva = kvm_test_age_hva_hv, | |
3686 | .set_spte_hva = kvm_set_spte_hva_hv, | |
3687 | .mmu_destroy = kvmppc_mmu_destroy_hv, | |
3688 | .free_memslot = kvmppc_core_free_memslot_hv, | |
3689 | .create_memslot = kvmppc_core_create_memslot_hv, | |
3690 | .init_vm = kvmppc_core_init_vm_hv, | |
3691 | .destroy_vm = kvmppc_core_destroy_vm_hv, | |
3a167bea AK |
3692 | .get_smmu_info = kvm_vm_ioctl_get_smmu_info_hv, |
3693 | .emulate_op = kvmppc_core_emulate_op_hv, | |
3694 | .emulate_mtspr = kvmppc_core_emulate_mtspr_hv, | |
3695 | .emulate_mfspr = kvmppc_core_emulate_mfspr_hv, | |
3696 | .fast_vcpu_kick = kvmppc_fast_vcpu_kick_hv, | |
3697 | .arch_vm_ioctl = kvm_arch_vm_ioctl_hv, | |
ae2113a4 | 3698 | .hcall_implemented = kvmppc_hcall_impl_hv, |
c57875f5 SW |
3699 | #ifdef CONFIG_KVM_XICS |
3700 | .irq_bypass_add_producer = kvmppc_irq_bypass_add_producer_hv, | |
3701 | .irq_bypass_del_producer = kvmppc_irq_bypass_del_producer_hv, | |
3702 | #endif | |
3a167bea AK |
3703 | }; |
3704 | ||
fd7bacbc MS |
3705 | static int kvm_init_subcore_bitmap(void) |
3706 | { | |
3707 | int i, j; | |
3708 | int nr_cores = cpu_nr_cores(); | |
3709 | struct sibling_subcore_state *sibling_subcore_state; | |
3710 | ||
3711 | for (i = 0; i < nr_cores; i++) { | |
3712 | int first_cpu = i * threads_per_core; | |
3713 | int node = cpu_to_node(first_cpu); | |
3714 | ||
3715 | /* Ignore if it is already allocated. */ | |
3716 | if (paca[first_cpu].sibling_subcore_state) | |
3717 | continue; | |
3718 | ||
3719 | sibling_subcore_state = | |
3720 | kmalloc_node(sizeof(struct sibling_subcore_state), | |
3721 | GFP_KERNEL, node); | |
3722 | if (!sibling_subcore_state) | |
3723 | return -ENOMEM; | |
3724 | ||
3725 | memset(sibling_subcore_state, 0, | |
3726 | sizeof(struct sibling_subcore_state)); | |
3727 | ||
3728 | for (j = 0; j < threads_per_core; j++) { | |
3729 | int cpu = first_cpu + j; | |
3730 | ||
3731 | paca[cpu].sibling_subcore_state = sibling_subcore_state; | |
3732 | } | |
3733 | } | |
3734 | return 0; | |
3735 | } | |
3736 | ||
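/*
 * Worked example (illustrative numbers): with threads_per_core = 8,
 * CPUs 0-7 share one sibling_subcore_state allocated on the node of
 * CPU 0, CPUs 8-15 share the next, and so on; every thread's paca
 * ends up pointing at its own core's shared structure.
 */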
3a167bea | 3737 | static int kvmppc_book3s_init_hv(void) |
de56a948 PM |
3738 | { |
3739 | int r; | |
cbbc58d4 AK |
3740 | /* |
3741 | * FIXME!! Do we need to check on all CPUs? |
3742 | */ | |
3743 | r = kvmppc_core_check_processor_compat_hv(); | |
3744 | if (r < 0) | |
739e2425 | 3745 | return -ENODEV; |
de56a948 | 3746 | |
fd7bacbc MS |
3747 | r = kvm_init_subcore_bitmap(); |
3748 | if (r) | |
3749 | return r; | |
3750 | ||
f725758b PM |
3751 | /* |
3752 | * We need a way of accessing the XICS interrupt controller, | |
3753 | * either directly, via paca[cpu].kvm_hstate.xics_phys, or | |
3754 | * indirectly, via OPAL. | |
3755 | */ | |
3756 | #ifdef CONFIG_SMP | |
3757 | if (!get_paca()->kvm_hstate.xics_phys) { | |
3758 | struct device_node *np; | |
3759 | ||
3760 | np = of_find_compatible_node(NULL, NULL, "ibm,opal-intc"); | |
3761 | if (!np) { | |
3762 | pr_err("KVM-HV: Cannot determine method for accessing XICS\n"); | |
3763 | return -ENODEV; | |
3764 | } | |
3765 | } | |
3766 | #endif | |
3767 | ||
cbbc58d4 AK |
3768 | kvm_ops_hv.owner = THIS_MODULE; |
3769 | kvmppc_hv_ops = &kvm_ops_hv; | |
de56a948 | 3770 | |
699a0ea0 PM |
3771 | init_default_hcalls(); |
3772 | ||
ec257165 PM |
3773 | init_vcore_lists(); |
3774 | ||
cbbc58d4 | 3775 | r = kvmppc_mmu_hv_init(); |
de56a948 PM |
3776 | return r; |
3777 | } | |
3778 | ||
3a167bea | 3779 | static void kvmppc_book3s_exit_hv(void) |
de56a948 | 3780 | { |
79b6c247 | 3781 | kvmppc_free_host_rm_ops(); |
cbbc58d4 | 3782 | kvmppc_hv_ops = NULL; |
de56a948 PM |
3783 | } |
3784 | ||
3a167bea AK |
3785 | module_init(kvmppc_book3s_init_hv); |
3786 | module_exit(kvmppc_book3s_exit_hv); | |
2ba9f0d8 | 3787 | MODULE_LICENSE("GPL"); |
398a76c6 AG |
3788 | MODULE_ALIAS_MISCDEV(KVM_MINOR); |
3789 | MODULE_ALIAS("devname:kvm"); | |
7c5b06ca | 3790 |