/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/xive.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

#include "book3s_xics.h"
#include "book3s_xive.h"

/*
 * The XIVE module will populate these when it loads
 */
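/*
 * These hooks exist because this file is built into the kernel image
 * while the XIVE handlers may live in the (modular) KVM-HV code, so
 * direct calls are not possible; the kvmppc_rm_h_*() wrappers at the
 * bottom of this file fall back to H_NOT_AVAILABLE when the module
 * has not yet filled them in.
 */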
unsigned long (*__xive_vm_h_xirr)(struct kvm_vcpu *vcpu);
unsigned long (*__xive_vm_h_ipoll)(struct kvm_vcpu *vcpu, unsigned long server);
int (*__xive_vm_h_ipi)(struct kvm_vcpu *vcpu, unsigned long server,
		       unsigned long mfrr);
int (*__xive_vm_h_cppr)(struct kvm_vcpu *vcpu, unsigned long cppr);
int (*__xive_vm_h_eoi)(struct kvm_vcpu *vcpu, unsigned long xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_xirr);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipoll);
EXPORT_SYMBOL_GPL(__xive_vm_h_ipi);
EXPORT_SYMBOL_GPL(__xive_vm_h_cppr);
EXPORT_SYMBOL_GPL(__xive_vm_h_eoi);

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
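/*
 * Worked example with the default ratio: a host with 16 GiB of RAM
 * reserves 16 GiB * 5 / 100 = ~819 MiB of CMA for guest hash page
 * tables (tunable with the kvm_cma_resv_ratio= boot parameter below).
 */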

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

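/*
 * Allocate a guest hash page table from the CMA pool reserved at boot.
 * The VM_BUG_ON below enforces the minimum allocation size of one
 * KVM_CMA_CHUNK_ORDER-sized (256 kB) chunk, and the result is aligned
 * to HPT_ALIGN_PAGES as the hardware requires for an HPT.
 */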
struct page *kvm_alloc_hpt_cma(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES),
			 GFP_KERNEL);
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt_cma);

void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_free_hpt_cma);

/**
 * kvm_cma_reserve() - reserve area for kvm hash page table
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need CMA reservation only when we are in HV mode
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, "kvm_cma",
			&kvm_cma);
	}
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

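	/*
	 * Advertise that we are conferring, then spin for at most ~10us
	 * (the window computed into "stop" above). If at any point every
	 * thread still in the guest has either ceded or is conferring,
	 * report H_TOO_HARD so the caller yields to the virtual-mode
	 * handler; otherwise just go back into the guest.
	 */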
	set_bit(ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(ptid, &vc->conferring_threads);
	return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

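/*
 * PAPR hcall numbers are multiples of 4, so the real-mode handler
 * table (built in book3s_hv_rmhandlers.S) is indexed by hcall number
 * divided by 4; a nonzero entry means a real-mode handler exists.
 */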
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

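/*
 * Real-mode H_RANDOM: on success the hardware random number has been
 * written into the guest's r4, the register PAPR uses for the first
 * hcall output value.
 */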
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}

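/*
 * Note on the msgsnd paths below: the doorbell message encodes the
 * destination in its low bits - the global hardware thread id on
 * POWER9 (reachable from any core) but only the thread-in-core number
 * on POWER8, which is why POWER8 falls back to an XICS/OPAL IPI for
 * remote cores.
 */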
/*
 * Send an interrupt or message to another CPU.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	void __iomem *xics_phys;
	unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

	/* On POWER9 we can use msgsnd for any destination cpu. */
	if (cpu_has_feature(CPU_FTR_ARCH_300)) {
		msg |= get_hard_smp_processor_id(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* On POWER8 for IPIs to threads in the same core, use msgsnd. */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* We should never reach this */
	if (WARN_ON_ONCE(xive_enabled()))
		return;

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	if (xics_phys)
		__raw_rm_writeb(IPI_PRIORITY, xics_phys + XICS_MFRR);
	else
		opal_int_set_mfrr(get_hard_smp_processor_id(cpu), IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

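/*
 * vcore->entry_exit_map packs two 8-bit thread maps: the low byte is
 * the set of threads that have entered the guest, the 0xff00 byte the
 * set that is exiting. kvmhv_commence_exit() sets our exit bit with a
 * cmpxchg loop and, if we are the first thread to exit, kicks the
 * other threads (and any other subcores) out of the guest.
 */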
void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
	int me, ee, i;

	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

	/*
	 * If we are doing dynamic micro-threading, interrupt the other
	 * subcores to pull them out of their guests too.
	 */
	if (!sip)
		return;

	for (i = 0; i < MAX_SUBCORES; ++i) {
		vc = sip->master_vcs[i];
		if (!vc)
			break;
		do {
			ee = vc->entry_exit_map;
			/* Already asked to exit? */
			if ((ee >> 8) != 0)
				break;
		} while (cmpxchg(&vc->entry_exit_map, ee,
				 ee | VCORE_EXIT_REQ) != ee);
		if ((ee >> 8) == 0)
			kvmhv_interrupt_vcore(vc, ee);
	}
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
					 u32 xisr)
{
	int i;

	/*
	 * We access the mapped array here without a lock. That
	 * is safe because we never reduce the number of entries
	 * in the array and we never change the v_hwirq field of
	 * an entry once it is set.
	 *
	 * We have also carefully ordered the stores in the writer
	 * and the loads here in the reader, so that if we find a matching
	 * hwirq here, the associated GSI and irq_desc fields are valid.
	 */
	for (i = 0; i < pimap->n_mapped; i++) {
		if (xisr == pimap->mapped[i].r_hwirq) {
			/*
			 * Order subsequent reads in the caller to serialize
			 * with the writer.
			 */
			smp_rmb();
			return &pimap->mapped[i];
		}
	}
	return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA; it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	struct kvmppc_passthru_irqmap *pimap;
	struct kvmppc_irq_map *irq_map;
	struct kvm_vcpu *vcpu;

	vcpu = local_paca->kvm_hstate.kvm_vcpu;
	if (!vcpu)
		return 1;
	pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
	if (!pimap)
		return 1;
	irq_map = get_irqmap(pimap, xisr);
	if (!irq_map)
		return 1;

	/* We're handling this interrupt, generic code doesn't need to */
	local_paca->kvm_hstate.saved_xirr = 0;

	return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
	return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 if there is a passthrough interrupt that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is a PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);

long kvmppc_read_intr(void)
{
	long ret = 0;
	long rc;
	bool again;

	if (xive_enabled())
		return 1;

	do {
		again = false;
		rc = kvmppc_read_one_intr(&again);
		if (rc && (ret == 0 || rc > ret))
			ret = rc;
	} while (again);
	return ret;
}

static long kvmppc_read_one_intr(bool *again)
{
	void __iomem *xics_phys;
	u32 h_xirr;
	__be32 xirr;
	u32 xisr;
	u8 host_ipi;
	int64_t rc;

	if (xive_enabled())
		return 1;

	/* see if a host IPI is pending */
	host_ipi = local_paca->kvm_hstate.host_ipi;
	if (host_ipi)
		return 1;

	/* Now read the interrupt from the ICP */
	xics_phys = local_paca->kvm_hstate.xics_phys;
	rc = 0;
	if (!xics_phys)
		rc = opal_int_get_xirr(&xirr, false);
	else
		xirr = __raw_rm_readl(xics_phys + XICS_XIRR);
	if (rc < 0)
		return 1;

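	/*
	 * XIRR layout: the top 8 bits hold the CPPR (current processor
	 * priority), the low 24 bits the XISR interrupt source, which
	 * is why xisr is masked with 0xffffff below.
	 */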
	/*
	 * Save XIRR for later. Since we get control in reverse endian
	 * on LE systems, save it byte reversed and fetch it back in
	 * host endian. Note that xirr is the value read from the
	 * XIRR register, while h_xirr is the host endian version.
	 */
	h_xirr = be32_to_cpu(xirr);
	local_paca->kvm_hstate.saved_xirr = h_xirr;
	xisr = h_xirr & 0xffffff;
	/*
	 * Ensure that the store/load complete to guarantee all side
	 * effects of loading from XIRR have completed
	 */
	smp_mb();

	/* if nothing pending in the ICP */
	if (!xisr)
		return 0;

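	/*
	 * Writing 0xff to the MFRR below resets it to the least
	 * favored priority ("no IPI pending"), and writing the XIRR
	 * value back to the XIRR register EOIs the interrupt at the
	 * ICP.
	 */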
	/* We found something in the ICP...
	 *
	 * If it is an IPI, clear the MFRR and EOI it.
	 */
	if (xisr == XICS_IPI) {
		rc = 0;
		if (xics_phys) {
			__raw_rm_writeb(0xff, xics_phys + XICS_MFRR);
			__raw_rm_writel(xirr, xics_phys + XICS_XIRR);
		} else {
			opal_int_set_mfrr(hard_smp_processor_id(), 0xff);
			rc = opal_int_eoi(h_xirr);
		}
		/* If rc > 0, there is another interrupt pending */
		*again = rc > 0;

		/*
		 * Need to ensure side effects of above stores
		 * complete before proceeding.
		 */
		smp_mb();

		/*
		 * We need to re-check host IPI now in case it got set in the
		 * meantime. If it's clear, we bounce the interrupt to the
		 * guest.
		 */
		host_ipi = local_paca->kvm_hstate.host_ipi;
		if (unlikely(host_ipi != 0)) {
			/* We raced with the host,
			 * we need to resend that IPI, bummer
			 */
			if (xics_phys)
				__raw_rm_writeb(IPI_PRIORITY,
						xics_phys + XICS_MFRR);
			else
				opal_int_set_mfrr(hard_smp_processor_id(),
						  IPI_PRIORITY);
			/* Let side effects complete */
			smp_mb();
			return 1;
		}

		/* OK, it's an IPI for us */
		local_paca->kvm_hstate.saved_xirr = 0;
		return -1;
	}

	return kvmppc_check_passthru(xisr, xirr, again);
}

#ifdef CONFIG_KVM_XICS
static inline bool is_rm(void)
{
	return !(mfmsr() & MSR_DR);
}

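/*
 * Each hcall wrapper below dispatches the same way: with XIVE enabled,
 * call the real-mode XIVE handler if translation is off (is_rm()),
 * otherwise go through the hook the XIVE module installed at load time
 * (H_NOT_AVAILABLE if it never loaded); without XIVE, fall back to the
 * XICS implementation.
 */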
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gpr[5] = get_tb();
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_xirr(vcpu);
		if (unlikely(!__xive_vm_h_xirr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_xirr(vcpu);
	} else
		return xics_rm_h_xirr(vcpu);
}

unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipoll(vcpu, server);
		if (unlikely(!__xive_vm_h_ipoll))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipoll(vcpu, server);
	} else
		return H_TOO_HARD;
}

int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_ipi(vcpu, server, mfrr);
		if (unlikely(!__xive_vm_h_ipi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_ipi(vcpu, server, mfrr);
	} else
		return xics_rm_h_ipi(vcpu, server, mfrr);
}

int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_cppr(vcpu, cppr);
		if (unlikely(!__xive_vm_h_cppr))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_cppr(vcpu, cppr);
	} else
		return xics_rm_h_cppr(vcpu, cppr);
}

int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr)
{
	if (xive_enabled()) {
		if (is_rm())
			return xive_rm_h_eoi(vcpu, xirr);
		if (unlikely(!__xive_vm_h_eoi))
			return H_NOT_AVAILABLE;
		return __xive_vm_h_eoi(vcpu, xirr);
	} else
		return xics_rm_h_eoi(vcpu, xirr);
}
#endif /* CONFIG_KVM_XICS */