/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>
#include <asm/io.h>
#include <asm/opal.h>
#include <asm/smp.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus(CPU_FTR_ARCH_206)
 * should be power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
        pr_debug("%s(%s)\n", __func__, p);
        if (!p)
                return -EINVAL;
        return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
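
/*
 * Allocate physically contiguous memory for a guest's hashed page table
 * (HPT) from the CMA region reserved by kvm_cma_reserve() below.
 */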
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
        VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

        return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
        cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from early allocator. It should be
 * called by arch specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
        unsigned long align_size;
        struct memblock_region *reg;
        phys_addr_t selected_size = 0;

        /*
         * We need CMA reservation only when we are in HV mode
         */
        if (!cpu_has_feature(CPU_FTR_HVMODE))
                return;
        /*
         * We cannot use memblock_phys_mem_size() here, because
         * memblock_analyze() has not been called yet.
         */
        for_each_memblock(memory, reg)
                selected_size += memblock_region_memory_end_pfn(reg) -
                                 memblock_region_memory_base_pfn(reg);

        selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
        if (selected_size) {
                pr_debug("%s: reserving %ld MiB for global area\n", __func__,
                         (unsigned long)selected_size / SZ_1M);
                align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
                cma_declare_contiguous(0, selected_size, 0, align_size,
                        KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
        }
}

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        int threads_running;
        int threads_ceded;
        int threads_conferring;
        u64 stop = get_tb() + 10 * tb_ticks_per_usec;
        int rv = H_SUCCESS; /* => don't yield */

        set_bit(ptid, &vc->conferring_threads);
        while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
                threads_running = VCORE_ENTRY_MAP(vc);
                threads_ceded = vc->napping_threads;
                threads_conferring = vc->conferring_threads;
                if ((threads_ceded | threads_conferring) == threads_running) {
                        rv = H_TOO_HARD; /* => do yield */
                        break;
                }
        }
        clear_bit(ptid, &vc->conferring_threads);
        return rv;
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
        get_online_cpus();
        atomic_inc(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
        get_online_cpus();
        atomic_dec(&hv_vm_count);
        put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);
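
/*
 * Report whether any HV-mode guests currently exist, so that other
 * platform code can avoid operations (such as onlining secondary
 * threads) that must be blocked while they do.
 */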
bool kvm_hv_mode_active(void)
{
        return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];
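
/*
 * hcall_real_table, defined in book3s_hv_rmhandlers.S, is indexed by
 * hypercall number / 4; a non-zero entry means the hcall has a
 * real-mode handler.
 */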
int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
        cmd /= 4;
        if (cmd < hcall_real_table_end - hcall_real_table &&
            hcall_real_table[cmd])
                return 1;

        return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);

int kvmppc_hwrng_present(void)
{
        return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);
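
/*
 * Real-mode handler for the H_RANDOM hypercall: on success the random
 * value is returned to the guest in r4 (vcpu->arch.gpr[4]).
 */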
long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
        if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
                return H_SUCCESS;

        return H_HARDWARE;
}
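
/*
 * Cache-inhibited byte store (stbcix) to a physical address; safe to
 * use in real mode, used below to poke the XICS MFRR register.
 */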
static inline void rm_writeb(unsigned long paddr, u8 val)
{
        __asm__ __volatile__("stbcix %0,0,%1"
                : : "r" (val), "r" (paddr) : "memory");
}

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
        unsigned long xics_phys;
        unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);

        /* On POWER9 we can use msgsnd for any destination cpu. */
        if (cpu_has_feature(CPU_FTR_ARCH_300)) {
                msg |= get_hard_smp_processor_id(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* On POWER8 for IPIs to threads in the same core, use msgsnd. */
        if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
            cpu_first_thread_sibling(cpu) ==
            cpu_first_thread_sibling(raw_smp_processor_id())) {
                msg |= cpu_thread_in_core(cpu);
                __asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
                return;
        }

        /* Else poke the target with an IPI */
        xics_phys = paca[cpu].kvm_hstate.xics_phys;
        if (xics_phys)
                rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
        else
                opal_rm_int_set_mfrr(get_hard_smp_processor_id(cpu),
                                     IPI_PRIORITY);
}

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
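
/*
 * Send an IPI or msgsnd to each thread of this virtual core whose bit
 * is set in 'active', to pull it out of the guest.
 */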
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
        int cpu = vc->pcpu;

        /* Order setting of exit map vs. msgsnd/IPI */
        smp_mb();
        for (; active; active >>= 1, ++cpu)
                if (active & 1)
                        kvmhv_rm_send_ipi(cpu);
}
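
/*
 * Called when one thread has decided that the whole virtual core must
 * exit the guest: record our exit in entry_exit_map and interrupt the
 * remaining threads (and, with dynamic micro-threading, the other
 * subcores) so that they exit too.
 */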
void kvmhv_commence_exit(int trap)
{
        struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
        int ptid = local_paca->kvm_hstate.ptid;
        struct kvm_split_mode *sip = local_paca->kvm_hstate.kvm_split_mode;
        int me, ee, i;

        /* Set our bit in the threads-exiting-guest map in the 0xff00
           bits of vcore->entry_exit_map */
        me = 0x100 << ptid;
        do {
                ee = vc->entry_exit_map;
        } while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

        /* Are we the first here? */
        if ((ee >> 8) != 0)
                return;

        /*
         * Trigger the other threads in this vcore to exit the guest.
         * If this is a hypervisor decrementer interrupt then they
         * will be already on their way out of the guest.
         */
        if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
                kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));

        /*
         * If we are doing dynamic micro-threading, interrupt the other
         * subcores to pull them out of their guests too.
         */
        if (!sip)
                return;

        for (i = 0; i < MAX_SUBCORES; ++i) {
                vc = sip->master_vcs[i];
                if (!vc)
                        break;
                do {
                        ee = vc->entry_exit_map;
                        /* Already asked to exit? */
                        if ((ee >> 8) != 0)
                                break;
                } while (cmpxchg(&vc->entry_exit_map, ee,
                                 ee | VCORE_EXIT_REQ) != ee);
                if ((ee >> 8) == 0)
                        kvmhv_interrupt_vcore(vc, ee);
        }
}

struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;
EXPORT_SYMBOL_GPL(kvmppc_host_rm_ops_hv);

#ifdef CONFIG_KVM_XICS
static struct kvmppc_irq_map *get_irqmap(struct kvmppc_passthru_irqmap *pimap,
                                         u32 xisr)
{
        int i;

        /*
         * We access the mapped array here without a lock. That
         * is safe because we never reduce the number of entries
         * in the array and we never change the v_hwirq field of
         * an entry once it is set.
         *
         * We have also carefully ordered the stores in the writer
         * and the loads here in the reader, so that if we find a matching
         * hwirq here, the associated GSI and irq_desc fields are valid.
         */
        for (i = 0; i < pimap->n_mapped; i++) {
                if (xisr == pimap->mapped[i].r_hwirq) {
                        /*
                         * Order subsequent reads in the caller to serialize
                         * with the writer.
                         */
                        smp_rmb();
                        return &pimap->mapped[i];
                }
        }
        return NULL;
}

/*
 * If we have an interrupt that's not an IPI, check if we have a
 * passthrough adapter and if so, check if this external interrupt
 * is for the adapter.
 * We will attempt to deliver the IRQ directly to the target VCPU's
 * ICP, the virtual ICP (based on affinity - the xive value in ICS).
 *
 * If the delivery fails or if this is not for a passthrough adapter,
 * return to the host to handle this interrupt. We earlier
 * saved a copy of the XIRR in the PACA, it will be picked up by
 * the host ICP driver.
 */
static int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        struct kvmppc_passthru_irqmap *pimap;
        struct kvmppc_irq_map *irq_map;
        struct kvm_vcpu *vcpu;

        vcpu = local_paca->kvm_hstate.kvm_vcpu;
        if (!vcpu)
                return 1;
        pimap = kvmppc_get_passthru_irqmap(vcpu->kvm);
        if (!pimap)
                return 1;
        irq_map = get_irqmap(pimap, xisr);
        if (!irq_map)
                return 1;

        /* We're handling this interrupt, generic code doesn't need to */
        local_paca->kvm_hstate.saved_xirr = 0;

        return kvmppc_deliver_irq_passthru(vcpu, xirr, irq_map, pimap, again);
}

#else
static inline int kvmppc_check_passthru(u32 xisr, __be32 xirr, bool *again)
{
        return 1;
}
#endif

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	2 Passthrough that needs completion in the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 *	-2 if there is PCI passthrough external interrupt that was handled
 */
static long kvmppc_read_one_intr(bool *again);
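
/*
 * Loop over kvmppc_read_one_intr() until it reports that no further
 * interrupt is pending, keeping the most significant result seen.
 */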
long kvmppc_read_intr(void)
{
        long ret = 0;
        long rc;
        bool again;

        do {
                again = false;
                rc = kvmppc_read_one_intr(&again);
                if (rc && (ret == 0 || rc > ret))
                        ret = rc;
        } while (again);

        return ret;
}
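
/*
 * Read and classify one interrupt from the ICP, using the real-mode
 * XICS accessors when a direct mapping is available and falling back
 * to OPAL otherwise.  Return codes are described above.
 */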
static long kvmppc_read_one_intr(bool *again)
{
        unsigned long xics_phys;
        u32 h_xirr;
        __be32 xirr;
        u32 xisr;
        u8 host_ipi;
        int64_t rc;

        /* see if a host IPI is pending */
        host_ipi = local_paca->kvm_hstate.host_ipi;
        if (host_ipi)
                return 1;

        /* Now read the interrupt from the ICP */
        xics_phys = local_paca->kvm_hstate.xics_phys;
        if (!xics_phys) {
                /* Use OPAL to read the XIRR */
                rc = opal_rm_int_get_xirr(&xirr, false);
                if (rc < 0)
                        return 1;
        } else {
                xirr = _lwzcix(xics_phys + XICS_XIRR);
        }

        /*
         * Save XIRR for later. Since we get control in reverse endian
         * on LE systems, save it byte reversed and fetch it back in
         * host endian. Note that xirr is the value read from the
         * XIRR register, while h_xirr is the host endian version.
         */
        h_xirr = be32_to_cpu(xirr);
        local_paca->kvm_hstate.saved_xirr = h_xirr;
        xisr = h_xirr & 0xffffff;
        /*
         * Ensure that the store/load complete to guarantee all side
         * effects of loading from XIRR has completed
         */
        smp_mb();

        /* if nothing pending in the ICP */
        if (!xisr)
                return 0;

        /* We found something in the ICP...
         *
         * If it is an IPI, clear the MFRR and EOI it.
         */
        if (xisr == XICS_IPI) {
                if (xics_phys) {
                        _stbcix(xics_phys + XICS_MFRR, 0xff);
                        _stwcix(xics_phys + XICS_XIRR, xirr);
                } else {
                        opal_rm_int_set_mfrr(hard_smp_processor_id(), 0xff);
                        rc = opal_rm_int_eoi(h_xirr);
                        /* If rc > 0, there is another interrupt pending */
                        *again = rc > 0;
                }

                /*
                 * Need to ensure side effects of above stores
                 * complete before proceeding.
                 */
                smp_mb();

                /*
                 * We need to re-check host IPI now in case it got set in the
                 * meantime. If it's clear, we bounce the interrupt to the
                 * guest
                 */
                host_ipi = local_paca->kvm_hstate.host_ipi;
                if (unlikely(host_ipi != 0)) {
                        /* We raced with the host,
                         * we need to resend that IPI, bummer
                         */
                        if (xics_phys)
                                _stbcix(xics_phys + XICS_MFRR, IPI_PRIORITY);
                        else
                                opal_rm_int_set_mfrr(hard_smp_processor_id(),
                                                     IPI_PRIORITY);
                        /* Let side effects complete */
                        smp_mb();
                        return 1;
                }

                /* OK, it's an IPI for us */
                local_paca->kvm_hstate.saved_xirr = 0;
                return -1;
        }

        return kvmppc_check_passthru(xisr, xirr, again);
}