/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>
#include <linux/bitops.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/archrandom.h>
#include <asm/xics.h>
#include <asm/dbell.h>
#include <asm/cputhreads.h>

#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer cpus (CPU_FTR_ARCH_206)
 * should be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash pagetable allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;

static struct cma *kvm_cma;

static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);
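
/*
 * Note: the early_param above means that booting with
 * "kvm_cma_resv_ratio=n" on the kernel command line reserves n% of
 * memory instead of the default 5%, e.g. kvm_cma_resv_ratio=10 for 10%.
 */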

struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	return cma_alloc(kvm_cma, nr_pages, order_base_2(HPT_ALIGN_PAGES));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);
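
/*
 * Usage sketch (illustrative only; variable names are hypothetical):
 * the HV MMU setup code allocates the guest hash page table from this
 * CMA region and frees it on VM teardown, roughly:
 *
 *	pages = 1ul << (order - PAGE_SHIFT);
 *	page = kvm_alloc_hpt(pages);	/- NULL if CMA is exhausted -/
 *	...
 *	kvm_release_hpt(page, pages);
 *
 * The VM_BUG_ON above enforces that a request covers at least one CMA
 * chunk of 2^KVM_CMA_CHUNK_ORDER bytes (64 pages with 4K pages).
 */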

/**
 * kvm_cma_reserve() - reserve area for kvm hash pagetable
 *
 * This function reserves memory from the early allocator. It should be
 * called by arch-specific code once the memblock allocator has been
 * activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}
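
/*
 * Worked example (assuming the default 5% ratio and 4K pages): on a
 * machine with 32 GiB of memory the loop counts 8M page frames, so
 * selected_size = 8M * 5 / 100 pages, i.e. roughly 1.6 GiB, which is
 * then declared as the CMA area backing guest hash page tables.
 */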

/*
 * Real-mode H_CONFER implementation.
 * We check if we are the only vcpu out of this virtual core
 * still running in the guest and not ceded. If so, we pop up
 * to the virtual-mode implementation; if not, just return to
 * the guest.
 */
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count)
{
	struct kvmppc_vcore *vc = vcpu->arch.vcore;
	int threads_running;
	int threads_ceded;
	int threads_conferring;
	u64 stop = get_tb() + 10 * tb_ticks_per_usec;
	int rv = H_SUCCESS; /* => don't yield */

	set_bit(vcpu->arch.ptid, &vc->conferring_threads);
	while ((get_tb() < stop) && !VCORE_IS_EXITING(vc)) {
		threads_running = VCORE_ENTRY_MAP(vc);
		threads_ceded = vc->napping_threads;
		threads_conferring = vc->conferring_threads;
		if ((threads_ceded | threads_conferring) == threads_running) {
			rv = H_TOO_HARD; /* => do yield */
			break;
		}
	}
	clear_bit(vcpu->arch.ptid, &vc->conferring_threads);
	return rv;
}
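
/*
 * Bitmap example for the test above: with threads 0 and 2 in the guest,
 * VCORE_ENTRY_MAP(vc) = 0b0101. If thread 0 has ceded (napping_threads =
 * 0b0001) and thread 2 is conferring (conferring_threads = 0b0100), the
 * union 0b0101 equals the entry map: no thread is doing useful work, so
 * we return H_TOO_HARD and yield via the virtual-mode path.
 */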

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system. We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);
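
/*
 * The division by 4 works because PAPR hcall numbers are multiples of 4,
 * so hcall_real_table is indexed by hcall number / 4 and holds a non-zero
 * entry for each hcall with a real-mode handler. For example, H_CEDE
 * (0xe0) maps to index 0x38.
 */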

int kvmppc_hwrng_present(void)
{
	return powernv_hwrng_present();
}
EXPORT_SYMBOL_GPL(kvmppc_hwrng_present);

long kvmppc_h_random(struct kvm_vcpu *vcpu)
{
	if (powernv_get_random_real_mode(&vcpu->arch.gpr[4]))
		return H_SUCCESS;

	return H_HARDWARE;
}
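
/*
 * The random value is written straight into the vcpu's GPR4 because r4
 * is where the guest expects the first hcall return value; the H_SUCCESS
 * status goes back to the guest in r3.
 */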

static inline void rm_writeb(unsigned long paddr, u8 val)
{
	__asm__ __volatile__("stbcix %0,0,%1"
		: : "r" (val), "r" (paddr) : "memory");
}
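
/*
 * stbcix is a hypervisor-privileged cache-inhibited store, which lets us
 * write to MMIO space (here the XICS presentation controller) using a
 * real (physical) address while translation is off.
 */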

/*
 * Send an interrupt or message to another CPU.
 * This can only be called in real mode.
 * The caller needs to include any barrier needed to order writes
 * to memory vs. the IPI/message.
 */
void kvmhv_rm_send_ipi(int cpu)
{
	unsigned long xics_phys;

	/* On POWER8 for IPIs to threads in the same core, use msgsnd */
	if (cpu_has_feature(CPU_FTR_ARCH_207S) &&
	    cpu_first_thread_sibling(cpu) ==
	    cpu_first_thread_sibling(raw_smp_processor_id())) {
		unsigned long msg = PPC_DBELL_TYPE(PPC_DBELL_SERVER);
		msg |= cpu_thread_in_core(cpu);
		__asm__ __volatile__ (PPC_MSGSND(%0) : : "r" (msg));
		return;
	}

	/* Else poke the target with an IPI */
	xics_phys = paca[cpu].kvm_hstate.xics_phys;
	rm_writeb(xics_phys + XICS_MFRR, IPI_PRIORITY);
}
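
/*
 * The doorbell message encodes the PPC_DBELL_SERVER message type plus the
 * target's thread number within the core; msgsnd delivers it directly,
 * without going through the interrupt controller. The fallback path
 * raises a XICS IPI by writing IPI_PRIORITY to the target's MFRR.
 */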

/*
 * The following functions are called from the assembly code
 * in book3s_hv_rmhandlers.S.
 */
static void kvmhv_interrupt_vcore(struct kvmppc_vcore *vc, int active)
{
	int cpu = vc->pcpu;

	/* Order setting of exit map vs. msgsnd/IPI */
	smp_mb();
	for (; active; active >>= 1, ++cpu)
		if (active & 1)
			kvmhv_rm_send_ipi(cpu);
}

void kvmhv_commence_exit(int trap)
{
	struct kvmppc_vcore *vc = local_paca->kvm_hstate.kvm_vcore;
	int ptid = local_paca->kvm_hstate.ptid;
	int me, ee;

	/*
	 * Set our bit in the threads-exiting-guest map in the 0xff00
	 * bits of vcore->entry_exit_map.
	 */
	me = 0x100 << ptid;
	do {
		ee = vc->entry_exit_map;
	} while (cmpxchg(&vc->entry_exit_map, ee, ee | me) != ee);

	/* Are we the first here? */
	if ((ee >> 8) != 0)
		return;

	/*
	 * Trigger the other threads in this vcore to exit the guest.
	 * If this is a hypervisor decrementer interrupt then they
	 * will be already on their way out of the guest.
	 */
	if (trap != BOOK3S_INTERRUPT_HV_DECREMENTER)
		kvmhv_interrupt_vcore(vc, ee & ~(1 << ptid));
}
261 | } |