arch/powerpc/kvm/book3s_hv_builtin.c
/*
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/cpu.h>
#include <linux/kvm_host.h>
#include <linux/preempt.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/init.h>
#include <linux/memblock.h>
#include <linux/sizes.h>
#include <linux/cma.h>

#include <asm/cputable.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>

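/*
 * The KVM CMA area is tracked in chunks of 2^KVM_CMA_CHUNK_ORDER bytes
 * (256 KiB); KVM_CMA_CHUNK_ORDER - PAGE_SHIFT is the per-bit order
 * passed to cma_declare_contiguous() in kvm_cma_reserve() below.
 */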
#define KVM_CMA_CHUNK_ORDER	18

/*
 * Hash page table alignment on newer CPUs (CPU_FTR_ARCH_206)
 * must be a power of 2.
 */
#define HPT_ALIGN_PAGES		((1 << 18) >> PAGE_SHIFT) /* 256k */
/*
 * By default we reserve 5% of memory for hash page table allocation.
 */
static unsigned long kvm_cma_resv_ratio = 5;
/*
 * We allocate RMAs (real mode areas) for KVM guests from the KVM CMA area.
 * Each RMA has to be physically contiguous and of a size that the
 * hardware supports.  PPC970 and POWER7 support 64MB, 128MB and 256MB,
 * and other larger sizes.  Since we are unlikely to be able to allocate
 * that much physically contiguous memory after the system is up and
 * running, we preallocate a set of RMAs in early boot using CMA.
 * The size must be a power of 2.
 */
unsigned long kvm_rma_pages = (1 << 27) >> PAGE_SHIFT;	/* 128MB */
EXPORT_SYMBOL_GPL(kvm_rma_pages);

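/* The CMA region from which RMAs and guest hash page tables are allocated. */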
static struct cma *kvm_cma;

/*
 * Work out the RMLS (real mode limit selector) field value for a given
 * RMA size.  Assumes POWER7 or PPC970.
 */
static inline int lpcr_rmls(unsigned long rma_size)
{
	switch (rma_size) {
	case 32ul << 20:	/* 32 MB */
		if (cpu_has_feature(CPU_FTR_ARCH_206))
			return 8;	/* only supported on POWER7 */
		return -1;
	case 64ul << 20:	/* 64 MB */
		return 3;
	case 128ul << 20:	/* 128 MB */
		return 7;
	case 256ul << 20:	/* 256 MB */
		return 4;
	case 1ul << 30:		/* 1 GB */
		return 2;
	case 16ul << 30:	/* 16 GB */
		return 1;
	case 256ul << 30:	/* 256 GB */
		return 0;
	default:
		return -1;
	}
}

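/*
 * Parse the kvm_rma_size= early boot parameter; memparse() accepts the
 * usual K/M/G suffixes, e.g. kvm_rma_size=256M.
 */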
static int __init early_parse_rma_size(char *p)
{
	unsigned long kvm_rma_size;

	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	kvm_rma_size = memparse(p, &p);
	/*
	 * Check that the requested size is one supported in hardware.
	 */
	if (lpcr_rmls(kvm_rma_size) < 0) {
		pr_err("RMA size of 0x%lx not supported\n", kvm_rma_size);
		return -EINVAL;
	}
	kvm_rma_pages = kvm_rma_size >> PAGE_SHIFT;
	return 0;
}
early_param("kvm_rma_size", early_parse_rma_size);

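/*
 * Allocate one RMA from the preallocated CMA pool and take an initial
 * reference on it; returns NULL if the pool is exhausted.
 */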
struct kvm_rma_info *kvm_alloc_rma(void)
{
	struct page *page;
	struct kvm_rma_info *ri;

	ri = kmalloc(sizeof(struct kvm_rma_info), GFP_KERNEL);
	if (!ri)
		return NULL;
	page = cma_alloc(kvm_cma, kvm_rma_pages, order_base_2(kvm_rma_pages));
	if (!page)
		goto err_out;
	atomic_set(&ri->use_count, 1);
	ri->base_pfn = page_to_pfn(page);
	return ri;
err_out:
	kfree(ri);
	return NULL;
}
EXPORT_SYMBOL_GPL(kvm_alloc_rma);

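/* Drop a reference on an RMA; the last put returns it to the CMA area. */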
void kvm_release_rma(struct kvm_rma_info *ri)
{
	if (atomic_dec_and_test(&ri->use_count)) {
		cma_release(kvm_cma, pfn_to_page(ri->base_pfn), kvm_rma_pages);
		kfree(ri);
	}
}
EXPORT_SYMBOL_GPL(kvm_release_rma);

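/*
 * Parse the kvm_cma_resv_ratio= early parameter: the percentage of
 * system memory to reserve for the KVM CMA area (default 5).
 */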
static int __init early_parse_kvm_cma_resv(char *p)
{
	pr_debug("%s(%s)\n", __func__, p);
	if (!p)
		return -EINVAL;
	return kstrtoul(p, 0, &kvm_cma_resv_ratio);
}
early_param("kvm_cma_resv_ratio", early_parse_kvm_cma_resv);

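/*
 * Allocate physically contiguous memory from the KVM CMA area for a
 * guest hash page table, with the alignment the CPU requires.
 */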
struct page *kvm_alloc_hpt(unsigned long nr_pages)
{
	unsigned long align_pages = HPT_ALIGN_PAGES;

	VM_BUG_ON(order_base_2(nr_pages) < KVM_CMA_CHUNK_ORDER - PAGE_SHIFT);

	/* Old CPUs require HPT aligned on a multiple of its size */
	if (!cpu_has_feature(CPU_FTR_ARCH_206))
		align_pages = nr_pages;
	return cma_alloc(kvm_cma, nr_pages, order_base_2(align_pages));
}
EXPORT_SYMBOL_GPL(kvm_alloc_hpt);

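/* Return a hash page table previously allocated by kvm_alloc_hpt(). */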
void kvm_release_hpt(struct page *page, unsigned long nr_pages)
{
	cma_release(kvm_cma, page, nr_pages);
}
EXPORT_SYMBOL_GPL(kvm_release_hpt);

/**
 * kvm_cma_reserve() - reserve area for KVM hash page tables
 *
 * This function reserves memory from the early allocator.  It should be
 * called by arch-specific code once the memblock allocator
 * has been activated and all other subsystems have already allocated/reserved
 * memory.
 */
void __init kvm_cma_reserve(void)
{
	unsigned long align_size;
	struct memblock_region *reg;
	phys_addr_t selected_size = 0;

	/*
	 * We need the CMA reservation only when we are in HV mode.
	 */
	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return;
	/*
	 * We cannot use memblock_phys_mem_size() here, because
	 * memblock_analyze() has not been called yet.
	 */
	for_each_memblock(memory, reg)
		selected_size += memblock_region_memory_end_pfn(reg) -
				 memblock_region_memory_base_pfn(reg);

	selected_size = (selected_size * kvm_cma_resv_ratio / 100) << PAGE_SHIFT;
	if (selected_size) {
		pr_debug("%s: reserving %ld MiB for global area\n", __func__,
			 (unsigned long)selected_size / SZ_1M);
		/*
		 * Old CPUs require the HPT to be aligned on a multiple of
		 * its size, so for them make the alignment the maximum
		 * size we could request.
		 */
		if (!cpu_has_feature(CPU_FTR_ARCH_206))
			align_size = __rounddown_pow_of_two(selected_size);
		else
			align_size = HPT_ALIGN_PAGES << PAGE_SHIFT;

		align_size = max(kvm_rma_pages << PAGE_SHIFT, align_size);
		cma_declare_contiguous(0, selected_size, 0, align_size,
			KVM_CMA_CHUNK_ORDER - PAGE_SHIFT, false, &kvm_cma);
	}
}

/*
 * When running HV mode KVM we need to block certain operations while KVM VMs
 * exist in the system.  We use a counter of VMs to track this.
 *
 * One of the operations we need to block is onlining of secondaries, so we
 * protect hv_vm_count with get/put_online_cpus().
 */
static atomic_t hv_vm_count;

void kvm_hv_vm_activated(void)
{
	get_online_cpus();
	atomic_inc(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_activated);

void kvm_hv_vm_deactivated(void)
{
	get_online_cpus();
	atomic_dec(&hv_vm_count);
	put_online_cpus();
}
EXPORT_SYMBOL_GPL(kvm_hv_vm_deactivated);

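/* True while at least one HV guest exists in the system. */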
bool kvm_hv_mode_active(void)
{
	return atomic_read(&hv_vm_count) != 0;
}

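/*
 * hcall_real_table lives in assembly (book3s_hv_rmhandlers.S); a
 * non-zero entry means the corresponding hypercall has a real-mode
 * handler.
 */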
extern int hcall_real_table[], hcall_real_table_end[];

int kvmppc_hcall_impl_hv_realmode(unsigned long cmd)
{
	/* Hypercall numbers are multiples of 4, so scale down to index the table */
	cmd /= 4;
	if (cmd < hcall_real_table_end - hcall_real_table &&
	    hcall_real_table[cmd])
		return 1;

	return 0;
}
EXPORT_SYMBOL_GPL(kvmppc_hcall_impl_hv_realmode);