/*
 * Copyright (C) 2010 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>
#include <linux/slab.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#include "trace.h"

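/*
 * Shadow PTEs are tracked at 4k page granularity (PTE_SIZE is the page
 * shift).  Every cached entry sits on four hash lists at once, so it
 * can be found and flushed by full or truncated effective address
 * (ePTE/ePTE_long) and by full or truncated virtual page number
 * (vPTE/vPTE_long).
 */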
#define PTE_SIZE 12

static struct kmem_cache *hpte_cache;

static inline u64 kvmppc_mmu_hash_pte(u64 eaddr)
{
	return hash_64(eaddr >> PTE_SIZE, HPTEG_HASH_BITS_PTE);
}

static inline u64 kvmppc_mmu_hash_pte_long(u64 eaddr)
{
	return hash_64((eaddr & 0x0ffff000) >> PTE_SIZE,
		       HPTEG_HASH_BITS_PTE_LONG);
}

static inline u64 kvmppc_mmu_hash_vpte(u64 vpage)
{
	return hash_64(vpage & 0xfffffffffULL, HPTEG_HASH_BITS_VPTE);
}

static inline u64 kvmppc_mmu_hash_vpte_long(u64 vpage)
{
	return hash_64((vpage & 0xffffff000ULL) >> 12,
		       HPTEG_HASH_BITS_VPTE_LONG);
}

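/*
 * Insert a newly mapped shadow PTE into all four lookup hash tables.
 * mmu_lock serializes writers; readers walk the lists under RCU.
 */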
void kvmppc_mmu_hpte_cache_map(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	u64 index;
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_map(pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* Add to ePTE list */
	index = kvmppc_mmu_hash_pte(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte, &vcpu3s->hpte_hash_pte[index]);

	/* Add to ePTE_long list */
	index = kvmppc_mmu_hash_pte_long(pte->pte.eaddr);
	hlist_add_head_rcu(&pte->list_pte_long,
			   &vcpu3s->hpte_hash_pte_long[index]);

	/* Add to vPTE list */
	index = kvmppc_mmu_hash_vpte(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte, &vcpu3s->hpte_hash_vpte[index]);

	/* Add to vPTE_long list */
	index = kvmppc_mmu_hash_vpte_long(pte->pte.vpage);
	hlist_add_head_rcu(&pte->list_vpte_long,
			   &vcpu3s->hpte_hash_vpte_long[index]);

	spin_unlock(&vcpu3s->mmu_lock);
}

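/*
 * Entries are freed through RCU, so a concurrent reader traversing one
 * of the hash lists can never hit a freed entry.
 */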
static void free_pte_rcu(struct rcu_head *head)
{
	struct hpte_cache *pte = container_of(head, struct hpte_cache, rcu_head);
	kmem_cache_free(hpte_cache, pte);
}

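/*
 * Drop a shadow PTE: invalidate the hardware mapping, then unhash the
 * entry from all four lists and schedule the RCU-deferred free.  Two
 * flushes can race on the same entry, hence the hlist_unhashed() check
 * under mmu_lock.
 */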
static void invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	trace_kvm_book3s_mmu_invalidate(pte);

	/* Different for 32 and 64 bit */
	kvmppc_mmu_invalidate_pte(vcpu, pte);

	spin_lock(&vcpu3s->mmu_lock);

	/* pte already invalidated in between? */
	if (hlist_unhashed(&pte->list_pte)) {
		spin_unlock(&vcpu3s->mmu_lock);
		return;
	}

	hlist_del_init_rcu(&pte->list_pte);
	hlist_del_init_rcu(&pte->list_pte_long);
	hlist_del_init_rcu(&pte->list_vpte);
	hlist_del_init_rcu(&pte->list_vpte_long);

	spin_unlock(&vcpu3s->mmu_lock);

	vcpu3s->hpte_cache_count--;
	call_rcu(&pte->rcu_head, free_pte_rcu);
}

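/*
 * A complete flush only needs to walk one of the four tables, since
 * every entry is reachable through its vPTE_long list.
 */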
static void kvmppc_mmu_pte_flush_all(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

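/* Flush with mask ~0xfffUL */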
static void kvmppc_mmu_pte_flush_page(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte[kvmppc_mmu_hash_pte(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte)
		if ((pte->pte.eaddr & ~0xfffUL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

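/* Flush with mask 0x0ffff000 */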
static void kvmppc_mmu_pte_flush_long(struct kvm_vcpu *vcpu, ulong guest_ea)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;

	/* Find the list of entries in the map */
	list = &vcpu3s->hpte_hash_pte_long[
			kvmppc_mmu_hash_pte_long(guest_ea)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_pte_long)
		if ((pte->pte.eaddr & 0x0ffff000UL) == guest_ea)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

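/*
 * Flush shadow PTEs by guest effective address; ea_mask selects the
 * flush granularity (a single 4k page, an ePTE_long group, or a
 * complete flush).
 */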
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	trace_kvm_book3s_mmu_flush("", vcpu, guest_ea, ea_mask);
	guest_ea &= ea_mask;

	switch (ea_mask) {
	case ~0xfffUL:
		kvmppc_mmu_pte_flush_page(vcpu, guest_ea);
		break;
	case 0x0ffff000:
		kvmppc_mmu_pte_flush_long(vcpu, guest_ea);
		break;
	case 0:
		/* Doing a complete flush -> start from scratch */
		kvmppc_mmu_pte_flush_all(vcpu);
		break;
	default:
		WARN_ON(1);
		break;
	}
}

/* Flush with mask 0xfffffffff */
static void kvmppc_mmu_pte_vflush_short(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xfffffffffULL;

	list = &vcpu3s->hpte_hash_vpte[kvmppc_mmu_hash_vpte(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

/* Flush with mask 0xffffff000 */
static void kvmppc_mmu_pte_vflush_long(struct kvm_vcpu *vcpu, u64 guest_vp)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hlist_head *list;
	struct hpte_cache *pte;
	u64 vp_mask = 0xffffff000ULL;

	list = &vcpu3s->hpte_hash_vpte_long[
			kvmppc_mmu_hash_vpte_long(guest_vp)];

	rcu_read_lock();

	/* Check the list for matching entries and invalidate */
	hlist_for_each_entry_rcu(pte, list, list_vpte_long)
		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(vcpu, pte);

	rcu_read_unlock();
}

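/*
 * Flush shadow PTEs by guest virtual page number; vp_mask selects the
 * short or long vPTE granularity.
 */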
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	trace_kvm_book3s_mmu_flush("v", vcpu, guest_vp, vp_mask);
	guest_vp &= vp_mask;

	switch (vp_mask) {
	case 0xfffffffffULL:
		kvmppc_mmu_pte_vflush_short(vcpu, guest_vp);
		break;
	case 0xffffff000ULL:
		kvmppc_mmu_pte_vflush_long(vcpu, guest_vp);
		break;
	default:
		WARN_ON(1);
		return;
	}
}

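/*
 * Flush every shadow PTE whose guest real address falls inside
 * [pa_start, pa_end).  There is no hash keyed by real address, so walk
 * the whole vPTE_long table and filter on pte.raddr.
 */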
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;
	int i;

	trace_kvm_book3s_mmu_flush("p", vcpu, pa_start, pa_end);

	rcu_read_lock();

	for (i = 0; i < HPTEG_HASH_NUM_VPTE_LONG; i++) {
		struct hlist_head *list = &vcpu3s->hpte_hash_vpte_long[i];

		hlist_for_each_entry_rcu(pte, list, list_vpte_long)
			if ((pte->pte.raddr >= pa_start) &&
			    (pte->pte.raddr < pa_end))
				invalidate_pte(vcpu, pte);
	}

	rcu_read_unlock();
}

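/*
 * Allocate a shadow PTE from the slab cache.  Once the cache grows to
 * HPTEG_CACHE_NUM entries, flush everything and start from scratch.
 */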
struct hpte_cache *kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	struct hpte_cache *pte;

	pte = kmem_cache_zalloc(hpte_cache, GFP_KERNEL);
	/* Don't count (or later dereference) a failed allocation */
	if (!pte)
		return NULL;

	vcpu3s->hpte_cache_count++;

	if (vcpu3s->hpte_cache_count == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush_all(vcpu);

	return pte;
}

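/* Tearing down a vcpu: a flush with ea_mask == 0 drops every entry. */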
void kvmppc_mmu_hpte_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
}

static void kvmppc_mmu_hpte_init_hash(struct hlist_head *hash_list, int len)
{
	int i;

	for (i = 0; i < len; i++)
		INIT_HLIST_HEAD(&hash_list[i]);
}

int kvmppc_mmu_hpte_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);

	/* init hpte lookup hashes */
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_pte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_pte_long));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte));
	kvmppc_mmu_hpte_init_hash(vcpu3s->hpte_hash_vpte_long,
				  ARRAY_SIZE(vcpu3s->hpte_hash_vpte_long));

	spin_lock_init(&vcpu3s->mmu_lock);

	return 0;
}

int kvmppc_mmu_hpte_sysinit(void)
{
	/* init hpte slab cache */
	hpte_cache = kmem_cache_create("kvm-spt", sizeof(struct hpte_cache),
				       sizeof(struct hpte_cache), 0, NULL);
	/* Fail module init rather than oops on the first allocation */
	if (!hpte_cache)
		return -ENOMEM;

	return 0;
}

void kvmppc_mmu_hpte_sysexit(void)
{
	kmem_cache_destroy(hpte_cache);
}