/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"

#define PTE_SIZE 12
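/*
 * Invalidate one shadow HPTE: remove the host hash table entry that backs
 * a cached guest mapping, identified by the slot and virtual page number
 * remembered in the hpte_cache at insertion time.
 */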
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	ppc_md.hpte_invalidate(pte->slot, pte->host_vpn,
			       pte->pagesize, pte->pagesize, MMU_SEGSIZE_256M,
			       false);
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array
 * using a hash, so we don't waste cycles on looping.
 */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}
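
/*
 * The hash simply folds the 64-bit guest VSID into SID_MAP_BITS-wide
 * chunks and XORs them together (with 512 entries, SID_MAP_BITS is 9 and
 * SID_MAP_MASK is 0x1ff), so computing a bucket index costs a handful of
 * shift/xor instructions instead of a search.
 */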
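/*
 * Look up the host VSID for a guest VSID.  Two buckets are probed: the
 * hash of the gvsid and its mirror at SID_MAP_MASK - hash, matching the
 * two slots create_sid_map() alternates between.
 */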
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}
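/*
 * Map a guest page into the host hash page table: resolve the guest real
 * address to a host pfn, find (or create) the segment mapping, insert an
 * HPTE via ppc_md and remember it in the hpte_cache so it can be
 * invalidated later.
 */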
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
	int rflags = 0x192;	/* HPTE_R_R | HPTE_R_C | HPTE_R_M | PP 0b10 */
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
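	/*
	 * The read barrier orders the mmu_notifier_seq sample before the
	 * pfn lookup below; mmu_notifier_retry() rechecks the sequence
	 * under mmu_lock later, so a racing invalidation makes us bail
	 * out with -EAGAIN instead of inserting a stale mapping.
	 */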
	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;
	/* Find the host VSID and write the ea -> hpa mapping into the
	 * host hash table */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}

	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

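	/*
	 * Insertion strategy: try the primary hash group first; on
	 * failure flip to the secondary group (~hash, HPTE_V_SECONDARY);
	 * once both have been tried, evict an entry from the current
	 * group and insert again.
	 */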
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = ppc_md.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				 hpsize, hpsize, MMU_SEGSIZE_256M);
	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}
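/*
 * Flush the shadow PTE cache for one guest page.  For 64K segments the
 * mask drops the low 4 bits of the virtual page number: one 64K host
 * page backs 16 4K guest pages, and all of them must match the flush.
 */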
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
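/*
 * Allocate a new guest-to-host VSID mapping.  The host VSID is derived
 * from a per-vcpu proto-VSID counter; when the counter range is
 * exhausted, all shadow state (sid_map, PTEs, segments) is flushed and
 * the range is reused.
 */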
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/* Colliding guest VSIDs would otherwise keep evicting each other
	 * from the same bucket, so alternate between the two candidate
	 * slots */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}
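/*
 * Pick a shadow SLB slot for a segment mapping: reuse a slot that already
 * maps this ESID, else one that was invalidated earlier, else append a
 * new one; if the shadow SLB is full, purge it and start over.
 */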
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}
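/*
 * Create a shadow SLB entry for the segment containing eaddr, resolving
 * the guest ESID to a host VSID via the sid_map (creating the mapping if
 * necessary).
 */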
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

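	/*
	 * Build the shadow SLB entry: shift the host VSID into the SLB
	 * VSID field and clear the KP protection bit.
	 */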
	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}
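/*
 * Invalidate every valid shadow SLB entry covering the segment at ea.
 * seg_size is a power of two, so -seg_size is the mask that strips the
 * offset within the segment.
 */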
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}
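
/*
 * Set up per-vcpu MMU state: reserve a host MMU context and carve the
 * proto-VSID range out of it (each context provides 2^ESID_BITS
 * proto-VSIDs, which create_sid_map() scrambles into host VSIDs).
 */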
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}