/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>
#include "trace_pr.h"
#include "book3s.h"

#define PTE_SIZE 12

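/*
 * Drop the host HPTE that shadows a guest PTE: ask the hash-MMU ops to
 * invalidate the slot/VPN we recorded when the shadow mapping was
 * created in kvmppc_mmu_map_page() below.
 */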
void kvmppc_mmu_invalidate_pte(struct kvm_vcpu *vcpu, struct hpte_cache *pte)
{
	mmu_hash_ops.hpte_invalidate(pte->slot, pte->host_vpn,
				     pte->pagesize, pte->pagesize,
				     MMU_SEGSIZE_256M, false);
}

/*
 * We keep 512 gvsid->hvsid entries, mapping the guest ones to the array
 * using a hash, so we don't waste cycles on looping.
 */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return (u16)(((gvsid >> (SID_MAP_BITS * 7)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 6)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 5)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 4)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 3)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 2)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 1)) & SID_MAP_MASK) ^
		     ((gvsid >> (SID_MAP_BITS * 0)) & SID_MAP_MASK));
}

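/*
 * Look up the shadow (host) VSID for a guest VSID.  An entry may sit in
 * either of two hash slots (sid_map_mask or SID_MAP_MASK - sid_map_mask,
 * see create_sid_map() below); privileged and problem state are kept
 * apart via the VSID_PR bit.
 */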
static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->valid && (map->guest_vsid == gvsid)) {
		trace_kvm_book3s_slb_found(gvsid, map->host_vsid);
		return map;
	}

	trace_kvm_book3s_slb_fail(sid_map_mask, gvsid);
	return NULL;
}
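
/*
 * Resolve a guest page fault by installing a shadow mapping in the
 * host hash page table.  Returns 0 on success, -EINVAL if the guest
 * page or a segment mapping cannot be found, -EAGAIN if an MMU
 * notifier invalidation raced with us (the fault can simply be
 * retried), and -1 if evicting an old hash group entry failed.
 */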
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte,
			bool iswrite)
{
	unsigned long vpn;
	kvm_pfn_t hpaddr;
	ulong hash, hpteg;
	u64 vsid;
	int ret;
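	/*
	 * 0x192 we read as HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWRW (an
	 * assumed decode of the constant): a coherent read/write mapping
	 * with the reference/change bits preset.  It is restricted below
	 * for read-only and no-execute pages.
	 */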
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;
	int r = 0;
	int hpsize = MMU_PAGE_4K;
	bool writable;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu->kvm;
	struct hpte_cache *cpte;
	unsigned long gfn = orig_pte->raddr >> PAGE_SHIFT;
	unsigned long pfn;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
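	/*
	 * The read barrier orders the sequence snapshot before the
	 * gfn->pfn lookup below; mmu_notifier_retry() rechecks the count
	 * under kvm->mmu_lock before the HPTE is actually inserted.
	 */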

	/* Get host physical address for gpa */
	pfn = kvmppc_gpa_to_pfn(vcpu, orig_pte->raddr, iswrite, &writable);
	if (is_error_noslot_pfn(pfn)) {
		printk(KERN_INFO "Couldn't get guest page for gpa %lx!\n",
		       orig_pte->raddr);
		r = -EINVAL;
		goto out;
	}
	hpaddr = pfn << PAGE_SHIFT;

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		r = -EINVAL;
		goto out;
	}
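
	/*
	 * Compute the virtual page number in the host's address space,
	 * i.e. with the shadow VSID rather than the guest one.
	 */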
	vpn = hpt_vpn(orig_pte->eaddr, map->host_vsid, MMU_SEGSIZE_256M);

	kvm_set_pfn_accessed(pfn);
	if (!orig_pte->may_write || !writable)
		rflags |= PP_RXRX;
	else {
		mark_page_dirty(vcpu->kvm, gfn);
		kvm_set_pfn_dirty(pfn);
	}

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;
	else
		kvmppc_mmu_flush_icache(pfn);

	/*
	 * Use 64K pages if possible; otherwise, on 64K page kernels,
	 * we need to transfer 4 more bits from guest real to host real addr.
	 */
	if (vsid & VSID_64K)
		hpsize = MMU_PAGE_64K;
	else
		hpaddr |= orig_pte->raddr & (~0xfffULL & ~PAGE_MASK);

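	/* Primary hash for the HPT group; inverted below to try the
	 * secondary group when the primary is full. */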
	hash = hpt_hash(vpn, mmu_psize_defs[hpsize].shift, MMU_SEGSIZE_256M);

	cpte = kvmppc_mmu_hpte_cache_next(vcpu);

	spin_lock(&kvm->mmu_lock);
	if (!cpte || mmu_notifier_retry(kvm, mmu_seq)) {
		r = -EAGAIN;
		goto out_unlock;
	}

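	/*
	 * Insert into the primary hash group first; on failure flip to
	 * the secondary group, and from the third attempt onwards evict
	 * an existing entry to make room.
	 */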
map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (mmu_hash_ops.hpte_remove(hpteg) < 0) {
			r = -1;
			goto out_unlock;
		}

	ret = mmu_hash_ops.hpte_insert(hpteg, vpn, hpaddr, rflags, vflags,
				       hpsize, hpsize, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		trace_kvm_book3s_64_mmu_map(rflags, hpteg,
					    vpn, hpaddr, orig_pte);

		/*
		 * The mmu_hash_ops code may give us a secondary entry even
		 * though we asked for a primary. Fix up.
		 */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		cpte->slot = hpteg + (ret & 7);
		cpte->host_vpn = vpn;
		cpte->pte = *orig_pte;
		cpte->pfn = pfn;
		cpte->pagesize = hpsize;

		kvmppc_mmu_hpte_cache_map(vcpu, cpte);
		cpte = NULL;
	}

out_unlock:
	spin_unlock(&kvm->mmu_lock);
	kvm_release_pfn_clean(pfn);
	if (cpte)
		kvmppc_mmu_hpte_cache_free(cpte);

out:
	return r;
}
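
/*
 * Flush the shadow copies of a guest PTE's virtual page.  For a 64K
 * segment the low four bits of vpage are masked off, so all sixteen
 * 4K sub-page mappings are caught.
 */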
void kvmppc_mmu_unmap_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	u64 mask = 0xfffffffffULL;
	u64 vsid;

	vcpu->arch.mmu.esid_to_vsid(vcpu, pte->eaddr >> SID_SHIFT, &vsid);
	if (vsid & VSID_64K)
		mask = 0xffffffff0ULL;
	kvmppc_mmu_pte_vflush(vcpu, pte->vpage, mask);
}
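
/*
 * Allocate a fresh guest->shadow VSID mapping.  Colliding guest VSIDs
 * alternate between the two candidate hash slots (backwards_map); once
 * the proto-VSID range is exhausted, the map, the shadow PTEs and the
 * shadow segments are all flushed and allocation starts over.
 */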
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (kvmppc_get_msr(vcpu) & MSR_PR)
		gvsid |= VSID_PR;

	/*
	 * Hash collisions would otherwise always land in the same slot,
	 * so alternate between the two candidate slots for new entries.
	 */
	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->proto_vsid_next == vcpu_book3s->proto_vsid_max) {
		vcpu_book3s->proto_vsid_next = vcpu_book3s->proto_vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vsid_scramble(vcpu_book3s->proto_vsid_next++, 256M);

	map->guest_vsid = gvsid;
	map->valid = true;

	trace_kvm_book3s_slb_map(sid_map_mask, gvsid, map->host_vsid);

	return map;
}
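
/*
 * Pick a shadow SLB slot for a new segment: reuse an invalidated entry
 * if one exists, otherwise append, flushing all segments first if the
 * shadow SLB is full.
 */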
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	/* Are we overwriting? */
	for (i = 0; i < svcpu->slb_max; i++) {
		if (!(svcpu->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((svcpu->slb[i].esid & ESID_MASK) == esid) {
			r = i;
			goto out;
		}
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval >= 0) {
		r = found_inval;
		goto out;
	}

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((svcpu->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = svcpu->slb_max;
	svcpu->slb_max++;

out:
	svcpu_put(svcpu);
	return r;
}
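
/*
 * Install a shadow SLB entry for a guest effective address: translate
 * the guest ESID to a guest VSID, find or create the matching shadow
 * VSID, and write the ESID/VSID pair into the shadow SLB.
 */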
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;
	int r = 0;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		svcpu->slb[slb_index].esid = 0;
		r = -ENOENT;
		goto out;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

#ifdef CONFIG_PPC_64K_PAGES
	/* Set host segment base page size to 64K if possible */
	if (gvsid & VSID_64K)
		slb_vsid |= mmu_psize_defs[MMU_PAGE_64K].sllp;
#endif

	svcpu->slb[slb_index].esid = slb_esid;
	svcpu->slb[slb_index].vsid = slb_vsid;

	trace_kvm_book3s_slbmte(slb_vsid, slb_esid);

out:
	svcpu_put(svcpu);
	return r;
}
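
/* Invalidate every valid shadow SLB entry that maps the given segment. */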
void kvmppc_mmu_flush_segment(struct kvm_vcpu *vcpu, ulong ea, ulong seg_size)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	ulong seg_mask = -seg_size;
	int i;

	for (i = 0; i < svcpu->slb_max; i++) {
		if ((svcpu->slb[i].esid & SLB_ESID_V) &&
		    (svcpu->slb[i].esid & seg_mask) == ea) {
			/* Invalidate this entry */
			svcpu->slb[i].esid = 0;
		}
	}

	svcpu_put(svcpu);
}

void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	svcpu->slb_max = 0;
	svcpu->slb[0].esid = 0;
	svcpu_put(svcpu);
}

void kvmppc_mmu_destroy_pr(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_hpte_destroy(vcpu);
	__destroy_context(to_book3s(vcpu)->context_id[0]);
}
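
/*
 * Allocate a host MMU context for this vcpu: the context id selects a
 * range of 2^ESID_BITS proto-VSIDs from which create_sid_map() hands
 * out shadow VSIDs.  The shadow-PTE cache is set up here as well.
 */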
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id[0] = err;

	vcpu3s->proto_vsid_max = ((u64)(vcpu3s->context_id[0] + 1)
				  << ESID_BITS) - 1;
	vcpu3s->proto_vsid_first = (u64)vcpu3s->context_id[0] << ESID_BITS;
	vcpu3s->proto_vsid_next = vcpu3s->proto_vsid_first;

	kvmppc_mmu_hpte_init(vcpu);

	return 0;
}