]>
Commit | Line | Data |
---|---|---|
e71b2a39 AG |
1 | /* |
2 | * This program is free software; you can redistribute it and/or modify | |
3 | * it under the terms of the GNU General Public License, version 2, as | |
4 | * published by the Free Software Foundation. | |
5 | * | |
6 | * This program is distributed in the hope that it will be useful, | |
7 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
8 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | |
9 | * GNU General Public License for more details. | |
10 | * | |
11 | * You should have received a copy of the GNU General Public License | |
12 | * along with this program; if not, write to the Free Software | |
13 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. | |
14 | * | |
15 | * Copyright SUSE Linux Products GmbH 2009 | |
16 | * | |
17 | * Authors: Alexander Graf <agraf@suse.de> | |
18 | */ | |
19 | ||
20 | #include <linux/types.h> | |
21 | #include <linux/string.h> | |
22 | #include <linux/kvm.h> | |
23 | #include <linux/kvm_host.h> | |
24 | #include <linux/highmem.h> | |
25 | ||
26 | #include <asm/tlbflush.h> | |
27 | #include <asm/kvm_ppc.h> | |
28 | #include <asm/kvm_book3s.h> | |
0f296829 | 29 | #include <asm/mmu-hash64.h> |
e71b2a39 AG |
30 | |
31 | /* #define DEBUG_MMU */ | |
32 | ||
33 | #ifdef DEBUG_MMU | |
34 | #define dprintk(X...) printk(KERN_INFO X) | |
35 | #else | |
36 | #define dprintk(X...) do { } while(0) | |
37 | #endif | |
38 | ||
/* Reset the guest MSR to the vcpu's interrupt-delivery MSR value. */
static void kvmppc_mmu_book3s_64_reset_msr(struct kvm_vcpu *vcpu)
{
	kvmppc_set_msr(vcpu, vcpu->arch.intr_msr);
}
43 | ||
44 | static struct kvmppc_slb *kvmppc_mmu_book3s_64_find_slbe( | |
c4befc58 | 45 | struct kvm_vcpu *vcpu, |
e71b2a39 AG |
46 | gva_t eaddr) |
47 | { | |
48 | int i; | |
49 | u64 esid = GET_ESID(eaddr); | |
50 | u64 esid_1t = GET_ESID_1T(eaddr); | |
51 | ||
c4befc58 | 52 | for (i = 0; i < vcpu->arch.slb_nr; i++) { |
e71b2a39 AG |
53 | u64 cmp_esid = esid; |
54 | ||
c4befc58 | 55 | if (!vcpu->arch.slb[i].valid) |
e71b2a39 AG |
56 | continue; |
57 | ||
c4befc58 | 58 | if (vcpu->arch.slb[i].tb) |
e71b2a39 AG |
59 | cmp_esid = esid_1t; |
60 | ||
c4befc58 PM |
61 | if (vcpu->arch.slb[i].esid == cmp_esid) |
62 | return &vcpu->arch.slb[i]; | |
e71b2a39 AG |
63 | } |
64 | ||
65 | dprintk("KVM: No SLB entry found for 0x%lx [%llx | %llx]\n", | |
66 | eaddr, esid, esid_1t); | |
c4befc58 PM |
67 | for (i = 0; i < vcpu->arch.slb_nr; i++) { |
68 | if (vcpu->arch.slb[i].vsid) | |
4b5c9b7f | 69 | dprintk(" %d: %c%c%c %llx %llx\n", i, |
c4befc58 PM |
70 | vcpu->arch.slb[i].valid ? 'v' : ' ', |
71 | vcpu->arch.slb[i].large ? 'l' : ' ', | |
72 | vcpu->arch.slb[i].tb ? 't' : ' ', | |
73 | vcpu->arch.slb[i].esid, | |
74 | vcpu->arch.slb[i].vsid); | |
e71b2a39 AG |
75 | } |
76 | ||
77 | return NULL; | |
78 | } | |
79 | ||
0f296829 PM |
80 | static int kvmppc_slb_sid_shift(struct kvmppc_slb *slbe) |
81 | { | |
82 | return slbe->tb ? SID_SHIFT_1T : SID_SHIFT; | |
83 | } | |
84 | ||
85 | static u64 kvmppc_slb_offset_mask(struct kvmppc_slb *slbe) | |
86 | { | |
87 | return (1ul << kvmppc_slb_sid_shift(slbe)) - 1; | |
88 | } | |
89 | ||
90 | static u64 kvmppc_slb_calc_vpn(struct kvmppc_slb *slb, gva_t eaddr) | |
91 | { | |
92 | eaddr &= kvmppc_slb_offset_mask(slb); | |
93 | ||
94 | return (eaddr >> VPN_SHIFT) | | |
95 | ((slb->vsid) << (kvmppc_slb_sid_shift(slb) - VPN_SHIFT)); | |
96 | } | |
97 | ||
e71b2a39 AG |
98 | static u64 kvmppc_mmu_book3s_64_ea_to_vp(struct kvm_vcpu *vcpu, gva_t eaddr, |
99 | bool data) | |
100 | { | |
101 | struct kvmppc_slb *slb; | |
102 | ||
c4befc58 | 103 | slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); |
e71b2a39 AG |
104 | if (!slb) |
105 | return 0; | |
106 | ||
0f296829 | 107 | return kvmppc_slb_calc_vpn(slb, eaddr); |
e71b2a39 AG |
108 | } |
109 | ||
a4a0f252 PM |
110 | static int mmu_pagesize(int mmu_pg) |
111 | { | |
112 | switch (mmu_pg) { | |
113 | case MMU_PAGE_64K: | |
114 | return 16; | |
115 | case MMU_PAGE_16M: | |
116 | return 24; | |
117 | } | |
118 | return 12; | |
119 | } | |
120 | ||
e71b2a39 AG |
/* log2 of the base page size of the segment described by @slbe. */
static int kvmppc_mmu_book3s_64_get_pagesize(struct kvmppc_slb *slbe)
{
	return mmu_pagesize(slbe->base_page_size);
}
125 | ||
126 | static u32 kvmppc_mmu_book3s_64_get_page(struct kvmppc_slb *slbe, gva_t eaddr) | |
127 | { | |
128 | int p = kvmppc_mmu_book3s_64_get_pagesize(slbe); | |
0f296829 PM |
129 | |
130 | return ((eaddr & kvmppc_slb_offset_mask(slbe)) >> p); | |
e71b2a39 AG |
131 | } |
132 | ||
3ff95502 | 133 | static hva_t kvmppc_mmu_book3s_64_get_pteg(struct kvm_vcpu *vcpu, |
e71b2a39 AG |
134 | struct kvmppc_slb *slbe, gva_t eaddr, |
135 | bool second) | |
136 | { | |
3ff95502 | 137 | struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu); |
e71b2a39 | 138 | u64 hash, pteg, htabsize; |
0f296829 | 139 | u32 ssize; |
e71b2a39 | 140 | hva_t r; |
0f296829 | 141 | u64 vpn; |
e71b2a39 | 142 | |
e71b2a39 AG |
143 | htabsize = ((1 << ((vcpu_book3s->sdr1 & 0x1f) + 11)) - 1); |
144 | ||
0f296829 PM |
145 | vpn = kvmppc_slb_calc_vpn(slbe, eaddr); |
146 | ssize = slbe->tb ? MMU_SEGSIZE_1T : MMU_SEGSIZE_256M; | |
147 | hash = hpt_hash(vpn, kvmppc_mmu_book3s_64_get_pagesize(slbe), ssize); | |
e71b2a39 AG |
148 | if (second) |
149 | hash = ~hash; | |
150 | hash &= ((1ULL << 39ULL) - 1ULL); | |
151 | hash &= htabsize; | |
152 | hash <<= 7ULL; | |
153 | ||
154 | pteg = vcpu_book3s->sdr1 & 0xfffffffffffc0000ULL; | |
155 | pteg |= hash; | |
156 | ||
157 | dprintk("MMU: page=0x%x sdr1=0x%llx pteg=0x%llx vsid=0x%llx\n", | |
158 | page, vcpu_book3s->sdr1, pteg, slbe->vsid); | |
159 | ||
04fcc11b AG |
160 | /* When running a PAPR guest, SDR1 contains a HVA address instead |
161 | of a GPA */ | |
3ff95502 | 162 | if (vcpu->arch.papr_enabled) |
04fcc11b AG |
163 | r = pteg; |
164 | else | |
3ff95502 | 165 | r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT); |
04fcc11b | 166 | |
e71b2a39 AG |
167 | if (kvm_is_error_hva(r)) |
168 | return r; | |
169 | return r | (pteg & ~PAGE_MASK); | |
170 | } | |
171 | ||
/*
 * Build the AVPN (abbreviated virtual page number) for @eaddr as it
 * appears in the first doubleword of a HPTE: the VSID combined with the
 * page-index bits, aligned to the HPTE_V_AVPN field position.
 */
static u64 kvmppc_mmu_book3s_64_get_avpn(struct kvmppc_slb *slbe, gva_t eaddr)
{
	int p = kvmppc_mmu_book3s_64_get_pagesize(slbe);
	u64 avpn;

	avpn = kvmppc_mmu_book3s_64_get_page(slbe, eaddr);
	avpn |= slbe->vsid << (kvmppc_slb_sid_shift(slbe) - p);

	/*
	 * Align to the AVPN field: for page sizes below 64k, drop the
	 * low (16 - p) bits — the AVPN omits VA bits below bit 23 — and
	 * for larger pages shift left by (p - 16).
	 */
	if (p < 16)
		avpn >>= ((80 - p) - 56) - 8;	/* 16 - p */
	else
		avpn <<= p - 16;

	return avpn;
}
187 | ||
a4a0f252 PM |
188 | /* |
189 | * Return page size encoded in the second word of a HPTE, or | |
190 | * -1 for an invalid encoding for the base page size indicated by | |
191 | * the SLB entry. This doesn't handle mixed pagesize segments yet. | |
192 | */ | |
193 | static int decode_pagesize(struct kvmppc_slb *slbe, u64 r) | |
194 | { | |
195 | switch (slbe->base_page_size) { | |
196 | case MMU_PAGE_64K: | |
197 | if ((r & 0xf000) == 0x1000) | |
198 | return MMU_PAGE_64K; | |
199 | break; | |
200 | case MMU_PAGE_16M: | |
201 | if ((r & 0xff000) == 0) | |
202 | return MMU_PAGE_16M; | |
203 | break; | |
204 | } | |
205 | return -1; | |
206 | } | |
207 | ||
e71b2a39 | 208 | static int kvmppc_mmu_book3s_64_xlate(struct kvm_vcpu *vcpu, gva_t eaddr, |
93b159b4 PM |
209 | struct kvmppc_pte *gpte, bool data, |
210 | bool iswrite) | |
e71b2a39 | 211 | { |
e71b2a39 AG |
212 | struct kvmppc_slb *slbe; |
213 | hva_t ptegp; | |
214 | u64 pteg[16]; | |
215 | u64 avpn = 0; | |
7e48c101 PM |
216 | u64 v, r; |
217 | u64 v_val, v_mask; | |
218 | u64 eaddr_mask; | |
e71b2a39 | 219 | int i; |
7e48c101 | 220 | u8 pp, key = 0; |
e71b2a39 | 221 | bool found = false; |
7e48c101 | 222 | bool second = false; |
a4a0f252 | 223 | int pgsize; |
e8508940 AG |
224 | ulong mp_ea = vcpu->arch.magic_page_ea; |
225 | ||
226 | /* Magic page override */ | |
227 | if (unlikely(mp_ea) && | |
228 | unlikely((eaddr & ~0xfffULL) == (mp_ea & ~0xfffULL)) && | |
5deb8e7a | 229 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
e8508940 AG |
230 | gpte->eaddr = eaddr; |
231 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); | |
232 | gpte->raddr = vcpu->arch.magic_page_pa | (gpte->raddr & 0xfff); | |
233 | gpte->raddr &= KVM_PAM; | |
234 | gpte->may_execute = true; | |
235 | gpte->may_read = true; | |
236 | gpte->may_write = true; | |
a4a0f252 | 237 | gpte->page_size = MMU_PAGE_4K; |
e8508940 AG |
238 | |
239 | return 0; | |
240 | } | |
e71b2a39 | 241 | |
c4befc58 | 242 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, eaddr); |
e71b2a39 AG |
243 | if (!slbe) |
244 | goto no_seg_found; | |
245 | ||
0f296829 | 246 | avpn = kvmppc_mmu_book3s_64_get_avpn(slbe, eaddr); |
7e48c101 PM |
247 | v_val = avpn & HPTE_V_AVPN; |
248 | ||
0f296829 | 249 | if (slbe->tb) |
7e48c101 PM |
250 | v_val |= SLB_VSID_B_1T; |
251 | if (slbe->large) | |
252 | v_val |= HPTE_V_LARGE; | |
253 | v_val |= HPTE_V_VALID; | |
254 | ||
255 | v_mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_LARGE | HPTE_V_VALID | | |
256 | HPTE_V_SECONDARY; | |
0f296829 | 257 | |
a4a0f252 PM |
258 | pgsize = slbe->large ? MMU_PAGE_16M : MMU_PAGE_4K; |
259 | ||
9308ab8e PM |
260 | mutex_lock(&vcpu->kvm->arch.hpt_mutex); |
261 | ||
e71b2a39 | 262 | do_second: |
3ff95502 | 263 | ptegp = kvmppc_mmu_book3s_64_get_pteg(vcpu, slbe, eaddr, second); |
e71b2a39 AG |
264 | if (kvm_is_error_hva(ptegp)) |
265 | goto no_page_found; | |
266 | ||
e71b2a39 AG |
267 | if(copy_from_user(pteg, (void __user *)ptegp, sizeof(pteg))) { |
268 | printk(KERN_ERR "KVM can't copy data from 0x%lx!\n", ptegp); | |
269 | goto no_page_found; | |
270 | } | |
271 | ||
5deb8e7a | 272 | if ((kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Kp) |
e71b2a39 | 273 | key = 4; |
5deb8e7a | 274 | else if (!(kvmppc_get_msr(vcpu) & MSR_PR) && slbe->Ks) |
e71b2a39 AG |
275 | key = 4; |
276 | ||
277 | for (i=0; i<16; i+=2) { | |
4e509af9 AG |
278 | u64 pte0 = be64_to_cpu(pteg[i]); |
279 | u64 pte1 = be64_to_cpu(pteg[i + 1]); | |
280 | ||
7e48c101 | 281 | /* Check all relevant fields of 1st dword */ |
4e509af9 | 282 | if ((pte0 & v_mask) == v_val) { |
a4a0f252 PM |
283 | /* If large page bit is set, check pgsize encoding */ |
284 | if (slbe->large && | |
285 | (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) { | |
4e509af9 | 286 | pgsize = decode_pagesize(slbe, pte1); |
a4a0f252 PM |
287 | if (pgsize < 0) |
288 | continue; | |
289 | } | |
e71b2a39 AG |
290 | found = true; |
291 | break; | |
292 | } | |
293 | } | |
294 | ||
7e48c101 PM |
295 | if (!found) { |
296 | if (second) | |
297 | goto no_page_found; | |
298 | v_val |= HPTE_V_SECONDARY; | |
299 | second = true; | |
300 | goto do_second; | |
301 | } | |
e71b2a39 | 302 | |
4e509af9 AG |
303 | v = be64_to_cpu(pteg[i]); |
304 | r = be64_to_cpu(pteg[i+1]); | |
7e48c101 | 305 | pp = (r & HPTE_R_PP) | key; |
03a9c903 PM |
306 | if (r & HPTE_R_PP0) |
307 | pp |= 8; | |
7e48c101 PM |
308 | |
309 | gpte->eaddr = eaddr; | |
310 | gpte->vpage = kvmppc_mmu_book3s_64_ea_to_vp(vcpu, eaddr, data); | |
a4a0f252 PM |
311 | |
312 | eaddr_mask = (1ull << mmu_pagesize(pgsize)) - 1; | |
7e48c101 | 313 | gpte->raddr = (r & HPTE_R_RPN & ~eaddr_mask) | (eaddr & eaddr_mask); |
a4a0f252 | 314 | gpte->page_size = pgsize; |
7e48c101 | 315 | gpte->may_execute = ((r & HPTE_R_N) ? false : true); |
f3383cf8 AG |
316 | if (unlikely(vcpu->arch.disable_kernel_nx) && |
317 | !(kvmppc_get_msr(vcpu) & MSR_PR)) | |
318 | gpte->may_execute = true; | |
7e48c101 PM |
319 | gpte->may_read = false; |
320 | gpte->may_write = false; | |
321 | ||
322 | switch (pp) { | |
323 | case 0: | |
324 | case 1: | |
325 | case 2: | |
326 | case 6: | |
327 | gpte->may_write = true; | |
328 | /* fall through */ | |
329 | case 3: | |
330 | case 5: | |
331 | case 7: | |
03a9c903 | 332 | case 10: |
7e48c101 PM |
333 | gpte->may_read = true; |
334 | break; | |
335 | } | |
e71b2a39 | 336 | |
7e48c101 PM |
337 | dprintk("KVM MMU: Translated 0x%lx [0x%llx] -> 0x%llx " |
338 | "-> 0x%lx\n", | |
339 | eaddr, avpn, gpte->vpage, gpte->raddr); | |
e71b2a39 | 340 | |
7e48c101 PM |
341 | /* Update PTE R and C bits, so the guest's swapper knows we used the |
342 | * page */ | |
9308ab8e PM |
343 | if (gpte->may_read && !(r & HPTE_R_R)) { |
344 | /* | |
345 | * Set the accessed flag. | |
346 | * We have to write this back with a single byte write | |
347 | * because another vcpu may be accessing this on | |
348 | * non-PAPR platforms such as mac99, and this is | |
349 | * what real hardware does. | |
350 | */ | |
740f834e | 351 | char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); |
7e48c101 | 352 | r |= HPTE_R_R; |
9308ab8e | 353 | put_user(r >> 8, addr + 6); |
7e48c101 | 354 | } |
93b159b4 PM |
355 | if (iswrite && gpte->may_write && !(r & HPTE_R_C)) { |
356 | /* Set the dirty flag */ | |
9308ab8e | 357 | /* Use a single byte write */ |
740f834e | 358 | char __user *addr = (char __user *) (ptegp + (i + 1) * sizeof(u64)); |
7e48c101 | 359 | r |= HPTE_R_C; |
9308ab8e | 360 | put_user(r, addr + 7); |
e71b2a39 AG |
361 | } |
362 | ||
9308ab8e | 363 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); |
7e48c101 | 364 | |
93b159b4 | 365 | if (!gpte->may_read || (iswrite && !gpte->may_write)) |
7e48c101 PM |
366 | return -EPERM; |
367 | return 0; | |
368 | ||
e71b2a39 | 369 | no_page_found: |
9308ab8e | 370 | mutex_unlock(&vcpu->kvm->arch.hpt_mutex); |
e71b2a39 AG |
371 | return -ENOENT; |
372 | ||
373 | no_seg_found: | |
e71b2a39 AG |
374 | dprintk("KVM MMU: Trigger segment fault\n"); |
375 | return -EINVAL; | |
376 | } | |
377 | ||
378 | static void kvmppc_mmu_book3s_64_slbmte(struct kvm_vcpu *vcpu, u64 rs, u64 rb) | |
379 | { | |
e71b2a39 AG |
380 | u64 esid, esid_1t; |
381 | int slb_nr; | |
382 | struct kvmppc_slb *slbe; | |
383 | ||
384 | dprintk("KVM MMU: slbmte(0x%llx, 0x%llx)\n", rs, rb); | |
385 | ||
e71b2a39 AG |
386 | esid = GET_ESID(rb); |
387 | esid_1t = GET_ESID_1T(rb); | |
388 | slb_nr = rb & 0xfff; | |
389 | ||
c4befc58 | 390 | if (slb_nr > vcpu->arch.slb_nr) |
e71b2a39 AG |
391 | return; |
392 | ||
c4befc58 | 393 | slbe = &vcpu->arch.slb[slb_nr]; |
e71b2a39 AG |
394 | |
395 | slbe->large = (rs & SLB_VSID_L) ? 1 : 0; | |
4b5c9b7f AG |
396 | slbe->tb = (rs & SLB_VSID_B_1T) ? 1 : 0; |
397 | slbe->esid = slbe->tb ? esid_1t : esid; | |
0f296829 | 398 | slbe->vsid = (rs & ~SLB_VSID_B) >> (kvmppc_slb_sid_shift(slbe) - 16); |
e71b2a39 AG |
399 | slbe->valid = (rb & SLB_ESID_V) ? 1 : 0; |
400 | slbe->Ks = (rs & SLB_VSID_KS) ? 1 : 0; | |
401 | slbe->Kp = (rs & SLB_VSID_KP) ? 1 : 0; | |
402 | slbe->nx = (rs & SLB_VSID_N) ? 1 : 0; | |
403 | slbe->class = (rs & SLB_VSID_C) ? 1 : 0; | |
404 | ||
a4a0f252 PM |
405 | slbe->base_page_size = MMU_PAGE_4K; |
406 | if (slbe->large) { | |
407 | if (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE) { | |
408 | switch (rs & SLB_VSID_LP) { | |
409 | case SLB_VSID_LP_00: | |
410 | slbe->base_page_size = MMU_PAGE_16M; | |
411 | break; | |
412 | case SLB_VSID_LP_01: | |
413 | slbe->base_page_size = MMU_PAGE_64K; | |
414 | break; | |
415 | } | |
416 | } else | |
417 | slbe->base_page_size = MMU_PAGE_16M; | |
418 | } | |
419 | ||
e71b2a39 AG |
420 | slbe->orige = rb & (ESID_MASK | SLB_ESID_V); |
421 | slbe->origv = rs; | |
422 | ||
423 | /* Map the new segment */ | |
424 | kvmppc_mmu_map_segment(vcpu, esid << SID_SHIFT); | |
425 | } | |
426 | ||
427 | static u64 kvmppc_mmu_book3s_64_slbmfee(struct kvm_vcpu *vcpu, u64 slb_nr) | |
428 | { | |
e71b2a39 AG |
429 | struct kvmppc_slb *slbe; |
430 | ||
c4befc58 | 431 | if (slb_nr > vcpu->arch.slb_nr) |
e71b2a39 AG |
432 | return 0; |
433 | ||
c4befc58 | 434 | slbe = &vcpu->arch.slb[slb_nr]; |
e71b2a39 AG |
435 | |
436 | return slbe->orige; | |
437 | } | |
438 | ||
439 | static u64 kvmppc_mmu_book3s_64_slbmfev(struct kvm_vcpu *vcpu, u64 slb_nr) | |
440 | { | |
e71b2a39 AG |
441 | struct kvmppc_slb *slbe; |
442 | ||
c4befc58 | 443 | if (slb_nr > vcpu->arch.slb_nr) |
e71b2a39 AG |
444 | return 0; |
445 | ||
c4befc58 | 446 | slbe = &vcpu->arch.slb[slb_nr]; |
e71b2a39 AG |
447 | |
448 | return slbe->origv; | |
449 | } | |
450 | ||
451 | static void kvmppc_mmu_book3s_64_slbie(struct kvm_vcpu *vcpu, u64 ea) | |
452 | { | |
e71b2a39 | 453 | struct kvmppc_slb *slbe; |
0f296829 | 454 | u64 seg_size; |
e71b2a39 AG |
455 | |
456 | dprintk("KVM MMU: slbie(0x%llx)\n", ea); | |
457 | ||
c4befc58 | 458 | slbe = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); |
e71b2a39 AG |
459 | |
460 | if (!slbe) | |
461 | return; | |
462 | ||
463 | dprintk("KVM MMU: slbie(0x%llx, 0x%llx)\n", ea, slbe->esid); | |
464 | ||
465 | slbe->valid = false; | |
681562cd PM |
466 | slbe->orige = 0; |
467 | slbe->origv = 0; | |
e71b2a39 | 468 | |
0f296829 PM |
469 | seg_size = 1ull << kvmppc_slb_sid_shift(slbe); |
470 | kvmppc_mmu_flush_segment(vcpu, ea & ~(seg_size - 1), seg_size); | |
e71b2a39 AG |
471 | } |
472 | ||
/*
 * Emulate slbia: invalidate all guest SLB entries and, if translation
 * was on, flush and re-map the segment we are executing from.
 */
static void kvmppc_mmu_book3s_64_slbia(struct kvm_vcpu *vcpu)
{
	int i;

	dprintk("KVM MMU: slbia()\n");

	/* NOTE(review): entry 0 is deliberately skipped here —
	 * presumably to preserve the bolted entry, as real slbia
	 * behaviour suggests; confirm against the ISA */
	for (i = 1; i < vcpu->arch.slb_nr; i++) {
		vcpu->arch.slb[i].valid = false;
		vcpu->arch.slb[i].orige = 0;
		vcpu->arch.slb[i].origv = 0;
	}

	if (kvmppc_get_msr(vcpu) & MSR_IR) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
	}
}
490 | ||
/*
 * Emulate mtsrin for a 64-bit Book3S guest by synthesizing the
 * equivalent slbmte operands from the segment-register write.
 */
static void kvmppc_mmu_book3s_64_mtsrin(struct kvm_vcpu *vcpu, u32 srnum,
					ulong value)
{
	u64 rb = 0, rs = 0;

	/*
	 * According to Book3 2.01 mtsrin is implemented as:
	 *
	 * The SLB entry specified by (RB)32:35 is loaded from register
	 * RS, as follows.
	 *
	 * SLBE Bit	Source			SLB Field
	 *
	 * 0:31		0x0000_0000		ESID-0:31
	 * 32:35	(RB)32:35		ESID-32:35
	 * 36		0b1			V
	 * 37:61	0x00_0000 || 0b0	VSID-0:24
	 * 62:88	(RS)37:63		VSID-25:51
	 * 89:91	(RS)33:35		Ks Kp N
	 * 92		(RS)36			L ((RS)36 must be 0b0)
	 * 93		0b0			C
	 */

	dprintk("KVM MMU: mtsrin(0x%x, 0x%lx)\n", srnum, value);

	/* ESID = srnum */
	rb |= (srnum & 0xf) << 28;
	/* Set the valid bit */
	rb |= 1 << 27;
	/* Index = ESID */
	rb |= srnum;

	/* VSID = VSID */
	rs |= (value & 0xfffffff) << 12;
	/* flags = flags (Ks Kp N per the table above) */
	rs |= ((value >> 28) & 0x7) << 9;

	kvmppc_mmu_book3s_64_slbmte(vcpu, rs, rb);
}
530 | ||
531 | static void kvmppc_mmu_book3s_64_tlbie(struct kvm_vcpu *vcpu, ulong va, | |
532 | bool large) | |
533 | { | |
534 | u64 mask = 0xFFFFFFFFFULL; | |
9308ab8e PM |
535 | long i; |
536 | struct kvm_vcpu *v; | |
e71b2a39 AG |
537 | |
538 | dprintk("KVM MMU: tlbie(0x%lx)\n", va); | |
539 | ||
a4a0f252 PM |
540 | /* |
541 | * The tlbie instruction changed behaviour starting with | |
542 | * POWER6. POWER6 and later don't have the large page flag | |
543 | * in the instruction but in the RB value, along with bits | |
544 | * indicating page and segment sizes. | |
545 | */ | |
546 | if (vcpu->arch.hflags & BOOK3S_HFLAG_NEW_TLBIE) { | |
547 | /* POWER6 or later */ | |
548 | if (va & 1) { /* L bit */ | |
549 | if ((va & 0xf000) == 0x1000) | |
550 | mask = 0xFFFFFFFF0ULL; /* 64k page */ | |
551 | else | |
552 | mask = 0xFFFFFF000ULL; /* 16M page */ | |
553 | } | |
554 | } else { | |
555 | /* older processors, e.g. PPC970 */ | |
556 | if (large) | |
557 | mask = 0xFFFFFF000ULL; | |
558 | } | |
9308ab8e PM |
559 | /* flush this VA on all vcpus */ |
560 | kvm_for_each_vcpu(i, v, vcpu->kvm) | |
561 | kvmppc_mmu_pte_vflush(v, va >> 12, mask); | |
e71b2a39 AG |
562 | } |
563 | ||
c9029c34 PM |
#ifdef CONFIG_PPC_64K_PAGES
/* Does the 256M segment identified by @esid contain the magic page? */
static int segment_contains_magic_page(struct kvm_vcpu *vcpu, ulong esid)
{
	ulong mp_ea = vcpu->arch.magic_page_ea;

	if (!mp_ea)
		return 0;
	if (kvmppc_get_msr(vcpu) & MSR_PR)
		return 0;
	return (mp_ea >> SID_SHIFT) == esid;
}
#endif
573 | ||
af7b4d10 | 574 | static int kvmppc_mmu_book3s_64_esid_to_vsid(struct kvm_vcpu *vcpu, ulong esid, |
e71b2a39 AG |
575 | u64 *vsid) |
576 | { | |
f7bc74e1 AG |
577 | ulong ea = esid << SID_SHIFT; |
578 | struct kvmppc_slb *slb; | |
579 | u64 gvsid = esid; | |
e8508940 | 580 | ulong mp_ea = vcpu->arch.magic_page_ea; |
c9029c34 | 581 | int pagesize = MMU_PAGE_64K; |
5deb8e7a | 582 | u64 msr = kvmppc_get_msr(vcpu); |
f7bc74e1 | 583 | |
5deb8e7a | 584 | if (msr & (MSR_DR|MSR_IR)) { |
c4befc58 | 585 | slb = kvmppc_mmu_book3s_64_find_slbe(vcpu, ea); |
0f296829 | 586 | if (slb) { |
f7bc74e1 | 587 | gvsid = slb->vsid; |
c9029c34 | 588 | pagesize = slb->base_page_size; |
0f296829 PM |
589 | if (slb->tb) { |
590 | gvsid <<= SID_SHIFT_1T - SID_SHIFT; | |
591 | gvsid |= esid & ((1ul << (SID_SHIFT_1T - SID_SHIFT)) - 1); | |
592 | gvsid |= VSID_1T; | |
593 | } | |
594 | } | |
f7bc74e1 AG |
595 | } |
596 | ||
5deb8e7a | 597 | switch (msr & (MSR_DR|MSR_IR)) { |
e71b2a39 | 598 | case 0: |
c9029c34 | 599 | gvsid = VSID_REAL | esid; |
e71b2a39 AG |
600 | break; |
601 | case MSR_IR: | |
c9029c34 | 602 | gvsid |= VSID_REAL_IR; |
e71b2a39 AG |
603 | break; |
604 | case MSR_DR: | |
c9029c34 | 605 | gvsid |= VSID_REAL_DR; |
e71b2a39 AG |
606 | break; |
607 | case MSR_DR|MSR_IR: | |
f7bc74e1 | 608 | if (!slb) |
e8508940 | 609 | goto no_slb; |
e71b2a39 AG |
610 | |
611 | break; | |
e71b2a39 AG |
612 | default: |
613 | BUG(); | |
614 | break; | |
615 | } | |
616 | ||
c9029c34 PM |
617 | #ifdef CONFIG_PPC_64K_PAGES |
618 | /* | |
619 | * Mark this as a 64k segment if the host is using | |
620 | * 64k pages, the host MMU supports 64k pages and | |
621 | * the guest segment page size is >= 64k, | |
622 | * but not if this segment contains the magic page. | |
623 | */ | |
624 | if (pagesize >= MMU_PAGE_64K && | |
625 | mmu_psize_defs[MMU_PAGE_64K].shift && | |
626 | !segment_contains_magic_page(vcpu, esid)) | |
627 | gvsid |= VSID_64K; | |
628 | #endif | |
629 | ||
5deb8e7a | 630 | if (kvmppc_get_msr(vcpu) & MSR_PR) |
c9029c34 | 631 | gvsid |= VSID_PR; |
63556441 | 632 | |
c9029c34 | 633 | *vsid = gvsid; |
e71b2a39 | 634 | return 0; |
e8508940 AG |
635 | |
636 | no_slb: | |
637 | /* Catch magic page case */ | |
638 | if (unlikely(mp_ea) && | |
639 | unlikely(esid == (mp_ea >> SID_SHIFT)) && | |
5deb8e7a | 640 | !(kvmppc_get_msr(vcpu) & MSR_PR)) { |
e8508940 AG |
641 | *vsid = VSID_REAL | esid; |
642 | return 0; | |
643 | } | |
644 | ||
645 | return -EINVAL; | |
e71b2a39 AG |
646 | } |
647 | ||
648 | static bool kvmppc_mmu_book3s_64_is_dcbz32(struct kvm_vcpu *vcpu) | |
649 | { | |
650 | return (to_book3s(vcpu)->hid[5] & 0x80); | |
651 | } | |
652 | ||
/*
 * Install the Book3S 64-bit software-MMU emulation callbacks for this
 * vcpu and flag it as using an SLB.
 */
void kvmppc_mmu_book3s_64_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	/* No mfsrin on 64-bit; mtsrin is emulated on top of slbmte */
	mmu->mfsrin = NULL;
	mmu->mtsrin = kvmppc_mmu_book3s_64_mtsrin;
	mmu->slbmte = kvmppc_mmu_book3s_64_slbmte;
	mmu->slbmfee = kvmppc_mmu_book3s_64_slbmfee;
	mmu->slbmfev = kvmppc_mmu_book3s_64_slbmfev;
	mmu->slbie = kvmppc_mmu_book3s_64_slbie;
	mmu->slbia = kvmppc_mmu_book3s_64_slbia;
	mmu->xlate = kvmppc_mmu_book3s_64_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_reset_msr;
	mmu->tlbie = kvmppc_mmu_book3s_64_tlbie;
	mmu->esid_to_vsid = kvmppc_mmu_book3s_64_esid_to_vsid;
	mmu->ea_to_vp = kvmppc_mmu_book3s_64_ea_to_vp;
	mmu->is_dcbz32 = kvmppc_mmu_book3s_64_is_dcbz32;

	/* This vcpu uses an SLB rather than segment registers/BATs */
	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}