/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 * Copyright (c) 2013 David Gibson, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/hw_accel.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "exec/log.h"
#include "hw/hw.h"
#include "mmu-book3s-v3.h"

//#define DEBUG_SLB

#ifdef DEBUG_SLB
#  define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
#else
#  define LOG_SLB(...) do { } while (0)
#endif

/*
 * SLB handling
 */

static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /* We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code. */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}

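/*
 * Worked example (illustrative note, not from the ISA text): masking the
 * effective address with SEGMENT_MASK_256M keeps only the ESID bits of a
 * 256M segment, and or'ing in SLB_ESID_V means a single 64-bit compare
 * against slb->esid checks the valid bit and the ESID field at once; the
 * extra SLB_VSID_B test then confirms the entry really describes a
 * segment of the size whose mask was applied.
 */
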
void dump_slb(FILE *f, fprintf_function cpu_fprintf, PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    int i;
    uint64_t slbe, slbv;

    cpu_synchronize_state(CPU(cpu));

    cpu_fprintf(f, "SLB\tESID\t\t\tVSID\n");
    for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
        slbe = env->slb[i].esid;
        slbv = env->slb[i].vsid;
        if (slbe == 0 && slbv == 0) {
            continue;
        }
        cpu_fprintf(f, "%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
                    i, slbe, slbv);
    }
}

void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /* XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }
}

static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /* XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}

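/*
 * Note (sketch of the intent, inferred from the code above): slbie and
 * slbieg share this helper and differ only in the scope of the implied
 * TLB invalidation - slbie requests a local flush, slbieg a global one.
 */
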
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}

void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}

int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
                  target_ulong esid, target_ulong vsid)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb = &env->slb[slot];
    const PPCHash64SegmentPageSizes *sps = NULL;
    int i;

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1; /* Bad slot number */
    }
    if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
        return -1; /* Reserved bits set */
    }
    if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
        return -1; /* Bad segment size */
    }
    if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
        return -1; /* 1T segment on MMU that doesn't support it */
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
                     " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
                     slot, esid, vsid);
        return -1;
    }

    slb->esid = esid;
    slb->vsid = vsid;
    slb->sps = sps;

    LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
            " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
            slb->esid, slb->vsid);

    return 0;
}

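/*
 * Usage sketch (as helper_store_slb further below shows): the slbmte
 * helper splits its RB operand as "rb & 0xfff" for the slot index and
 * "rb & ~0xfffULL" for the ESID portion, passing RS through as the
 * VSID, so a -1 return from ppc_store_slb surfaces to the guest as an
 * illegal-instruction program interrupt.
 */
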
static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->esid;
    return 0;
}

static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    int slot = rb & 0xfff;
    ppc_slb_t *slb = &env->slb[slot];

    if (slot >= cpu->hash64_opts->slb_size) {
        return -1;
    }

    *rt = slb->vsid;
    return 0;
}

static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
                             target_ulong *rt)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;

    if (!msr_is_64bit(env, env->msr)) {
        rb &= 0xffffffff;
    }
    slb = slb_lookup(cpu, rb);
    if (slb == NULL) {
        *rt = (target_ulong)-1ul;
    } else {
        *rt = slb->vsid;
    }
    return 0;
}

void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}

target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}

/* Check No-Execute or Guarded Storage */
static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
                                              ppc_hash_pte64_t pte)
{
    /* Exec permissions CANNOT take away read or write permissions */
    return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
            PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

/* Check Basic Storage Protection */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /* Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases */
    int prot = 0;

    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}

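/*
 * Summary of the switch above (restating the code, adding nothing):
 *
 *   key  pp     protection granted
 *   0    0,1,2  read/write/execute
 *   0    3,6    read/execute
 *   1    0,6    none
 *   1    1,3    read/execute
 *   1    2      read/write/execute
 *
 * Any other combination falls through with prot = 0 (no access).
 */
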
/* Check the instruction access permissions specified in the IAMR */
static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
{
    CPUPPCState *env = &cpu->env;
    int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;

    /*
     * An instruction fetch is permitted if the IAMR bit is 0.
     * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
     * can only take away EXEC permissions, not READ or WRITE permissions.
     * If the bit is clear, return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
     * EXEC permissions are allowed.
     */
    return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
                               PAGE_READ | PAGE_WRITE | PAGE_EXEC;
}

static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /* fprintf(stderr, "AMR protection: key=%d AMR=0x%" PRIx64 "\n", key, */
    /*         env->spr[SPR_AMR]); */

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * permission if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * permission if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR.
     * Check whether the IAMR allows the instruction access: the mask it
     * returns clears PAGE_EXEC from prot if the fetch is denied, and
     * leaves prot unchanged if it is allowed.
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}

const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base = ppc_hash64_hpt_base(cpu);
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }

    if (!base) {
        return NULL;
    }

    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}

void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}

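/*
 * Contract of the pair above (restating the code): map_hptes returns
 * NULL when no hash table is configured, aborts via hw_error() on a
 * partial mapping, and must otherwise be balanced by unmap_hptes with
 * the same ptex/n; with a virtual hypervisor both calls are delegated
 * to its class hooks instead of going through the address space.
 */
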
static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non-4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}

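/*
 * Illustrative example (using the POWER7 table at the bottom of this
 * file): for a 64kiB page in a 64kiB segment, pte_enc is 0x1, so the
 * RPN bits below the 64kiB page boundary must read back as
 * 0x1 << HPTE64_R_RPN_SHIFT for that candidate page size to match.
 */
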
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /* We don't do anything with pshift yet as the QEMU TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}

static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /* The SLB store path should prevent any bad page size encodings
     * getting in there, so: */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is the first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}

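/*
 * Hash-function sketch (restating the code above in ISA terms): for a
 * 256M segment the primary hash is the VSID xor'd with the page index
 * within the segment; for a 1T segment the VSID is folded in once more,
 * shifted left by 25. The secondary PTEG is always addressed by the
 * ones-complement of the primary hash, with HPTE64_V_SECONDARY set in
 * the match term so that only secondary entries compare equal there.
 */
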
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        return 12;
    }

    /*
     * The encodings in cpu->hash64_opts->sps need to be carefully
     * chosen so that this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}

static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to
             * the hypervisor */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}

static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_dr) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to
             * the hypervisor */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDAR] = dar;
        env->spr[SPR_HDSISR] = dsisr;
    } else {
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DAR] = dar;
        env->spr[SPR_DSISR] = dsisr;
    }
    env->error_code = 0;
}

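/*
 * Routing summary for the two helpers above (restating the code): a
 * fault is delivered as a hypervisor interrupt (HISI/HDSI) only when
 * the relevant VPM bit requests it and the CPU is not already in
 * hypervisor mode; otherwise the guest-level ISI/DSI is raised, with
 * the data-side fault details recorded in (H)DAR and (H)DSISR.
 */
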

int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    uint64_t new_pte1;
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /* Note on LPCR usage: 970 uses HID4, but our special variant
     * of store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into
     * LPCR depending on the MMU version. This code can thus just
     * use the LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /* Translation is supposedly "off" */
        /* In real mode the top 4 effective address bits are (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else {
            /* Otherwise, check VPM for RMA vs VRMA */
            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
                slb = &env->vrma_slb;
                if (slb->sps) {
                    goto skip_slb_search;
                }
                /* Not much else to do here */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            } else if (raddr < env->rmls) {
                /* RMA. Check bounds in RMLS */
                raddr |= env->spr[SPR_RMOR];
            } else {
                /* The access failed, generate the appropriate interrupt */
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                } else {
                    int dsisr = DSISR_PROTFAULT;
                    if (rwx == 1) {
                        dsisr |= DSISR_ISSTORE;
                    }
                    ppc_hash64_set_dsi(cs, eaddr, dsisr);
                }
                return 1;
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, SRR1_NOPTE);
        } else {
            int dsisr = DSISR_NOPTE;
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, srr1);
        } else {
            int dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted!\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    new_pte1 = pte.pte1 | HPTE64_R_R; /* set referenced bit */
    if (rwx == 1) {
        new_pte1 |= HPTE64_R_C; /* set changed (dirty) bit */
    } else {
        /* Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit */
        prot &= ~PAGE_WRITE;
    }

    if (new_pte1 != pte.pte1) {
        ppc_hash64_store_hpte(cpu, ptex, pte.pte0, new_pte1);
    }

    /* 7. Determine the real address from the PTE */

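    /*
     * deposit64() replaces the low apshift bits of the real page number
     * with the byte offset taken from the effective address, yielding
     * the page-aligned physical base plus the in-page offset.
     */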
    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}

hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
{
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    hwaddr ptex, raddr;
    ppc_hash_pte64_t pte;
    unsigned apshift;

    /* Handle real mode */
    if (msr_dr == 0) {
        /* In real mode the top 4 effective address bits are ignored */
        raddr = addr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
            return raddr | env->spr[SPR_HRMOR];
        }

        /* Otherwise, check VPM for RMA vs VRMA */
        if (env->spr[SPR_LPCR] & LPCR_VPM0) {
            slb = &env->vrma_slb;
            if (!slb->sps) {
                return -1;
            }
        } else if (raddr < env->rmls) {
            /* RMA. Check bounds in RMLS */
            return raddr | env->spr[SPR_RMOR];
        } else {
            return -1;
        }
    } else {
        slb = slb_lookup(cpu, addr);
        if (!slb) {
            return -1;
        }
    }

    ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
    if (ptex == -1) {
        return -1;
    }

    return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
        & TARGET_PAGE_MASK;
}

void ppc_hash64_store_hpte(PowerPCCPU *cpu, hwaddr ptex,
                           uint64_t pte0, uint64_t pte1)
{
    hwaddr base = ppc_hash64_hpt_base(cpu);
    hwaddr offset = ptex * HASH_PTE_SIZE_64;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->store_hpte(cpu->vhyp, ptex, pte0, pte1);
        return;
    }

    stq_phys(CPU(cpu)->as, base + offset, pte0);
    stq_phys(CPU(cpu)->as, base + offset + HASH_PTE_SIZE_64 / 2, pte1);
}

void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
                               target_ulong pte0, target_ulong pte1)
{
    /*
     * XXX: given the fact that there are too many segments to
     * invalidate, and we still don't have a tlb_flush_mask(env, n,
     * mask) in QEMU, we just invalidate all TLBs
     */
    cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
}

static void ppc_hash64_update_rmls(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = env->spr[SPR_LPCR];

    /*
     * This is the full 4-bit encoding of POWER8. Previous
     * CPUs only support a subset of these but the filtering
     * is done when writing LPCR
     */
    switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
    case 0x8: /* 32MB */
        env->rmls = 0x2000000ull;
        break;
    case 0x3: /* 64MB */
        env->rmls = 0x4000000ull;
        break;
    case 0x7: /* 128MB */
        env->rmls = 0x8000000ull;
        break;
    case 0x4: /* 256MB */
        env->rmls = 0x10000000ull;
        break;
    case 0x2: /* 1GB */
        env->rmls = 0x40000000ull;
        break;
    case 0x1: /* 16GB */
        env->rmls = 0x400000000ull;
        break;
    default:
        /* What to do here ??? */
        env->rmls = 0;
    }
}

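/*
 * Note (restating the table above): the RMLS field is not a simple
 * log2 of the size - 0x8 means 32MB while 0x3 means 64MB - which is
 * why the decoding is an explicit switch rather than a shift.
 */
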
static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    const PPCHash64SegmentPageSizes *sps = NULL;
    target_ulong esid, vsid, lpcr;
    ppc_slb_t *slb = &env->vrma_slb;
    uint32_t vrmasd;
    int i;

    /* First clear it */
    slb->esid = slb->vsid = 0;
    slb->sps = NULL;

    /* Is VRMA enabled? */
    lpcr = env->spr[SPR_LPCR];
    if (!(lpcr & LPCR_VPM0)) {
        return;
    }

    /* Make one up. Mostly ignore the ESID which will not be
     * needed for translation
     */
    vsid = SLB_VSID_VRMA;
    vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
    vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
    esid = SLB_ESID_V;

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];

        if (!sps1->page_shift) {
            break;
        }

        if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
            sps = sps1;
            break;
        }
    }

    if (!sps) {
        error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
                     " vsid 0x"TARGET_FMT_lx, esid, vsid);
        return;
    }

    slb->vsid = vsid;
    slb->esid = esid;
    slb->sps = sps;
}

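/*
 * Sketch of the synthesis above (my reading of the code): the VRMASD
 * field of LPCR supplies the L and LP page-size bits for the made-up
 * VRMA segment entry, and the same slb_enc matching loop as in
 * ppc_store_slb is used to attach the segment page size table.
 */
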
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = 0;

    /* Filter out bits */
    switch (env->mmu_model) {
    case POWERPC_MMU_64B: /* 970 */
        if (val & 0x40) {
            lpcr |= LPCR_LPES0;
        }
        if (val & 0x8000000000000000ull) {
            lpcr |= LPCR_LPES1;
        }
        if (val & 0x20) {
            lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x4000000000000000ull) {
            lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x2000000000000000ull) {
            lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
        }
        env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;

        /* XXX We could also write LPID from HID4 here
         * but since we don't tag any translation on it,
         * it doesn't actually matter
         */
        /* XXX For proper emulation of 970 we also need
         * to dig HRMOR out of HID5
         */
        break;
    case POWERPC_MMU_2_03: /* P5p */
        lpcr = val & (LPCR_RMLS | LPCR_ILE |
                      LPCR_LPES0 | LPCR_LPES1 |
                      LPCR_RMI | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_06: /* P7 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
                      LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
                      LPCR_MER | LPCR_TC |
                      LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_07: /* P8 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
                      LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
                      LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                      LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
        break;
    case POWERPC_MMU_3_00: /* P9 */
        lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
                      (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
                      LPCR_UPRT | LPCR_EVIRT | LPCR_ONL |
                      (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
                      LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
                      LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
        break;
    default:
        ;
    }
    env->spr[SPR_LPCR] = lpcr;
    ppc_hash64_update_rmls(cpu);
    ppc_hash64_update_vrma(cpu);
}

void helper_store_lpcr(CPUPPCState *env, target_ulong val)
{
    PowerPCCPU *cpu = ppc_env_get_cpu(env);

    ppc_store_lpcr(cpu, val);
}

void ppc_hash64_init(PowerPCCPU *cpu)
{
    CPUPPCState *env = &cpu->env;
    PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);

    if (!pcc->hash64_opts) {
        assert(!(env->mmu_model & POWERPC_MMU_64));
        return;
    }

    cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
}

void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}

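/*
 * Layout note for the option tables below (restating the structures):
 * each sps entry pairs a base segment page size (page_shift, with its
 * SLB L||LP encoding in slb_enc) with the actual page sizes that may
 * appear in a PTE of such a segment, each carrying the pte_enc value
 * that hpte_page_shift() matches against the low RPN bits.
 */
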
const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};

const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};
1167 };