/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

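/*
 * A fully qualified radix address is the triple (LPID, PID, EA).  The two
 * most-significant effective address bits, EA(0:1), select the "quadrant",
 * which determines where the LPID and PID come from:
 *
 *   EA(0:1) = 0b00 -> quadrant 0 (application)
 *   EA(0:1) = 0b01 -> quadrant 1 (HV only: guest application)
 *   EA(0:1) = 0b10 -> quadrant 2 (HV only: guest OS)
 *   EA(0:1) = 0b11 -> quadrant 3 (operating system)
 *
 * EA(2:11) must be zero in every quadrant, which is what the
 * R_EADDR_VALID_MASK check below enforces.
 */
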
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    env->error_code = 0;
    if (cause & DSISR_PRTABLE_FAULT) {
        /* HDSI PRTABLE_FAULT gets the originating access type in error_code */
        env->error_code = access_type;
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        break;
    default:
        g_assert_not_reached();
    }
}

static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}
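
/*
 * Note on the helpers used above: ppc_radix64_get_prot_eaa() maps the PTE's
 * Encoded Access Authority bits to QEMU's PAGE_READ/PAGE_WRITE/PAGE_EXEC
 * flags, and ppc_radix64_get_prot_amr() derives a further mask from the
 * Authority Mask Register, so the effective permission in that branch is
 * the intersection of the two.
 */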

static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
                               uint64_t pte, hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (access_type == MMU_DATA_STORE) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls)
{
    bool ret;

    /*
     * Check if this is a valid level, according to POWER9 and POWER10
     * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively:
     * Supported Radix Tree Configurations and Resulting Page Sizes.
     *
     * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future
     * CPUs that support a different Radix MMU configuration will need their
     * own implementation.
     */
    switch (level) {
    case 0: /* Root Page Dir */
        ret = psize == 52 && nls == 13;
        break;
    case 1:
    case 2:
        ret = nls == 9;
        break;
    case 3:
        ret = nls == 9 || nls == 5;
        break;
    default:
        ret = false;
    }

    if (unlikely(!ret)) {
        qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: "
                      "level %d size %d nls %"PRIu64"\n",
                      level, psize, nls);
    }
    return ret;
}
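
/*
 * For reference, the geometry the checks above accept (per the manual
 * sections cited in the function) is RTS = 52 with NLS values 13-9-9-9,
 * or 13-9-9-5 at the last level.  Subtracting each NLS from the remaining
 * size gives the resulting page size: 52-13-9-9-9 leaves 12 bits (4KiB
 * pages), 52-13-9-9-5 leaves 16 bits (64KiB pages), and a leaf PTE at
 * level 2 or level 1 leaves 21 bits (2MiB) or 30 bits (1GiB) respectively.
 */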

static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls); /* Shift */
        index &= ((1UL << *nls) - 1);     /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);

        if (nlb & mask) {
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx
                          " page dir size: 0x"TARGET_FMT_lx"\n",
                          __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}
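
/*
 * Each non-leaf entry (a PDE) supplies the base (NLB) and size (NLS) of
 * the next-level table, so one call per level advances *pte_addr down the
 * tree: *pte_addr = NLB + index * 8, where index is the next NLS-bit field
 * of the effective address.  *psize shrinks by NLS at every level, and
 * whatever remains when a leaf PTE is found is the page size in bits.
 */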

static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1);     /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: misaligned page dir base: 0x"TARGET_FMT_lx
                      " page dir size: 0x"TARGET_FMT_lx"\n",
                      __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* OR high bits of rpn with low bits of ea to form the whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}
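
/*
 * Worked example, assuming the common 4KiB geometry (RTS = 52, NLS
 * 13-9-9-9): the walk consumes eaddr bits 51:39 (root index), then 38:30,
 * 29:21 and 20:12, leaving *psize == 12; the real address is then the leaf
 * RPN above bit 12 combined with eaddr bits 11:0.
 */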

static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType orig_access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, bool guest_visible)
{
    MMUAccessType access_type = orig_access_type;
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    if (pde_addr) {
        /*
         * Translation of process-scoped tables/directories is performed as
         * a read access.
         */
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, orig_access_type,
                                  eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
    }

    return 0;
}

/*
 * The spapr vhc has a flat partition scope provided by qemu memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

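    /*
     * A process-table entry (struct prtb_entry) is two doublewords; only
     * the first, prtbe0, is consumed below: it carries the root and size
     * of this process's radix tree in the same RTS/RPDB/RPDS layout as
     * the partition-table entry.
     */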
    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        /* mmu_idx is 5 because we're translating from hypervisor scope */
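        /*
         * (With the book3s mmu_idx encoding in internal.h, idx 5 decodes
         * as HV state, relocation on, not problem state.)
         */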
        ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                 prtbe_addr, pate, &h_raddr,
                                                 &h_prot, &h_page_size, true,
                                                 5, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1);                            /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            /* mmu_idx is 5 because we're translating from hypervisor scope */
            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     pte_addr, pate, &h_raddr,
                                                     &h_prot, &h_page_size,
                                                     true, 5, guest_visible);
            if (ret) {
                return ret;
            }

            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }

            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* OR high bits of rpn with low bits of ea to form the whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
    }

    return 0;
}

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx,
                                                   guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}

bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}