/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "internal.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

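/*
 * A "fully qualified address" is the (LPID, PID, EA) triple that selects
 * which radix tree(s) translate an access. The quadrant is picked by the
 * two most-significant EA bits; a sketch of the decode performed below
 * (see the R_EADDR_QUADRANT* masks) for the hypervisor case:
 *
 *   EA(0:1) = 0b00 -> quadrant 0: lpid = 0,     pid = PIDR  (application)
 *   EA(0:1) = 0b01 -> quadrant 1: lpid = LPIDR, pid = PIDR  (HV only)
 *   EA(0:1) = 0b10 -> quadrant 2: lpid = LPIDR, pid = 0     (HV only)
 *   EA(0:1) = 0b11 -> quadrant 3: lpid = 0,     pid = 0     (operating system)
 */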
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    /* When EA(2:11) are nonzero, raise a segment interrupt */
    if (eaddr & ~R_EADDR_VALID_MASK) {
        return false;
    }

    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type,
                                   vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
        break;
    case MMU_DATA_STORE:
    case MMU_DATA_LOAD:
        /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
        break;
    default:
        g_assert_not_reached();
    }
    env->error_code = 0;
}

static inline const char *access_str(MMUAccessType access_type)
{
    return access_type == MMU_DATA_LOAD ? "reading" :
        (access_type == MMU_DATA_STORE ? "writing" : "execute");
}

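/*
 * Unlike the data-side interrupts, an ISI has no DSISR-style register:
 * the cause bits are handed to the exception code via env->error_code
 * and surface in SRR1, which is why instruction faults carry SRR1_*
 * flags (e.g. SRR1_NOEXEC_GUARD) while data faults carry DSISR_* flags.
 */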
static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}

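/*
 * Rough sketch of the Encoded Access Authority decode used below; the
 * actual helper is ppc_radix64_get_prot_eaa() (mmu-radix64.h). The EAA
 * grant bits map onto QEMU page protection bits, roughly:
 *
 *     prot = (pte & R_PTE_EAA_R ? PAGE_READ  : 0) |
 *            (pte & R_PTE_EAA_W ? PAGE_WRITE : 0) |
 *            (pte & R_PTE_EAA_X ? PAGE_EXEC  : 0);
 *
 * with R_PTE_EAA_PRIV additionally restricting the page to privileged
 * access, handled case by case below.
 */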
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO &&
        access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && mmuidx_pr(mmu_idx)) {
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !mmuidx_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                                                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}

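/*
 * Software-managed Reference/Change update: R is set on any access and C
 * only on a store. For non-store accesses the page is handed to the TLB
 * without PAGE_WRITE, so the first store refaults through this path and
 * gets a chance to set C before the page is dirtied.
 */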
static void ppc_radix64_set_rc(PowerPCCPU *cpu, MMUAccessType access_type,
                               uint64_t pte, hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (access_type == MMU_DATA_STORE) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

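/*
 * Illustrative layout of a non-leaf page directory entry as consumed
 * below (field positions elided; see the R_PDE_* masks):
 *
 *   | VALID | LEAF=0 | ... NLB (next level base) ... | NLS |
 *
 * NLS gives the number of EA bits decoded by the next level, i.e. the
 * next directory holds 2^NLS eight-byte entries starting at NLB.
 */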
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, pde;

    if (*nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls); /* Shift */
        index &= ((1UL << *nls) - 1); /* Mask */
        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
    }
    return 0;
}

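/*
 * Worked example (hypothetical numbers, Linux-style 4K radix geometry):
 * with a 52-bit RTS and a 13-9-9-9 tree, each iteration peels nls bits
 * off the top of the remaining *psize:
 *
 *   level 0: index = (eaddr >> 39) & 0x1fff    (*psize: 52 -> 39)
 *   level 1: index = (eaddr >> 30) & 0x1ff     (*psize: 39 -> 30)
 *   level 2: index = (eaddr >> 21) & 0x1ff     (*psize: 30 -> 21)
 *   level 3: index = (eaddr >> 12) & 0x1ff     (leaf, *psize = 12)
 *
 * so the final real address is (rpn & ~0xfff) | (eaddr & 0xfff).
 */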
static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    *pte_addr = base_addr + (index * sizeof(pde));
    do {
        int ret;

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits to ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !msr_hv) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}

static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, bool guest_visible)
{
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr,
                                  fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, h_prot);
    }

    return 0;
}

/*
 * The spapr vhc has a flat partition scope provided by qemu memory when
 * not nested.
 *
 * When running a nested guest, the addressing is 2-level radix on top of the
 * vhc memory, so it works practically identically to the bare metal 2-level
 * radix. So that code is selected directly. A cleaner and more flexible nested
 * hypervisor implementation would allow the vhc to provide a ->nested_xlate()
 * function but that is not required for the moment.
 */
static bool vhyp_flat_addressing(PowerPCCPU *cpu)
{
    if (cpu->vhyp) {
        return !vhyp_cpu_in_nested(cpu);
    }
    return false;
}

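/*
 * Process-table geometry assumed below: each prtb_entry is a pair of
 * doublewords, so PID n sits at PRTB + n * sizeof(struct prtb_entry)
 * (16 bytes), and PATE1_R_PRTS encodes the table size as 2^(PRTS + 12)
 * bytes. prtbe0 then supplies the root (RTS/RPDB/RPDS) of that process's
 * radix tree, in the same format as the partition table's dw0.
 */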
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation.
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                 pate, &h_raddr, &h_prot,
                                                 &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                 5, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (msr_hv || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1); /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each guest page-table access during the walk is itself subject
         * to partition-scoped translation
         */
        do {
            ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                     pate, &h_raddr, &h_prot,
                                                     &h_page_size, true,
            /* mmu_idx is 5 because we're translating from hypervisor scope */
                                                     5, guest_visible);
            if (ret) {
                return ret;
            }

            ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
                                         &nls, g_page_size, &pte, &fault_cause);
            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, access_type, pte, pte_addr, g_prot);
    }

    return 0;
}

/*
 * Radix tree translation is a 2-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |     Scoped    |
 *              +-------------+----------------+---------------+
 */
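
/*
 * Example of the table above (illustrative): a radix guest kernel access
 * (HV = 0, relocation on, quadrant 3) is first resolved with pid = 0
 * through the guest's process table, and the resulting guest real address
 * is then pushed through the partition-scoped tree for its LPID. The
 * equivalent bare-metal access (HV = 1, quadrant 3, lpid = 0) stops after
 * the process-scoped step.
 */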
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr,
                                                   pid, pate, &g_raddr, &prot,
                                                   &psize, mmu_idx,
                                                   guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}

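/*
 * The external entry point used by the common PowerPC xlate code: a thin
 * tracing wrapper around the translation proper. Note that the log line
 * reads *protp/*raddrp even when the translation failed and they may not
 * have been written, so it is only meaningful for successful lookups.
 */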
bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type,
                       hwaddr *raddrp, int *psizep, int *protp, int mmu_idx,
                       bool guest_visible)
{
    bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp,
                                      psizep, protp, mmu_idx, guest_visible);

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx,
                  *protp & PAGE_READ ? 'r' : '-',
                  *protp & PAGE_WRITE ? 'w' : '-',
                  *protp & PAGE_EXEC ? 'x' : '-',
                  *raddrp);

    return ret;
}