/*
 * PowerPC Radix MMU emulation helpers for QEMU.
 *
 * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qemu/error-report.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "exec/log.h"
#include "mmu-radix64.h"
#include "mmu-book3s-v3.h"

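/*
 * Split an effective address into the (lpid, pid) pair that selects the
 * translation context, based on the effective address quadrant and MSR[HV].
 * Returns false for quadrants that are illegal in the current mode
 * (quadrants 1 and 2 when MSR[HV] = 0).
 */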
static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env,
                                                 vaddr eaddr,
                                                 uint64_t *lpid, uint64_t *pid)
{
    if (msr_hv) { /* MSR[HV] -> Hypervisor/bare metal */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0:
            *lpid = 0;
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1:
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT2:
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0;
            break;
        case R_EADDR_QUADRANT3:
            *lpid = 0;
            *pid = 0;
            break;
        default:
            g_assert_not_reached();
        }
    } else { /* !MSR[HV] -> Guest */
        switch (eaddr & R_EADDR_QUADRANT) {
        case R_EADDR_QUADRANT0: /* Guest application */
            *lpid = env->spr[SPR_LPIDR];
            *pid = env->spr[SPR_BOOKS_PID];
            break;
        case R_EADDR_QUADRANT1: /* Illegal */
        case R_EADDR_QUADRANT2:
            return false;
        case R_EADDR_QUADRANT3: /* Guest OS */
            *lpid = env->spr[SPR_LPIDR];
            *pid = 0; /* pid set to 0 -> addresses guest operating system */
            break;
        default:
            g_assert_not_reached();
        }
    }

    return true;
}

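/*
 * Raise a Segment Interrupt: ISEG for instruction fetches (rwx == 2),
 * DSEG for data accesses (the faulting address is reported in DAR).
 */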
static void ppc_radix64_raise_segi(PowerPCCPU *cpu, int rwx, vaddr eaddr)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_ISEG;
    } else { /* Data Segment Interrupt */
        cs->exception_index = POWERPC_EXCP_DSEG;
        env->spr[SPR_DAR] = eaddr;
    }
    env->error_code = 0;
}

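/*
 * Raise a Storage Interrupt: ISI for instruction fetches, with the cause
 * in the error code (SRR1 bits); DSI for data accesses, with the cause in
 * DSISR and the faulting address in DAR.
 */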
static void ppc_radix64_raise_si(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                 uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
    } else { /* Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_DSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
    }
}

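/*
 * Raise a Hypervisor Storage Interrupt: HISI for instruction fetches,
 * HDSI for data accesses. The guest real address being translated is
 * reported in ASDR.
 */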
static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, int rwx, vaddr eaddr,
                                  hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    if (rwx == 2) { /* H Instruction Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
    } else { /* H Data Storage Interrupt */
        cs->exception_index = POWERPC_EXCP_HDSI;
        if (rwx == 1) { /* Write -> Store */
            cause |= DSISR_ISSTORE;
        }
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = 0;
    }
}

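/*
 * Check the access permissions granted by a leaf PTE (EAA bits, combined
 * with the AMR where it applies) against the requested access type.
 * Returns false and sets *prot to the allowed PAGE_* flags on success;
 * returns true and updates *fault_cause on a protection or guard fault.
 */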
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, int rwx, uint64_t pte,
                                   int *fault_cause, int *prot,
                                   bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    const int need_prot[] = { PAGE_READ, PAGE_WRITE, PAGE_EXEC };

    /* Check Page Attributes (pte58:59) */
    if (((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO) && (rwx == 2)) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) && msr_pr) {
        *prot = 0;
    } else if (msr_pr || (pte & R_PTE_EAA_PRIV) || partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !msr_pr && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    if (need_prot[rwx] & ~(*prot)) { /* Page Protected for that Access */
        *fault_cause |= DSISR_PROTFAULT;
        return true;
    }

    return false;
}

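/*
 * Update the Reference and Change bits in the PTE in place. R is always
 * set; C is only set for stores. For non-store accesses the page is
 * mapped read-only so that the first store faults again and sets C then.
 */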
static void ppc_radix64_set_rc(PowerPCCPU *cpu, int rwx, uint64_t pte,
                               hwaddr pte_addr, int *prot)
{
    CPUState *cs = CPU(cpu);
    uint64_t npte;

    npte = pte | R_PTE_R; /* Always set reference bit */

    if (rwx == 1) { /* Store/Write */
        npte |= R_PTE_C; /* Set change bit */
    } else {
        /*
         * Treat the page as read-only for now, so that a later write
         * will pass through this function again to set the C bit.
         */
        *prot &= ~PAGE_WRITE;
    }

    if (pte ^ npte) { /* If pte has changed then write it back */
        stq_phys(cs->as, pte_addr, npte);
    }
}

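/*
 * Walk one level of the radix tree: load the directory entry at *pte_addr,
 * check that it is valid and, if it is not a leaf, derive the next level's
 * table address from its NLB/NLS fields. *psize is reduced by the number
 * of address bits decoded at this level.
 */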
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, pde;

    if (*nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls); /* Shift */
        index &= ((1UL << *nls) - 1); /* Mask */
        *pte_addr = (pde & R_PDE_NLB) + (index * sizeof(pde));
    }
    return 0;
}

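/*
 * Walk the radix tree from its root (base_addr, nls) down to the leaf PTE
 * mapping eaddr. On success returns 0 and fills in the real address, the
 * leaf PTE value and its address; on failure returns non-zero and updates
 * *fault_cause.
 */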
static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;

    if (nls < 5) { /* Directory maps less than 2**5 entries */
        *fault_cause |= DSISR_R_BADCONFIG;
        return 1;
    }

    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    *pte_addr = base_addr + (index * sizeof(pde));
    do {
        int ret;

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits of ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}

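/*
 * Sanity-check a Partition Table Entry: it must describe a radix (HR)
 * partition, lpid 0 is only valid in hypervisor state, and the process
 * table size field must be large enough.
 */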
static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate)
{
    CPUPPCState *env = &cpu->env;

    if (!(pate->dw0 & PATE0_HR)) {
        return false;
    }
    if (lpid == 0 && !msr_hv) {
        return false;
    }
    if ((pate->dw0 & PATE1_R_PRTS) < 5) {
        return false;
    }
    /* More checks ... */
    return true;
}

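/*
 * Partition-scoped translation: convert a guest real address into a host
 * real address by walking the partition-scoped tree rooted in the PATE.
 * Failures are reported as HISI/HDSI when guest_visible, with
 * DSISR_PRTABLE_FAULT added when the address being translated was that of
 * a guest page directory/table entry (pde_addr).
 */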
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu, int rwx,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              bool guest_visible)
{
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, h_prot, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            ppc_radix64_raise_hsi(cpu, rwx, eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, h_prot);
    }

    return 0;
}

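/*
 * Process-scoped translation: convert an effective address into a guest
 * real address by walking the tree rooted in the process table entry
 * selected by PID. Unless a virtual hypervisor is in use, the process
 * table itself (and, for a guest, each page-table access) is reached via
 * partition-scoped translation.
 */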
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu, int rwx,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = (pate.dw1 & PATE1_R_PRTB) + offset;

    if (cpu->vhyp) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, prtbe_addr,
                                                 pate, &h_raddr, &h_prot,
                                                 &h_page_size, true,
                                                 guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (msr_hv || cpu->vhyp) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1); /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            ret = ppc_radix64_partition_scoped_xlate(cpu, 0, eaddr, pte_addr,
                                                     pate, &h_raddr, &h_prot,
                                                     &h_page_size, true,
                                                     guest_visible);
            if (ret) {
                return ret;
            }

            ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK, &h_raddr,
                                         &nls, g_page_size, &pte, &fault_cause);
            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits of ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, rwx, pte, &fault_cause, g_prot, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, rwx, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        ppc_radix64_set_rc(cpu, rwx, pte, pte_addr, g_prot);
    }

    return 0;
}

/*
 * Radix tree translation is a two-step translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 */
static int ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                             bool relocation,
                             hwaddr *raddr, int *psizep, int *protp,
                             bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, rwx, eaddr);
        }
        return 1;
    }

    /* Get Process Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->get_pate(cpu->vhyp, &pate);
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_NOPTE);
            }
            return 1;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, rwx, eaddr, DSISR_R_BADCONFIG);
            }
            return 1;
        }
    }

    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, rwx, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, guest_visible);
        if (ret) {
            return ret;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (cpu->vhyp) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !msr_hv) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, rwx, eaddr, g_raddr,
                                                     pate, raddr, &prot, &psize,
                                                     false, guest_visible);
            if (ret) {
                return ret;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return 0;
}

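/*
 * TCG MMU fault handler for the radix MMU: translate eaddr for the given
 * access type and, on success, install the mapping in the QEMU TLB.
 * Real-mode HV/vhyp accesses bypass the page tables and only apply HRMOR.
 * Returns 0 on success; on failure the interrupt state has already been
 * set up and 1 is returned.
 */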
int ppc_radix64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr, int rwx,
                                 int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    int page_size, prot;
    bool relocation;
    hwaddr raddr;

    assert(!(msr_hv && cpu->vhyp));
    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    relocation = ((rwx == 2) && (msr_ir == 1)) || ((rwx != 2) && (msr_dr == 1));
    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        }
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).
     */
    if (!ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Translate eaddr to raddr (where raddr is addr qemu needs for access) */
    if (ppc_radix64_xlate(cpu, eaddr, rwx, relocation, &raddr,
                          &page_size, &prot, true)) {
        return 1;
    }

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1UL << page_size);
    return 0;
}

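/*
 * Debug (gdbstub/monitor) address translation: same walk as above but with
 * guest_visible = false, so no interrupt state is touched and no R/C bit
 * updates are performed. Returns -1 if the address cannot be translated.
 */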
hwaddr ppc_radix64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    int psize, prot;
    hwaddr raddr;

    /* Handle Real Mode */
    if ((msr_dr == 0) && (msr_hv || cpu->vhyp)) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        return eaddr & 0x0FFFFFFFFFFFFFFFULL;
    }

    if (ppc_radix64_xlate(cpu, eaddr, 0, msr_dr, &raddr, &psize,
                          &prot, false)) {
        return -1;
    }

    return raddr & TARGET_PAGE_MASK;
}