]>
Commit | Line | Data |
---|---|---|
d5fee0bb SJS |
1 | /* |
2 | * PowerPC Radix MMU emulation helpers for QEMU. | |
3 | * | |
4 | * Copyright (c) 2016 Suraj Jitindar Singh, IBM Corporation | |
5 | * | |
6 | * This library is free software; you can redistribute it and/or | |
7 | * modify it under the terms of the GNU Lesser General Public | |
8 | * License as published by the Free Software Foundation; either | |
6bd039cd | 9 | * version 2.1 of the License, or (at your option) any later version. |
d5fee0bb SJS |
10 | * |
11 | * This library is distributed in the hope that it will be useful, | |
12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
14 | * Lesser General Public License for more details. | |
15 | * | |
16 | * You should have received a copy of the GNU Lesser General Public | |
17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. | |
18 | */ | |
19 | ||
20 | #include "qemu/osdep.h" | |
d5fee0bb SJS |
21 | #include "cpu.h" |
22 | #include "exec/exec-all.h" | |
d5fee0bb SJS |
23 | #include "qemu/error-report.h" |
24 | #include "sysemu/kvm.h" | |
25 | #include "kvm_ppc.h" | |
26 | #include "exec/log.h" | |
182357db | 27 | #include "internal.h" |
d5fee0bb SJS |
28 | #include "mmu-radix64.h" |
29 | #include "mmu-book3s-v3.h" | |
30 | ||
18304226 GK |
31 | static bool ppc_radix64_get_fully_qualified_addr(const CPUPPCState *env, |
32 | vaddr eaddr, | |
d5fee0bb SJS |
33 | uint64_t *lpid, uint64_t *pid) |
34 | { | |
9b4eaee4 CLG |
35 | /* When EA(2:11) are nonzero, raise a segment interrupt */ |
36 | if (eaddr & ~R_EADDR_VALID_MASK) { | |
37 | return false; | |
38 | } | |
39 | ||
9de754d3 | 40 | if (FIELD_EX64(env->msr, MSR, HV)) { /* MSR[HV] -> Hypervisor/bare metal */ |
539c6e73 BH |
41 | switch (eaddr & R_EADDR_QUADRANT) { |
42 | case R_EADDR_QUADRANT0: | |
43 | *lpid = 0; | |
44 | *pid = env->spr[SPR_BOOKS_PID]; | |
45 | break; | |
46 | case R_EADDR_QUADRANT1: | |
47 | *lpid = env->spr[SPR_LPIDR]; | |
48 | *pid = env->spr[SPR_BOOKS_PID]; | |
49 | break; | |
50 | case R_EADDR_QUADRANT2: | |
51 | *lpid = env->spr[SPR_LPIDR]; | |
52 | *pid = 0; | |
53 | break; | |
54 | case R_EADDR_QUADRANT3: | |
55 | *lpid = 0; | |
56 | *pid = 0; | |
57 | break; | |
7caee782 GK |
58 | default: |
59 | g_assert_not_reached(); | |
539c6e73 BH |
60 | } |
61 | } else { /* !MSR[HV] -> Guest */ | |
d5fee0bb SJS |
62 | switch (eaddr & R_EADDR_QUADRANT) { |
63 | case R_EADDR_QUADRANT0: /* Guest application */ | |
64 | *lpid = env->spr[SPR_LPIDR]; | |
65 | *pid = env->spr[SPR_BOOKS_PID]; | |
66 | break; | |
67 | case R_EADDR_QUADRANT1: /* Illegal */ | |
68 | case R_EADDR_QUADRANT2: | |
69 | return false; | |
70 | case R_EADDR_QUADRANT3: /* Guest OS */ | |
71 | *lpid = env->spr[SPR_LPIDR]; | |
72 | *pid = 0; /* pid set to 0 -> addresses guest operating system */ | |
73 | break; | |
7caee782 GK |
74 | default: |
75 | g_assert_not_reached(); | |
d5fee0bb SJS |
76 | } |
77 | } | |
78 | ||
79 | return true; | |
80 | } | |
81 | ||
13c5fdba RH |
82 | static void ppc_radix64_raise_segi(PowerPCCPU *cpu, MMUAccessType access_type, |
83 | vaddr eaddr) | |
d5fee0bb SJS |
84 | { |
85 | CPUState *cs = CPU(cpu); | |
86 | CPUPPCState *env = &cpu->env; | |
87 | ||
13c5fdba RH |
88 | switch (access_type) { |
89 | case MMU_INST_FETCH: | |
90 | /* Instruction Segment Interrupt */ | |
d5fee0bb | 91 | cs->exception_index = POWERPC_EXCP_ISEG; |
13c5fdba RH |
92 | break; |
93 | case MMU_DATA_STORE: | |
94 | case MMU_DATA_LOAD: | |
95 | /* Data Segment Interrupt */ | |
d5fee0bb SJS |
96 | cs->exception_index = POWERPC_EXCP_DSEG; |
97 | env->spr[SPR_DAR] = eaddr; | |
13c5fdba RH |
98 | break; |
99 | default: | |
100 | g_assert_not_reached(); | |
d5fee0bb SJS |
101 | } |
102 | env->error_code = 0; | |
103 | } | |
104 | ||
932de569 CLG |
105 | static inline const char *access_str(MMUAccessType access_type) |
106 | { | |
107 | return access_type == MMU_DATA_LOAD ? "reading" : | |
108 | (access_type == MMU_DATA_STORE ? "writing" : "execute"); | |
109 | } | |
110 | ||
/*
 * Raise a (process-scoped) Storage Interrupt.
 *
 * Instruction fetches raise an ISI and report the fault bits through
 * env->error_code (which feeds SRR1); data accesses raise a DSI and
 * report through SPR_DSISR/SPR_DAR with error_code cleared.
 */
static void ppc_radix64_raise_si(PowerPCCPU *cpu, MMUAccessType access_type,
                                 vaddr eaddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* Instruction Storage Interrupt: cause lands in SRR1 via error_code */
        cs->exception_index = POWERPC_EXCP_ISI;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        /* stores additionally flag DSISR_ISSTORE before the common DSI path */
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* Data Storage Interrupt: cause/EA reported via DSISR and DAR */
        cs->exception_index = POWERPC_EXCP_DSI;
        env->spr[SPR_DSISR] = cause;
        env->spr[SPR_DAR] = eaddr;
        env->error_code = 0;
        break;
    default:
        g_assert_not_reached();
    }
}
141 | ||
/*
 * Raise a Hypervisor Storage Interrupt: HISI for instruction fetches,
 * HDSI for data accesses.  g_raddr, the guest real address whose
 * partition-scoped translation failed, is reported through SPR_ASDR.
 */
static void ppc_radix64_raise_hsi(PowerPCCPU *cpu, MMUAccessType access_type,
                                  vaddr eaddr, hwaddr g_raddr, uint32_t cause)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;

    env->error_code = 0;
    if (cause & DSISR_PRTABLE_FAULT) {
        /* HDSI PRTABLE_FAULT gets the originating access type in error_code */
        env->error_code = access_type;
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx" 0x%"
                  HWADDR_PRIx" cause %08x\n",
                  __func__, access_str(access_type),
                  eaddr, g_raddr, cause);

    switch (access_type) {
    case MMU_INST_FETCH:
        /* H Instruction Storage Interrupt: cause reported via error_code */
        cs->exception_index = POWERPC_EXCP_HISI;
        env->spr[SPR_ASDR] = g_raddr;
        env->error_code = cause;
        break;
    case MMU_DATA_STORE:
        /* stores additionally flag DSISR_ISSTORE before the common HDSI path */
        cause |= DSISR_ISSTORE;
        /* fall through */
    case MMU_DATA_LOAD:
        /* H Data Storage Interrupt: cause/EA/guest-RA via HDSISR/HDAR/ASDR */
        cs->exception_index = POWERPC_EXCP_HDSI;
        env->spr[SPR_HDSISR] = cause;
        env->spr[SPR_HDAR] = eaddr;
        env->spr[SPR_ASDR] = g_raddr;
        break;
    default:
        g_assert_not_reached();
    }
}
d5fee0bb | 181 | |
/*
 * Check whether the access is permitted by the PTE's attributes and
 * Encoded Access Authority (EAA) bits, combined with AMR for
 * process-scoped user-accessible pages.
 *
 * Returns true on a protection fault, OR-ing the fault reason into
 * *fault_cause.  On any path it fills *prot with the PAGE_* permissions
 * the PTE grants (possibly 0).
 */
static bool ppc_radix64_check_prot(PowerPCCPU *cpu, MMUAccessType access_type,
                                   uint64_t pte, int *fault_cause, int *prot,
                                   int mmu_idx, bool partition_scoped)
{
    CPUPPCState *env = &cpu->env;
    int need_prot;

    /* Check Page Attributes (pte58:59) */
    if ((pte & R_PTE_ATT) == R_PTE_ATT_NI_IO && access_type == MMU_INST_FETCH) {
        /*
         * Radix PTE entries with the non-idempotent I/O attribute are treated
         * as guarded storage
         */
        *fault_cause |= SRR1_NOEXEC_GUARD;
        return true;
    }

    /* Determine permissions allowed by Encoded Access Authority */
    if (!partition_scoped && (pte & R_PTE_EAA_PRIV) &&
        FIELD_EX64(env->msr, MSR, PR)) {
        /* privileged-only page accessed from problem state: no access */
        *prot = 0;
    } else if (mmuidx_pr(mmu_idx) || (pte & R_PTE_EAA_PRIV) ||
               partition_scoped) {
        *prot = ppc_radix64_get_prot_eaa(pte);
    } else { /* !MSR_PR && !(pte & R_PTE_EAA_PRIV) && !partition_scoped */
        *prot = ppc_radix64_get_prot_eaa(pte);
        *prot &= ppc_radix64_get_prot_amr(cpu); /* Least combined permissions */
    }

    /* Check if requested access type is allowed */
    need_prot = prot_for_access_type(access_type);
    if (need_prot & ~*prot) { /* Page Protected for that Access */
        *fault_cause |= access_type == MMU_INST_FETCH ? SRR1_NOEXEC_GUARD :
                        DSISR_PROTFAULT;
        return true;
    }

    return false;
}
221 | ||
0fdf05d7 | 222 | static int ppc_radix64_check_rc(MMUAccessType access_type, uint64_t pte) |
d5fee0bb | 223 | { |
0fdf05d7 SA |
224 | switch (access_type) { |
225 | case MMU_DATA_STORE: | |
226 | if (!(pte & R_PTE_C)) { | |
227 | break; | |
228 | } | |
229 | /* fall through */ | |
230 | case MMU_INST_FETCH: | |
231 | case MMU_DATA_LOAD: | |
232 | if (!(pte & R_PTE_R)) { | |
233 | break; | |
234 | } | |
d5fee0bb | 235 | |
0fdf05d7 SA |
236 | /* R/C bits are already set appropriately for this access */ |
237 | return 0; | |
d5fee0bb SJS |
238 | } |
239 | ||
0fdf05d7 | 240 | return 1; |
d5fee0bb SJS |
241 | } |
242 | ||
47e83d91 LL |
243 | static bool ppc_radix64_is_valid_level(int level, int psize, uint64_t nls) |
244 | { | |
fb22d743 LL |
245 | bool ret; |
246 | ||
47e83d91 LL |
247 | /* |
248 | * Check if this is a valid level, according to POWER9 and POWER10 | |
249 | * Processor User's Manuals, sections 4.10.4.1 and 5.10.6.1, respectively: | |
250 | * Supported Radix Tree Configurations and Resulting Page Sizes. | |
251 | * | |
252 | * Note: these checks are specific to POWER9 and POWER10 CPUs. Any future | |
253 | * CPUs that supports a different Radix MMU configuration will need their | |
254 | * own implementation. | |
255 | */ | |
256 | switch (level) { | |
257 | case 0: /* Root Page Dir */ | |
fb22d743 LL |
258 | ret = psize == 52 && nls == 13; |
259 | break; | |
47e83d91 LL |
260 | case 1: |
261 | case 2: | |
fb22d743 LL |
262 | ret = nls == 9; |
263 | break; | |
47e83d91 | 264 | case 3: |
fb22d743 LL |
265 | ret = nls == 9 || nls == 5; |
266 | break; | |
47e83d91 | 267 | default: |
fb22d743 LL |
268 | ret = false; |
269 | } | |
270 | ||
271 | if (unlikely(!ret)) { | |
272 | qemu_log_mask(LOG_GUEST_ERROR, "invalid radix configuration: " | |
273 | "level %d size %d nls %"PRIu64"\n", | |
274 | level, psize, nls); | |
47e83d91 | 275 | } |
fb22d743 | 276 | return ret; |
47e83d91 LL |
277 | } |
278 | ||
/*
 * Fetch one page directory/table entry and advance the walk one level.
 *
 * On entry *pte_addr is the guest-physical address of the entry to read
 * and *nls/*psize describe the current level.  On success returns 0 with
 * *pte set to the entry just read; when the entry is not a leaf,
 * *nls, *psize and *pte_addr are updated in place for the next level.
 * Returns 1 with DSISR_NOPTE OR-ed into *fault_cause on an invalid entry.
 */
static int ppc_radix64_next_level(AddressSpace *as, vaddr eaddr,
                                  uint64_t *pte_addr, uint64_t *nls,
                                  int *psize, uint64_t *pte, int *fault_cause)
{
    uint64_t index, mask, nlb, pde;

    /* Read page <directory/table> entry from guest address space */
    pde = ldq_phys(as, *pte_addr);
    if (!(pde & R_PTE_VALID)) { /* Invalid Entry */
        *fault_cause |= DSISR_NOPTE;
        return 1;
    }

    *pte = pde;
    *psize -= *nls;
    if (!(pde & R_PTE_LEAF)) { /* Prepare for next iteration */
        *nls = pde & R_PDE_NLS;
        index = eaddr >> (*psize - *nls); /* Shift */
        index &= ((1UL << *nls) - 1); /* Mask */
        nlb = pde & R_PDE_NLB;
        mask = MAKE_64BIT_MASK(0, *nls + 3);

        if (nlb & mask) {
            /* Hardware ignores the low-order alignment bits; emulate that */
            qemu_log_mask(LOG_GUEST_ERROR,
                          "%s: misaligned page dir/table base: 0x"TARGET_FMT_lx
                          " page dir size: 0x"TARGET_FMT_lx"\n",
                          __func__, nlb, mask + 1);
            nlb &= ~mask;
        }
        *pte_addr = nlb + index * sizeof(pde);
    }
    return 0;
}
d5fee0bb | 312 | |
/*
 * Walk the radix tree rooted at base_addr (with root "next level size"
 * nls) to translate eaddr.
 *
 * On success returns 0 with *raddr set to the translated real address,
 * *psize to the resulting page-size shift, *pte to the leaf PTE and
 * *pte_addr to the leaf's address.  Returns 1 with *fault_cause updated
 * on an invalid level configuration or missing PTE.
 */
static int ppc_radix64_walk_tree(AddressSpace *as, vaddr eaddr,
                                 uint64_t base_addr, uint64_t nls,
                                 hwaddr *raddr, int *psize, uint64_t *pte,
                                 int *fault_cause, hwaddr *pte_addr)
{
    uint64_t index, pde, rpn, mask;
    int level = 0;

    index = eaddr >> (*psize - nls); /* Shift */
    index &= ((1UL << nls) - 1); /* Mask */
    mask = MAKE_64BIT_MASK(0, nls + 3);

    if (base_addr & mask) {
        /* Hardware ignores the low-order alignment bits; emulate that */
        qemu_log_mask(LOG_GUEST_ERROR,
                      "%s: misaligned page dir base: 0x"TARGET_FMT_lx
                      " page dir size: 0x"TARGET_FMT_lx"\n",
                      __func__, base_addr, mask + 1);
        base_addr &= ~mask;
    }
    *pte_addr = base_addr + index * sizeof(pde);

    do {
        int ret;

        /* Validate each level against the POWER9/POWER10 configurations */
        if (!ppc_radix64_is_valid_level(level++, *psize, nls)) {
            *fault_cause |= DSISR_R_BADCONFIG;
            return 1;
        }

        ret = ppc_radix64_next_level(as, eaddr, pte_addr, &nls, psize, &pde,
                                     fault_cause);
        if (ret) {
            return ret;
        }
    } while (!(pde & R_PTE_LEAF));

    *pte = pde;
    rpn = pde & R_PTE_RPN;
    mask = (1UL << *psize) - 1;

    /* Or high bits of rpn and low bits to ea to form whole real addr */
    *raddr = (rpn & ~mask) | (eaddr & mask);
    return 0;
}
357 | ||
539c6e73 BH |
358 | static bool validate_pate(PowerPCCPU *cpu, uint64_t lpid, ppc_v3_pate_t *pate) |
359 | { | |
360 | CPUPPCState *env = &cpu->env; | |
361 | ||
362 | if (!(pate->dw0 & PATE0_HR)) { | |
363 | return false; | |
364 | } | |
9de754d3 | 365 | if (lpid == 0 && !FIELD_EX64(env->msr, MSR, HV)) { |
539c6e73 BH |
366 | return false; |
367 | } | |
933abb9c SJS |
368 | if ((pate->dw0 & PATE1_R_PRTS) < 5) { |
369 | return false; | |
370 | } | |
539c6e73 BH |
371 | /* More checks ... */ |
372 | return true; | |
373 | } | |
374 | ||
/*
 * Partition-scoped translation: guest real address -> host real address.
 *
 * Walks the partition-scoped radix tree described by pate.  When
 * pde_addr is true, g_raddr is the address of a process-table
 * entry/directory being fetched on behalf of a process-scoped walk; such
 * fetches are performed as reads and a failure is tagged with
 * DSISR_PRTABLE_FAULT.  Interrupts are only raised when guest_visible.
 * Returns 0 on success (filling *h_raddr/*h_prot/*h_page_size), 1 on
 * fault.
 */
static int ppc_radix64_partition_scoped_xlate(PowerPCCPU *cpu,
                                              MMUAccessType orig_access_type,
                                              vaddr eaddr, hwaddr g_raddr,
                                              ppc_v3_pate_t pate,
                                              hwaddr *h_raddr, int *h_prot,
                                              int *h_page_size, bool pde_addr,
                                              int mmu_idx, uint64_t lpid,
                                              bool guest_visible)
{
    MMUAccessType access_type = orig_access_type;
    int fault_cause = 0;
    hwaddr pte_addr;
    uint64_t pte;

    if (pde_addr) {
        /*
         * Translation of process-scoped tables/directories is performed as
         * a read-access.
         */
        access_type = MMU_DATA_LOAD;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u 0x%"HWADDR_PRIx"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, g_raddr);

    *h_page_size = PRTBE_R_GET_RTS(pate.dw0);
    /* No valid pte or access denied due to protection */
    if (ppc_radix64_walk_tree(CPU(cpu)->as, g_raddr, pate.dw0 & PRTBE_R_RPDB,
                              pate.dw0 & PRTBE_R_RPDS, h_raddr, h_page_size,
                              &pte, &fault_cause, &pte_addr) ||
        ppc_radix64_check_prot(cpu, access_type, pte,
                               &fault_cause, h_prot, mmu_idx, true)) {
        if (pde_addr) { /* address being translated was that of a guest pde */
            fault_cause |= DSISR_PRTABLE_FAULT;
        }
        if (guest_visible) {
            /* report with the *original* access type, not the pde read */
            ppc_radix64_raise_hsi(cpu, orig_access_type,
                                  eaddr, g_raddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        if (ppc_radix64_check_rc(access_type, pte)) {
            /*
             * Per ISA 3.1 Book III, 7.5.3 and 7.5.5, failure to set R/C during
             * partition-scoped translation when effLPID = 0 results in normal
             * (non-Hypervisor) Data and Instruction Storage Interrupts
             * respectively.
             *
             * ISA 3.0 is ambiguous about this, but tests on POWER9 hardware
             * seem to exhibit the same behavior.
             */
            if (lpid > 0) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, g_raddr,
                                      DSISR_ATOMIC_RC);
            } else {
                ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            }
            return 1;
        }
    }

    return 0;
}
442 | ||
4dce0bde | 443 | /* |
7cebc5db NP |
444 | * The spapr vhc has a flat partition scope provided by qemu memory when |
445 | * not nested. | |
446 | * | |
447 | * When running a nested guest, the addressing is 2-level radix on top of the | |
448 | * vhc memory, so it works practically identically to the bare metal 2-level | |
449 | * radix. So that code is selected directly. A cleaner and more flexible nested | |
450 | * hypervisor implementation would allow the vhc to provide a ->nested_xlate() | |
451 | * function but that is not required for the moment. | |
4dce0bde NP |
452 | */ |
453 | static bool vhyp_flat_addressing(PowerPCCPU *cpu) | |
454 | { | |
455 | if (cpu->vhyp) { | |
7cebc5db | 456 | return !vhyp_cpu_in_nested(cpu); |
4dce0bde NP |
457 | } |
458 | return false; | |
459 | } | |
460 | ||
/*
 * Process-scoped translation: effective address -> guest real address.
 *
 * Looks up the process table entry for "pid" in the table described by
 * pate, then walks the process-scoped radix tree.  When the CPU is not
 * in hypervisor state (and not under flat vhyp addressing), every
 * table/PTE fetch is itself translated through the partition-scoped
 * tree.  Interrupts are only raised when guest_visible.  Returns 0 on
 * success (filling *g_raddr/*g_prot/*g_page_size), nonzero on fault.
 */
static int ppc_radix64_process_scoped_xlate(PowerPCCPU *cpu,
                                            MMUAccessType access_type,
                                            vaddr eaddr, uint64_t pid,
                                            ppc_v3_pate_t pate, hwaddr *g_raddr,
                                            int *g_prot, int *g_page_size,
                                            int mmu_idx, uint64_t lpid,
                                            bool guest_visible)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    uint64_t offset, size, prtb, prtbe_addr, prtbe0, base_addr, nls, index, pte;
    int fault_cause = 0, h_page_size, h_prot;
    hwaddr h_raddr, pte_addr;
    int ret;

    qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx
                  " mmu_idx %u pid %"PRIu64"\n",
                  __func__, access_str(access_type),
                  eaddr, mmu_idx, pid);

    prtb = (pate.dw1 & PATE1_R_PRTB);
    size = 1ULL << ((pate.dw1 & PATE1_R_PRTS) + 12);
    if (prtb & (size - 1)) {
        /* Process Table not properly aligned */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_R_BADCONFIG);
        }
        return 1;
    }

    /* Index Process Table by PID to Find Corresponding Process Table Entry */
    offset = pid * sizeof(struct prtb_entry);
    if (offset >= size) {
        /* offset exceeds size of the process table */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_NOPTE);
        }
        return 1;
    }
    prtbe_addr = prtb + offset;

    if (vhyp_flat_addressing(cpu)) {
        prtbe0 = ldq_phys(cs->as, prtbe_addr);
    } else {
        /*
         * Process table addresses are subject to partition-scoped
         * translation
         *
         * On a Radix host, the partition-scoped page table for LPID=0
         * is only used to translate the effective addresses of the
         * process table entries.
         */
        /* mmu_idx is 5 because we're translating from hypervisor scope */
        ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                 prtbe_addr, pate, &h_raddr,
                                                 &h_prot, &h_page_size, true,
                                                 5, lpid, guest_visible);
        if (ret) {
            return ret;
        }
        prtbe0 = ldq_phys(cs->as, h_raddr);
    }

    /* Walk Radix Tree from Process Table Entry to Convert EA to RA */
    *g_page_size = PRTBE_R_GET_RTS(prtbe0);
    base_addr = prtbe0 & PRTBE_R_RPDB;
    nls = prtbe0 & PRTBE_R_RPDS;
    if (FIELD_EX64(env->msr, MSR, HV) || vhyp_flat_addressing(cpu)) {
        /*
         * Can treat process table addresses as real addresses
         */
        ret = ppc_radix64_walk_tree(cs->as, eaddr & R_EADDR_MASK, base_addr,
                                    nls, g_raddr, g_page_size, &pte,
                                    &fault_cause, &pte_addr);
        if (ret) {
            /* No valid PTE */
            if (guest_visible) {
                ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
            }
            return ret;
        }
    } else {
        uint64_t rpn, mask;
        int level = 0;

        index = (eaddr & R_EADDR_MASK) >> (*g_page_size - nls); /* Shift */
        index &= ((1UL << nls) - 1); /* Mask */
        pte_addr = base_addr + (index * sizeof(pte));

        /*
         * Each process table address is subject to a partition-scoped
         * translation
         */
        do {
            /* mmu_idx is 5 because we're translating from hypervisor scope */
            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     pte_addr, pate, &h_raddr,
                                                     &h_prot, &h_page_size,
                                                     true, 5, lpid,
                                                     guest_visible);
            if (ret) {
                return ret;
            }

            /* Validate this level before dereferencing the entry */
            if (!ppc_radix64_is_valid_level(level++, *g_page_size, nls)) {
                fault_cause |= DSISR_R_BADCONFIG;
                ret = 1;
            } else {
                ret = ppc_radix64_next_level(cs->as, eaddr & R_EADDR_MASK,
                                             &h_raddr, &nls, g_page_size,
                                             &pte, &fault_cause);
            }

            if (ret) {
                /* No valid pte */
                if (guest_visible) {
                    ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
                }
                return ret;
            }
            pte_addr = h_raddr;
        } while (!(pte & R_PTE_LEAF));

        rpn = pte & R_PTE_RPN;
        mask = (1UL << *g_page_size) - 1;

        /* Or high bits of rpn and low bits to ea to form whole real addr */
        *g_raddr = (rpn & ~mask) | (eaddr & mask);
    }

    if (ppc_radix64_check_prot(cpu, access_type, pte, &fault_cause,
                               g_prot, mmu_idx, false)) {
        /* Access denied due to protection */
        if (guest_visible) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, fault_cause);
        }
        return 1;
    }

    if (guest_visible) {
        /* R/C bits not appropriately set for access */
        if (ppc_radix64_check_rc(access_type, pte)) {
            ppc_radix64_raise_si(cpu, access_type, eaddr, DSISR_ATOMIC_RC);
            return 1;
        }
    }

    return 0;
}
610 | ||
/*
 * Radix tree translation is a 2 steps translation process:
 *
 * 1. Process-scoped translation:   Guest Eff Addr  -> Guest Real Addr
 * 2. Partition-scoped translation: Guest Real Addr -> Host Real Addr
 *
 *                                  MSR[HV]
 *              +-------------+----------------+---------------+
 *              |             |     HV = 0     |     HV = 1    |
 *              +-------------+----------------+---------------+
 *              | Relocation  |    Partition   |      No       |
 *              | = Off       |     Scoped     |  Translation  |
 *  Relocation  +-------------+----------------+---------------+
 *              | Relocation  |   Partition &  |    Process    |
 *              | = On        | Process Scoped |    Scoped     |
 *              +-------------+----------------+---------------+
 *
 * Returns true on success, filling *raddr/*psizep/*protp; returns false
 * on fault, having raised the corresponding interrupt iff guest_visible.
 */
static bool ppc_radix64_xlate_impl(PowerPCCPU *cpu, vaddr eaddr,
                                   MMUAccessType access_type, hwaddr *raddr,
                                   int *psizep, int *protp, int mmu_idx,
                                   bool guest_visible)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpid, pid;
    ppc_v3_pate_t pate;
    int psize, prot;
    hwaddr g_raddr;
    bool relocation;

    assert(!(mmuidx_hv(mmu_idx) && cpu->vhyp));

    relocation = !mmuidx_real(mmu_idx);

    /* HV or virtual hypervisor Real Mode Access */
    if (!relocation && (mmuidx_hv(mmu_idx) || vhyp_flat_addressing(cpu))) {
        /* In real mode top 4 effective addr bits (mostly) ignored */
        *raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (mmuidx_hv(mmu_idx) || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                *raddr |= env->spr[SPR_HRMOR];
            }
        }
        *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        *psizep = TARGET_PAGE_BITS;
        return true;
    }

    /*
     * Check UPRT (we avoid the check in real mode to deal with
     * transitional states during kexec).  Log-only: translation proceeds.
     */
    if (guest_visible && !ppc64_use_proc_tbl(cpu)) {
        qemu_log_mask(LOG_GUEST_ERROR,
                      "LPCR:UPRT not set in radix mode ! LPCR="
                      TARGET_FMT_lx "\n", env->spr[SPR_LPCR]);
    }

    /* Virtual Mode Access - get the fully qualified address */
    if (!ppc_radix64_get_fully_qualified_addr(&cpu->env, eaddr, &lpid, &pid)) {
        if (guest_visible) {
            ppc_radix64_raise_segi(cpu, access_type, eaddr);
        }
        return false;
    }

    /* Get Partition Table */
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc;
        vhc = PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        if (!vhc->get_pate(cpu->vhyp, cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    } else {
        if (!ppc64_v3_get_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
        if (!validate_pate(cpu, lpid, &pate)) {
            if (guest_visible) {
                ppc_radix64_raise_hsi(cpu, access_type, eaddr, eaddr,
                                      DSISR_R_BADCONFIG);
            }
            return false;
        }
    }

    /* Start fully permissive; each stage narrows page size and prot */
    *psizep = INT_MAX;
    *protp = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /*
     * Perform process-scoped translation if relocation enabled.
     *
     * - Translates an effective address to a host real address in
     *   quadrants 0 and 3 when HV=1.
     *
     * - Translates an effective address to a guest real address.
     */
    if (relocation) {
        int ret = ppc_radix64_process_scoped_xlate(cpu, access_type, eaddr, pid,
                                                   pate, &g_raddr, &prot,
                                                   &psize, mmu_idx, lpid,
                                                   guest_visible);
        if (ret) {
            return false;
        }
        *psizep = MIN(*psizep, psize);
        *protp &= prot;
    } else {
        g_raddr = eaddr & R_EADDR_MASK;
    }

    if (vhyp_flat_addressing(cpu)) {
        *raddr = g_raddr;
    } else {
        /*
         * Perform partition-scoped translation if !HV or HV access to
         * quadrants 1 or 2. Translates a guest real address to a host
         * real address.
         */
        if (lpid || !mmuidx_hv(mmu_idx)) {
            int ret;

            ret = ppc_radix64_partition_scoped_xlate(cpu, access_type, eaddr,
                                                     g_raddr, pate, raddr,
                                                     &prot, &psize, false,
                                                     mmu_idx, lpid,
                                                     guest_visible);
            if (ret) {
                return false;
            }
            *psizep = MIN(*psizep, psize);
            *protp &= prot;
        } else {
            *raddr = g_raddr;
        }
    }

    return true;
}
932de569 CLG |
759 | |
760 | bool ppc_radix64_xlate(PowerPCCPU *cpu, vaddr eaddr, MMUAccessType access_type, | |
761 | hwaddr *raddrp, int *psizep, int *protp, int mmu_idx, | |
762 | bool guest_visible) | |
763 | { | |
764 | bool ret = ppc_radix64_xlate_impl(cpu, eaddr, access_type, raddrp, | |
765 | psizep, protp, mmu_idx, guest_visible); | |
766 | ||
767 | qemu_log_mask(CPU_LOG_MMU, "%s for %s @0x%"VADDR_PRIx | |
768 | " mmu_idx %u (prot %c%c%c) -> 0x%"HWADDR_PRIx"\n", | |
769 | __func__, access_str(access_type), | |
770 | eaddr, mmu_idx, | |
771 | *protp & PAGE_READ ? 'r' : '-', | |
772 | *protp & PAGE_WRITE ? 'w' : '-', | |
773 | *protp & PAGE_EXEC ? 'x' : '-', | |
774 | *raddrp); | |
775 | ||
776 | return ret; | |
777 | } |