/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address. This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}
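
/*
 * An illustrative sketch of the W=0 mapping above (example values, not
 * exercised by the code): 0x12345678 falls in the memory space and is
 * used unchanged; 0xf1234567 falls in the I/O space and is sign-extended
 * to alias the top of the physical address space; 0xf0001234 falls in
 * the PDC space and keeps its 32-bit offset while being placed into the
 * uppermost 1/16th of the physical address space.
 */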

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}
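
/*
 * Note that the hppa TLB lives in an interval tree rather than a flat
 * array: pa2.0 entries cover variable-sized ranges, so the single-point
 * query [addr, addr] above finds whichever entry spans that address.
 */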

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}
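
/*
 * Allocation above prefers the free list and otherwise evicts entries
 * round-robin from the dynamic portion of the array, i.e. everything
 * past the fixed BTLB slots at the front.
 */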

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Map absolute to physical. */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
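
/*
 * A worked example of the access-rights mapping above (values assumed):
 * an entry with ar_type = 2 (code page), ar_pl1 = 3, ar_pl2 = 0 yields
 * r_prot and x_prot for every privilege level 0..3, so prot becomes
 * PAGE_READ | PAGE_EXEC; the same entry with ar_pl1 = 0 would grant
 * access only to the most privileged level.
 */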

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}
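
/*
 * For instance (a sketch, values assumed): with PSW W=1 and a base
 * register of 0x4000000000000000, bits [63:62] of that register
 * (here 0b01) are copied into bits [63:62] of the IOR, matching the
 * "b" field formation described in the comment above.
 */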

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                  " while accessing I/O at %#08" HWADDR_PRIx "\n",
                  env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure. Raise the indicated exception. */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success! Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask. We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}
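
/*
 * A PA 1.1 software TLB insert is thus a two-step protocol: ITLBA above
 * latches the address portion into env->tlb_partial, and the matching
 * ITLBP below supplies the protection word and validates the entry.
 */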

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}
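
/*
 * The low four bits of R1 above encode the entry size as a power of
 * four: a value of n covers TARGET_PAGE_SIZE << (2 * n) bytes, i.e.
 * 1, 4, 16, ... pages, which is why both the virtual range and the
 * physical address are aligned with masks widened by mask_shift.
 */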

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}
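
/*
 * A note on the broadcast scheme above (as we read it): the purge is
 * queued as normal async work on every other vCPU, while the issuing
 * vCPU defers its own flush to "safe" work, which only runs once all
 * vCPUs are outside their execution loops; this is what gives the
 * helper the synchronous behaviour noted in its comment.
 */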

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}
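
/*
 * HELPER(lpa) backs the LPA (Load Physical Address) instruction: the
 * access type of 0 bypasses the permission, D, B and T checks in
 * hppa_get_physical_address, and a plain TLB miss is promoted to the
 * non-access variant, since LPA itself performs no data access.
 */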

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}