/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address. This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

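/*
 * The architectural TLB is kept in an interval tree keyed by virtual
 * address range, so that entries of different sizes (normal pages,
 * superpages, BTLB entries) can all be matched by one point query.
 */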
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

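/*
 * Invalidate one architectural TLB entry: flush the covered range from
 * the QEMU softmmu TLB and, unless the entry is a BTLB entry and
 * FORCE_FLUSH_BTLB is clear, remove it from the interval tree and
 * return it to the unused list.
 */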
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

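/*
 * As modeled here, a protection ID register holds the 16-bit PID in
 * bits 16:1, with a write-disable (WD) flag in bit 0; the PID is
 * compared against the page's access_id. Worked example: with
 * access_id 0x1234 and a register holding 0x2469 (0x1234 << 1 | 1),
 * the PIDs match and the set WD bit withholds PAGE_WRITE, leaving
 * PAGE_EXEC | PAGE_READ.
 */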
#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

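/*
 * On PA 2.0 the control registers are 64 bits wide and hold two
 * protection IDs apiece, so both halves of CR_PID1..CR_PID4 are tried.
 */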
static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

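/*
 * Translate virtual address ADDR for access TYPE (a mask of
 * PAGE_{READ,WRITE,EXEC}, or 0 for a non-architectural access from
 * within QEMU). On return, *PPHYS and *PPROT hold the physical address
 * and the permitted protections. The result is -1 on success, or the
 * EXCP_* code of the fault to be raised.
 */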
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled. Map absolute to physical. */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU. Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /*
     * In priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
    } else if (!ent->d) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
    } else if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * Except when PSW_X is set, allow this single access to succeed.
             * The write bit will be invalidated for subsequent accesses.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

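/*
 * Record the faulting virtual address in the ISR/IOR interruption
 * parameter registers, but only while PSW_Q (interruption state
 * collection) is enabled.
 */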
void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

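/*
 * Raise EXCP as the pending exception, first restoring guest state
 * from the host return address and latching the faulting address
 * into ISR/IOR.
 */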
G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                  " while accessing I/O at %#08" HWADDR_PRIx "\n",
                  env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

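/*
 * The softmmu fill hook: translate ADDR and install the page into the
 * QEMU TLB, or raise the indicated exception. With PROBE set, report
 * failure to the caller instead of raising the exception.
 */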
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure. Raise the indicated exception. */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success! Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask. We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

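/*
 * Fill in the access id, access rights and flag bits of a TLB entry
 * from an ITLBP-format word, and make the entry live by inserting it
 * into the interval tree.
 */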
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

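/*
 * PA 2.0 TLB insertion: R1 carries the physical page number together
 * with a 4-bit page-size exponent (the entry spans 2^(2*(r1&0xf))
 * pages); R2 carries the access id, access rights and flag bits.
 */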
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

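/*
 * Empty the dynamic portion of the TLB, leaving any BTLB entries in
 * place, and rebuild the interval tree from scratch.
 */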
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

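/*
 * Privilege check for a branch to a gateway page: look up the TLB
 * entry for the branch target and, if the page is a gateway (bit 2 of
 * ar_type set), promote the privilege level in IAOQ_F per ar_type & 3.
 * Privilege is never lowered by this path.
 */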
uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;
        }
    }
    return iaoq_f;
}