/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

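/*
 * Convert a PA 2.0 absolute address into a QEMU physical address.
 * The high bits of the absolute address select between the memory,
 * I/O and PDC address spaces; the _w1 and _w0 variants handle
 * PSW.W=1 (64-bit) and PSW.W=0 (32-bit) addressing respectively.
 */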
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}

static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
    }
}

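/* Return the TLB entry covering ADDR, or NULL if no entry matches. */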
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

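/*
 * Flush the QEMU TLB for the range covered by ENT.  Unless ENT is a
 * BTLB entry and FORCE_FLUSH_BTLB is clear, also remove it from the
 * interval tree and, for non-BTLB entries, return it to the unused list.
 */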
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

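/* Flush every TLB entry overlapping the virtual range [VA_B, VA_E]. */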
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

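/*
 * Allocate a TLB entry: take one from the unused list if possible,
 * otherwise evict the next non-BTLB entry in round-robin order.
 */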
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

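/*
 * Translate ADDR for the given mmu index and access TYPE.  Return -1
 * on success, with *PPHYS and *PPROT filled in, or the EXCP_* value
 * to raise on failure.  If TLB_ENTRY is non-NULL it receives the
 * matching hppa TLB entry, if one was found.
 */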
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

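/* Debug translation hook: return the physical address for ADDR,
   or -1 if there is no translation at all. */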
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

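/*
 * Raise exception EXCP.  When PSW_Q is set, also record the faulting
 * virtual address in the ISR/IOR interruption parameter registers
 * before exiting the cpu loop.
 */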
G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;

    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                cpu_restore_state(cs, retaddr);

                b = env->gr[env->unwind_breg];
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;

                cpu_loop_exit(cs);
            }
        }
    }
    cpu_loop_exit_restore(cs, retaddr);
}

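/*
 * TCG tlb_fill hook: translate ADDR, install the result into the QEMU
 * TLB on success, and either return false (when probing) or raise the
 * architectural fault on failure.
 */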
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr,
                                 addr, mmu_idx == MMU_PHYS_IDX);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

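/* Fill in the access id, protection and flag bits of ENT from the
   PA 1.1 protection word REG, mark the entry valid, and insert it
   into the lookup tree. */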
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

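/*
 * Insert a PA 2.0 TLB entry.  R1 carries the physical page number and
 * the page-size mask, R2 the protection, access id and flag bits;
 * VA_B is the virtual base address, aligned down to the entry size.
 */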
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

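/* Insert Data/Instruction TLB Translation (PA 2.0).  The virtual address
   is taken from the data (ISR/IOR) or instruction (IIASQ/IIAOQ)
   interruption registers respectively. */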
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

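/*
 * Discard all non-BTLB entries: clear them, rebuild the unused list,
 * re-seed the interval tree with only the valid BTLB entries, and
 * flush the QEMU TLB.
 */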
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

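/* The protection ids (CR_PID*) have changed: flush the mmu indexes that
   perform protection-id checks so they are re-evaluated. */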
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

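/* Load Physical Address: translate ADDR without an access check and
   return the physical address, raising a non-access TLB miss fault
   if no translation exists. */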
target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}