/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

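/*
 * Convert an absolute address to a physical address for PA 2.0 with
 * PSW.W=1 (64-bit addressing): bits [61:58] == 0xf select the I/O or
 * PDC spaces, anything else maps to ordinary memory.
 */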
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
}

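/*
 * As above, but for PSW.W=0 (32-bit addressing): bits [31:28] == 0xf
 * select the I/O or PDC spaces.
 */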
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}

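/*
 * Absolute-to-physical conversion for the current CPU: an identity map
 * on PA 1.x, otherwise the wide or narrow PA 2.0 mapping above,
 * selected by PSW.W.
 */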
static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
    }
}

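/* Return the hppa TLB entry covering ADDR, or NULL if none is valid. */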
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

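/*
 * Invalidate one hppa TLB entry: flush its range from the QEMU softmmu
 * TLB, unlink it from the interval tree and clear it.  Dynamic entries
 * go back on the unused list; BTLB entries are only touched when
 * FORCE_FLUSH_BTLB is set.
 */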
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

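/* Invalidate all TLB entries overlapping [VA_B, VA_E]; BTLB entries survive. */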
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

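/*
 * Allocate a TLB entry for insertion: take one from the unused list if
 * possible, otherwise evict the next dynamic (non-BTLB) entry in
 * round-robin order.
 */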
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

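/*
 * Translate ADDR for the given access TYPE and privilege (MMU_IDX).
 * On success return -1 with *PPHYS and *PPROT filled in; otherwise
 * return the EXCP_* code the caller should raise.
 */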
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

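/*
 * Softmmu TLB fill hook: translate ADDR and install the page into the
 * QEMU TLB, or raise the architectural fault (unless this is only a
 * probe, in which case return false).
 */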
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

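/*
 * Fill in the access rights and flag bits of ENT from a PA 1.1 ITLBP
 * protection word, mark the entry valid, and make it visible in the
 * interval tree.
 */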
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ureg reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

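/*
 * Insert a PA 2.0 TLB entry: R1 carries the physical page number and
 * the page-size field (low 4 bits), R2 the access rights and flag
 * bits; VA_B is the virtual address, aligned down to the entry size.
 */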
static void itlbt_pa20(CPUHPPAState *env, target_ureg r1,
                       target_ureg r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

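/*
 * The PA 2.0 insert-TLB helpers reassemble the 64-bit virtual address
 * from the interruption registers: CR_ISR/CR_IOR for the data TLB,
 * CR_IIASQ/CR_IIAOQ for the instruction TLB.
 */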
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ureg r1, target_ureg r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ureg r1, target_ureg r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    target_ulong addr = (target_ulong) data.target_ptr;

    hppa_flush_tlb_range(env, addr, addr);
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

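/*
 * Reset the dynamic part of the TLB: clear all non-BTLB entries,
 * rebuild the unused list and the interval tree (keeping valid BTLB
 * entries), then flush the QEMU softmmu TLB.
 */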
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

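/*
 * The protection IDs (CR_PID*) have changed; flush the mmu indexes that
 * perform protection-ID checks so stale translations are dropped.
 */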
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

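/*
 * LPA: translate ADDR through the data TLB and return the physical
 * address.  Translation failures raise the corresponding fault, with a
 * TLB miss reported as a non-access TLB miss.
 */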
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}