/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

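/*
 * Map a PA 2.0 wide-mode (PSW.W=1) absolute address to a QEMU physical
 * address.  Per the checks below, addresses with bits [61:58] != 0xf are
 * memory space, a non-zero field at bits [57:54] selects I/O space, and
 * everything else is routed to PDC space.
 */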
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
}

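/*
 * As above, but for PA 2.0 narrow mode (PSW.W=0): 32-bit absolute
 * addresses with bits [31:28] != 0xf are memory space, a non-zero field
 * at bits [27:24] selects I/O space, and the rest is PDC space.
 */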
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}

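/*
 * Dispatch on cpu model and PSW.W: PA 1.x uses absolute addresses
 * unchanged, while PA 2.0 applies the wide or narrow mapping above.
 */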
static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
    }
}

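/* Find the hppa TLB entry, if any, whose virtual range covers ADDR. */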
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

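/*
 * Invalidate one hppa TLB entry: flush its range from the QEMU softmmu
 * TLB, then remove it from the interval tree and return it to the unused
 * list.  BTLB entries are only removed when FORCE_FLUSH_BTLB is set.
 */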
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

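/* Flush all TLB entries that overlap the virtual range [VA_B, VA_E]. */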
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

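/*
 * Allocate a TLB entry, taking one from the unused list if possible and
 * otherwise recycling the non-BTLB slots in round-robin order via
 * env->tlb_last.
 */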
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

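/*
 * Translate virtual address ADDR for an access of type TYPE (PAGE_READ,
 * PAGE_WRITE or PAGE_EXEC, or 0 for a non-architectural probe) at the
 * privilege implied by MMU_IDX.  Returns -1 on success, filling in
 * *PPHYS and *PPROT, or the EXCP_* code of the fault to raise.
 */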
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

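/* Translate ADDR for a debug access (e.g. gdbstub or monitor dumps). */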
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

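/*
 * Raise EXCP, recording the faulting virtual address in the ISR/IOR
 * interruption registers when PSW.Q permits, then exit to the main loop.
 */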
G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;

    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                cpu_restore_state(cs, retaddr);

                b = env->gr[env->unwind_breg];
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;

                cpu_loop_exit(cs);
            }
        }
    }
    cpu_loop_exit_restore(cs, retaddr);
}

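/*
 * Softmmu fault handler: translate ADDR through the hppa TLB and install
 * the result into the QEMU TLB, or raise the indicated exception.  When
 * PROBE is set, report failure to the caller instead of raising.
 */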
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure. Raise the indicated exception. */
        raise_exception_with_ior(env, excp, retaddr,
                                 addr, mmu_idx == MMU_PHYS_IDX);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success! Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask. We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

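/*
 * Decode the PA 1.1 protection word REG (access id, access rights and
 * the U/B/D/T bits) into ENT, mark the entry valid, and insert it into
 * the interval tree.
 */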
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

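/*
 * Common body of the PA 2.0 insert-TLB-translation helpers: R1 supplies
 * the physical page and page-size mask, R2 the protection bits, and VA_B
 * the virtual base formed from the interruption registers.
 */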
static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

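/* Insert Data TLB Translation: the data gva is taken from ISR:IOR. */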
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

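/* Insert Instruction TLB Translation: the gva is taken from IIASQ:IIAOQ. */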
void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors. */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

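/*
 * Discard every non-BTLB entry and rebuild the unused list and interval
 * tree, implementing a full TLB purge local to this cpu.
 */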
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

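/*
 * The protection-id registers (CR_PID*) have changed, so flush the QEMU
 * TLB for the mmu indexes that perform protection-id checks.
 */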
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

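/*
 * LPA (Load Physical Address): translate ADDR as a non-access kernel
 * probe and return the physical address, raising a non-access TLB miss
 * or other fault on failure.
 */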
target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}