/*
 * HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
}

hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}
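
/*
 * Illustration of the absolute-address decode implemented by the two
 * helpers above (derived directly from their checks):
 *   w1: bits [61:58] != 0xf                -> memory space, low 62 bits kept
 *       bits [61:58] == 0xf, [57:54] != 0  -> I/O space, bits [63:62] forced on
 *       otherwise                          -> PDC space, bits [63:60] forced on
 *   w0: the same decode compressed into 32 bits, keyed on bits [31:28]
 *       and [27:24] instead.
 */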

static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
    }
}

static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}
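
/*
 * Note on the lookup above: interval_tree_iter_first(root, addr, addr)
 * returns any entry whose [itree.start, itree.last] range covers ADDR,
 * so a single superpage or BTLB entry spanning many pages is found by
 * any address inside it.  For example, an entry with start 0x100000 and
 * last 0x13ffff matches a probe at 0x123456.
 */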

static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}
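
/*
 * Layout note, visible in the check above: env->tlb[] is partitioned so
 * that slots [0, HPPA_BTLB_ENTRIES) hold the firmware-managed block TLB
 * entries; only the remaining slots are recycled through the tlb_unused
 * free list.
 */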

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        /* Evict the old entry into the unused list. */
        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}
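
/*
 * Allocation policy above: pop from the tlb_unused free list when
 * possible; otherwise evict in round-robin order, wrapping back to the
 * first slot past the BTLB entries once the end of the array is reached.
 */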

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
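
/*
 * Worked example for the access-rights mapping above: consider an entry
 * with ar_type = 1 (read/write data), ar_pl1 = 3, ar_pl2 = 0.  At
 * priv = 0 (most privileged), both r_prot (0 <= 3) and w_prot (0 <= 0)
 * are granted, so prot = PAGE_READ | PAGE_WRITE.  At priv = 3 (least
 * privileged), only r_prot holds (3 <= 3) while w_prot fails (3 > 0),
 * so prot = PAGE_READ.
 */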

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;

    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                cpu_restore_state(cs, retaddr);

                b = env->gr[env->unwind_breg];
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;

                cpu_loop_exit(cs);
            }
        }
    }
    cpu_loop_exit_restore(cs, retaddr);
}
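
/*
 * Sketch of the b-field formation above: with PSW_W set, gr[breg] >> 62
 * leaves the two space bits [63:62] of the base register in bits [1:0];
 * shifting back by 62 deposits them into IOR bits [63:62].  In narrow
 * mode the space bits come from gr[breg] bits [31:30] instead.
 */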

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr,
                                 addr, mmu_idx == MMU_PHYS_IDX);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
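
/*
 * Probe semantics, as handled above: when PROBE is set the caller only
 * wants to know whether the access would fault, so a failed translation
 * returns false rather than raising the architectural exception.
 */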

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}
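
/*
 * Worked example for the range encoding above: with r1 & 0xf == 2,
 * mask_shift == 4 and va_size == TARGET_PAGE_SIZE << 4 -- sixteen pages
 * (64 KiB with 4 KiB pages).  va_b is aligned down to that size, so the
 * single TLB entry spans the whole naturally-aligned block.
 */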

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(env, start, end);
}
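
/*
 * Worked example for the encoding above: if the low bits of the incoming
 * address hold 2, then end = TARGET_PAGE_SIZE << 4, i.e. the purge covers
 * sixteen pages starting at the page-aligned base; a value of 0 purges a
 * single page, matching the PA1.x behaviour.
 */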

/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}
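
/*
 * Design note on the broadcast above: remote CPUs are handed the purge
 * asynchronously; if any were queued, the initiating CPU runs its own
 * purge via async_safe_run_on_cpu(), which executes the work with all
 * vCPUs quiesced, giving a common completion point for the purge.
 */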

void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}
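
/*
 * Summary of the PDC_BLOCK_TLB calling convention implemented above:
 * gr[25] selects the option (0 info, 1 insert, 2 purge, 3 purge all),
 * gr[24..19] carry the option's arguments, and the PDC status is
 * returned in gr[28] (0 OK, -1 nonexistent procedure, -2 nonexistent
 * option, -10 invalid argument).
 */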