/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    /*
     * Figure H-8 "62-bit Absolute Accesses when PSW W-bit is 1" describes
     * an algorithm in which a 62-bit absolute address is transformed to
     * a 64-bit physical address.  This must then be combined with that
     * pictured in Figure H-11 "Physical Address Space Mapping", in which
     * the full physical address is truncated to the N-bit physical address
     * supported by the implementation.
     *
     * Since the supported physical address space is below 54 bits, the
     * H-8 algorithm is moot and all that is left is to truncate.
     */
    QEMU_BUILD_BUG_ON(TARGET_PHYS_ADDR_SPACE_BITS > 54);
    return sextract64(addr, 0, TARGET_PHYS_ADDR_SPACE_BITS);
}

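/*
 * Illustration (not from the spec; assumes a 40-bit physical space purely
 * for the arithmetic): sextract64() sign-extends from the top implemented
 * bit, so an absolute address near the top of the 62-bit space such as
 * 0x3fff_ffff_f000_0000 truncates to 0xff_f000_0000, i.e. the top of the
 * absolute space stays mapped to the top of the physical space as Figure
 * H-11 requires; a zero-extending extract would not preserve that.
 */
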
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    /*
     * See Figure H-10, "Absolute Accesses when PSW W-bit is 0",
     * combined with Figure H-11, as above.
     */
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        addr = (uint32_t)addr;
    } else if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        addr = (int32_t)addr;
    } else {
        /*
         * PDC address space:
         * Figures H-10 and H-11 of the parisc2.0 spec do not specify
         * where to map into the 64-bit PDC address space.
         * We map with an offset which equals the 32-bit address, which
         * is what can be seen on physical machines too.
         */
        addr = (uint32_t)addr;
        addr |= -1ull << (TARGET_PHYS_ADDR_SPACE_BITS - 4);
    }
    return addr;
}

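/*
 * Worked example of the decode above (illustrative values):
 *   0x1234_5678: top nibble != 0xf         -> memory, phys 0x1234_5678
 *   0xf100_0000: bits 24..27 nonzero       -> I/O, sign-extended so it
 *                lands at the very top of the physical address space
 *   0xf000_0123: 0xf0 prefix, bits 24..27 == 0 -> PDC, 32-bit offset kept
 *                and ORed into the top sixteenth of the physical space
 */
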
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

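/*
 * Note on the data structure: entries live in an interval tree rather
 * than a flat array so that one architectural TLB entry can span more
 * than one target page (PA2.0 variable page sizes and block TLB
 * entries); a point lookup uses the degenerate interval [addr, addr].
 */
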
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        /* Evict an existing (non-btlb) entry in round-robin order. */
        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

#define ACCESS_ID_MASK 0xffff

/* Return the set of protections allowed by a PID match. */
static int match_prot_id_1(uint32_t access_id, uint32_t prot_id)
{
    if (((access_id ^ (prot_id >> 1)) & ACCESS_ID_MASK) == 0) {
        return (prot_id & 1
                ? PAGE_EXEC | PAGE_READ
                : PAGE_EXEC | PAGE_READ | PAGE_WRITE);
    }
    return 0;
}

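/*
 * Worked example (illustrative values): with a PID register holding
 * prot_id = 0x2469 == (pid 0x1234 << 1) | 1, an access_id of 0x1234
 * matches, and the set low bit (the write-disable bit) means the match
 * grants only PAGE_EXEC | PAGE_READ, never PAGE_WRITE.
 */
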
static int match_prot_id32(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
    }
    return 0;
}

static int match_prot_id64(CPUHPPAState *env, uint32_t access_id)
{
    int r, i;

    for (i = CR_PID1; i <= CR_PID4; ++i) {
        r = match_prot_id_1(access_id, env->cr[i]);
        if (r) {
            return r;
        }
        r = match_prot_id_1(access_id, env->cr[i] >> 32);
        if (r) {
            return r;
        }
    }
    return 0;
}

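/*
 * On PA2.0 each 64-bit PID control register holds two protection ids,
 * one per 32-bit half, hence the second match against env->cr[i] >> 32;
 * pa1.x has a single id per register, so match_prot_id32() checks only
 * the low word.
 */
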
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Map absolute to physical.  */
    if (MMU_IDX_MMU_DISABLED(mmu_idx)) {
        switch (mmu_idx) {
        case MMU_ABS_W_IDX:
            phys = hppa_abs_to_phys_pa2_w1(addr);
            break;
        case MMU_ABS_IDX:
            if (hppa_is_pa20(env)) {
                phys = hppa_abs_to_phys_pa2_w0(addr);
            } else {
                phys = (uint32_t)addr;
            }
            break;
        default:
            g_assert_not_reached();
        }
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /*
     * No guest access type indicates a non-architectural access from
     * within QEMU.  Bypass checks for access, D, B, P and T bits.
     */
    if (type == 0) {
        goto egress;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        int access_prot = (hppa_is_pa20(env)
                           ? match_prot_id64(env, ent->access_id)
                           : match_prot_id32(env, ent->access_id));
        if (unlikely(!(type & access_prot))) {
            /* Not allowed -- Inst/Data Memory Protection Id Fault. */
            ret = type & PAGE_EXEC ? EXCP_IMP : EXCP_DMPI;
            goto egress;
        }
        /* Otherwise exclude permissions not allowed (i.e. WD). */
        prot &= access_prot;
    }

    if (unlikely(!(prot & type))) {
        /* Not allowed -- Inst/Data Memory Access Rights Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /*
     * In priority order, check for conditions which raise faults.
     * Remove PROT bits that cover the condition we want to check,
     * so that the resulting PROT will force a re-check of the
     * architectural TLB entry for the next access.
     */
    if (unlikely(ent->t)) {
        prot &= PAGE_EXEC;
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
    } else if (!ent->d) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
    } else if (unlikely(ent->b)) {
        prot &= PAGE_READ | PAGE_EXEC;
        if (type & PAGE_WRITE) {
            /*
             * The B bit is set -- Data Memory Break Fault.
             * Except when PSW_X is set, allow this single access to succeed.
             * The write bit will be invalidated for subsequent accesses.
             */
            if (env->psw_xb & PSW_X) {
                prot |= PAGE_WRITE_INV;
            } else {
                ret = EXCP_DMB;
            }
        }
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp, mmu_idx;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    mmu_idx = (cpu->env.psw & PSW_D ? MMU_KERNEL_IDX :
               cpu->env.psw & PSW_W ? MMU_ABS_W_IDX : MMU_ABS_IDX);

    excp = hppa_get_physical_address(&cpu->env, addr, mmu_idx, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void hppa_set_ior_and_isr(CPUHPPAState *env, vaddr addr, bool mmu_disabled)
{
    if (env->psw & PSW_Q) {
        /*
         * For pa1.x, the offset and space never overlap, and so we
         * simply extract the high and low part of the virtual address.
         *
         * For pa2.0, the formation of these are described in section
         * "Interruption Parameter Registers", page 2-15.
         */
        env->cr[CR_IOR] = (uint32_t)addr;
        env->cr[CR_ISR] = addr >> 32;

        if (hppa_is_pa20(env)) {
            if (mmu_disabled) {
                /*
                 * If data translation was disabled, the ISR contains
                 * the upper portion of the abs address, zero-extended.
                 */
                env->cr[CR_ISR] &= 0x3fffffff;
            } else {
                /*
                 * If data translation was enabled, the upper two bits
                 * of the IOR (the b field) are equal to the two space
                 * bits from the base register used to form the gva.
                 */
                uint64_t b;

                b = env->unwind_breg ? env->gr[env->unwind_breg] : 0;
                b >>= (env->psw & PSW_W ? 62 : 30);
                env->cr[CR_IOR] |= b << 62;
            }
        }
    }
}

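/*
 * Sketch of the "b field" formation above, with the PSW W-bit clear
 * (values illustrative): a base register of 0x4000_1234 carries space
 * bits 0b01 at bits 30..31; the shift right by 30 isolates them, and
 * "b << 62" places them in IOR bits 62..63 above the 32-bit offset.
 */
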
G_NORETURN static void
raise_exception_with_ior(CPUHPPAState *env, int excp, uintptr_t retaddr,
                         vaddr addr, bool mmu_disabled)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    cpu_restore_state(cs, retaddr);
    hppa_set_ior_and_isr(env, addr, mmu_disabled);

    cpu_loop_exit(cs);
}

void hppa_cpu_do_transaction_failed(CPUState *cs, hwaddr physaddr,
                                    vaddr addr, unsigned size,
                                    MMUAccessType access_type,
                                    int mmu_idx, MemTxAttrs attrs,
                                    MemTxResult response, uintptr_t retaddr)
{
    CPUHPPAState *env = cpu_env(cs);

    qemu_log_mask(LOG_GUEST_ERROR, "HPMC at " TARGET_FMT_lx ":" TARGET_FMT_lx
                  " while accessing I/O at %#08" HWADDR_PRIx "\n",
                  env->iasq_f, env->iaoq_f, physaddr);

    /* FIXME: Enable HPMC exceptions when firmware has clean device probing */
    if (0) {
        raise_exception_with_ior(env, EXCP_HPMC, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }
}

bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx, a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);

        /* Failure.  Raise the indicated exception.  */
        raise_exception_with_ior(env, excp, retaddr, addr,
                                 MMU_IDX_MMU_DISABLED(mmu_idx));
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already.  */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ulong reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

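/*
 * For reference, the PA 1.1 protection word layout implied by the
 * extracts above (bit 0 is the least significant bit of REG):
 *   [29] T   [28] D   [27] B   [26:24] ar_type   [23:22] ar_pl1
 *   [21:20] ar_pl2    [19] U   [18:1] access_id
 */
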
/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ulong reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

static void itlbt_pa20(CPUHPPAState *env, target_ulong r1,
                       target_ulong r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = (uint64_t)TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;

    /* Extract all 52 bits present in the page table entry. */
    ent->pa = r1 << (TARGET_PAGE_BITS - 5);
    /* Align per the page size. */
    ent->pa &= TARGET_PAGE_MASK << mask_shift;
    /* Ignore the bits beyond physical address space. */
    ent->pa = sextract64(ent->pa, 0, TARGET_PHYS_ADDR_SPACE_BITS);

    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

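/*
 * Page-size encoding sketch: the low four bits of R1 select the entry
 * size in powers of four, so with 4 KiB base pages (TARGET_PAGE_BITS
 * == 12 on hppa) r1 & 0xf == 0 gives a 4 KiB entry, 1 gives 16 KiB,
 * 2 gives 64 KiB, and so on; va_b is aligned down to that size.
 */
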
void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ulong r1, target_ulong r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    vaddr start = data.target_ptr;
    vaddr end;

    /*
     * PA2.0 allows a range of pages encoded into GR[b], which we have
     * copied into the bottom bits of the otherwise page-aligned address.
     * PA1.x will always provide zero here, for a single page flush.
     */
    end = start & 0xf;
    start &= TARGET_PAGE_MASK;
    end = (vaddr)TARGET_PAGE_SIZE << (2 * end);
    end = start + end - 1;

    hppa_flush_tlb_range(cpu_env(cpu), start, end);
}

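/*
 * Example of the encoding above (illustrative, 4 KiB pages): a value of
 * 0x0001_2003 splits into start = 0x0001_2000 and a range nibble of 3,
 * so the flushed range is 4 KiB << 6 = 256 KiB from 0x0001_2000.
 */
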
/* This is local to the current cpu. */
void HELPER(ptlb_l)(CPUHPPAState *env, target_ulong addr)
{
    trace_hppa_tlb_ptlb_local(env);
    ptlb_work(env_cpu(env), RUN_ON_CPU_TARGET_PTR(addr));
}

/* This is synchronous across all processors.  */
void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    bool wait = false;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
            wait = true;
        }
    }
    if (wait) {
        async_safe_run_on_cpu(src, ptlb_work, data);
    } else {
        ptlb_work(src, data);
    }
}

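/*
 * Design note (a sketch of the synchronization intent, not a precise
 * statement of the work-queue semantics): remote vcpus get the flush
 * queued with async_run_on_cpu(), and the issuing cpu then performs its
 * own flush via async_safe_run_on_cpu(), which runs in an exclusive
 * window with the other vcpus quiesced, approximating the
 * architecturally synchronous purge.  A lone cpu flushes directly.
 */
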
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ulong HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        raise_exception_with_ior(env, excp, GETPC(), addr, false);
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env_cpu(env), 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(uint32_t),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}

uint64_t HELPER(b_gate_priv)(CPUHPPAState *env, uint64_t iaoq_f)
{
    uint64_t gva = hppa_form_gva(env, env->iasq_f, iaoq_f);
    HPPATLBEntry *ent = hppa_find_tlb(env, gva);

    if (ent == NULL) {
        raise_exception_with_ior(env, EXCP_ITLB_MISS, GETPC(), gva, false);
    }

    /*
     * There should be no need to check page permissions, as that will
     * already have been done by tb_lookup via get_page_addr_code.
     * All we need at this point is to check the ar_type.
     *
     * No change for non-gateway pages or for priv decrease.
     */
    if (ent->ar_type & 4) {
        int old_priv = iaoq_f & 3;
        int new_priv = ent->ar_type & 3;

        if (new_priv < old_priv) {
            iaoq_f = (iaoq_f & -4) | new_priv;
        }
    }
    return iaoq_f;
}