/*
 *  HPPA memory access helper routines
 *
 *  Copyright (c) 2017 Helge Deller
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2.1 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
27 static hppa_tlb_entry
*hppa_find_tlb(CPUHPPAState
*env
, vaddr addr
)
31 for (i
= 0; i
< ARRAY_SIZE(env
->tlb
); ++i
) {
32 hppa_tlb_entry
*ent
= &env
->tlb
[i
];
33 if (ent
->va_b
<= addr
&& addr
<= ent
->va_e
) {
34 trace_hppa_tlb_find_entry(env
, ent
+ i
, ent
->entry_valid
,
35 ent
->va_b
, ent
->va_e
, ent
->pa
);
39 trace_hppa_tlb_find_entry_not_found(env
, addr
);
43 static void hppa_flush_tlb_ent(CPUHPPAState
*env
, hppa_tlb_entry
*ent
)
45 CPUState
*cs
= env_cpu(env
);
46 unsigned i
, n
= 1 << (2 * ent
->page_size
);
47 uint64_t addr
= ent
->va_b
;
49 trace_hppa_tlb_flush_ent(env
, ent
, ent
->va_b
, ent
->va_e
, ent
->pa
);
51 for (i
= 0; i
< n
; ++i
, addr
+= TARGET_PAGE_SIZE
) {
52 /* Do not flush MMU_PHYS_IDX. */
53 tlb_flush_page_by_mmuidx(cs
, addr
, 0xf);
56 memset(ent
, 0, sizeof(*ent
));
60 static hppa_tlb_entry
*hppa_alloc_tlb_ent(CPUHPPAState
*env
)
63 uint32_t i
= env
->tlb_last
;
65 env
->tlb_last
= (i
== ARRAY_SIZE(env
->tlb
) - 1 ? 0 : i
+ 1);
68 hppa_flush_tlb_ent(env
, ent
);
72 int hppa_get_physical_address(CPUHPPAState
*env
, vaddr addr
, int mmu_idx
,
73 int type
, hwaddr
*pphys
, int *pprot
)
76 int prot
, r_prot
, w_prot
, x_prot
;
80 /* Virtual translation disabled. Direct map virtual to physical. */
81 if (mmu_idx
== MMU_PHYS_IDX
) {
83 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
87 /* Find a valid tlb entry that matches the virtual address. */
88 ent
= hppa_find_tlb(env
, addr
);
89 if (ent
== NULL
|| !ent
->entry_valid
) {
92 ret
= (type
== PAGE_EXEC
) ? EXCP_ITLB_MISS
: EXCP_DTLB_MISS
;
96 /* We now know the physical address. */
97 phys
= ent
->pa
+ (addr
& ~TARGET_PAGE_MASK
);
99 /* Map TLB access_rights field to QEMU protection. */
100 r_prot
= (mmu_idx
<= ent
->ar_pl1
) * PAGE_READ
;
101 w_prot
= (mmu_idx
<= ent
->ar_pl2
) * PAGE_WRITE
;
102 x_prot
= (ent
->ar_pl2
<= mmu_idx
&& mmu_idx
<= ent
->ar_pl1
) * PAGE_EXEC
;
103 switch (ent
->ar_type
) {
104 case 0: /* read-only: data page */
107 case 1: /* read/write: dynamic data page */
108 prot
= r_prot
| w_prot
;
110 case 2: /* read/execute: normal code page */
111 prot
= r_prot
| x_prot
;
113 case 3: /* read/write/execute: dynamic code page */
114 prot
= r_prot
| w_prot
| x_prot
;
116 default: /* execute: promote to privilege level type & 3 */
121 /* access_id == 0 means public page and no check is performed */
122 if ((env
->psw
& PSW_P
) && ent
->access_id
) {
123 /* If bits [31:1] match, and bit 0 is set, suppress write. */
124 int match
= ent
->access_id
* 2 + 1;
126 if (match
== env
->cr
[CR_PID1
] || match
== env
->cr
[CR_PID2
] ||
127 match
== env
->cr
[CR_PID3
] || match
== env
->cr
[CR_PID4
]) {
128 prot
&= PAGE_READ
| PAGE_EXEC
;
129 if (type
== PAGE_WRITE
) {
136 /* No guest access type indicates a non-architectural access from
137 within QEMU. Bypass checks for access, D, B and T bits. */
142 if (unlikely(!(prot
& type
))) {
143 /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
144 ret
= (type
& PAGE_EXEC
) ? EXCP_IMP
: EXCP_DMAR
;
148 /* In reverse priority order, check for conditions which raise faults.
149 As we go, remove PROT bits that cover the condition we want to check.
150 In this way, the resulting PROT will force a re-check of the
151 architectural TLB entry for the next access. */
152 if (unlikely(!ent
->d
)) {
153 if (type
& PAGE_WRITE
) {
154 /* The D bit is not set -- TLB Dirty Bit Fault. */
155 ret
= EXCP_TLB_DIRTY
;
157 prot
&= PAGE_READ
| PAGE_EXEC
;
159 if (unlikely(ent
->b
)) {
160 if (type
& PAGE_WRITE
) {
161 /* The B bit is set -- Data Memory Break Fault. */
164 prot
&= PAGE_READ
| PAGE_EXEC
;
166 if (unlikely(ent
->t
)) {
167 if (!(type
& PAGE_EXEC
)) {
168 /* The T bit is set -- Page Reference Fault. */
177 trace_hppa_tlb_get_physical_address(env
, ret
, prot
, addr
, phys
);
181 hwaddr
hppa_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
183 HPPACPU
*cpu
= HPPA_CPU(cs
);
187 /* If the (data) mmu is disabled, bypass translation. */
188 /* ??? We really ought to know if the code mmu is disabled too,
189 in order to get the correct debugging dumps. */
190 if (!(cpu
->env
.psw
& PSW_D
)) {
194 excp
= hppa_get_physical_address(&cpu
->env
, addr
, MMU_KERNEL_IDX
, 0,
197 /* Since we're translating for debugging, the only error that is a
198 hard error is no translation at all. Otherwise, while a real cpu
199 access might not have permission, the debugger does. */
200 return excp
== EXCP_DTLB_MISS
? -1 : phys
;
203 bool hppa_cpu_tlb_fill(CPUState
*cs
, vaddr addr
, int size
,
204 MMUAccessType type
, int mmu_idx
,
205 bool probe
, uintptr_t retaddr
)
207 HPPACPU
*cpu
= HPPA_CPU(cs
);
208 CPUHPPAState
*env
= &cpu
->env
;
209 int prot
, excp
, a_prot
;
224 excp
= hppa_get_physical_address(env
, addr
, mmu_idx
,
225 a_prot
, &phys
, &prot
);
226 if (unlikely(excp
>= 0)) {
230 trace_hppa_tlb_fill_excp(env
, addr
, size
, type
, mmu_idx
);
231 /* Failure. Raise the indicated exception. */
232 cs
->exception_index
= excp
;
233 if (cpu
->env
.psw
& PSW_Q
) {
234 /* ??? Needs tweaking for hppa64. */
235 cpu
->env
.cr
[CR_IOR
] = addr
;
236 cpu
->env
.cr
[CR_ISR
] = addr
>> 32;
238 cpu_loop_exit_restore(cs
, retaddr
);
241 trace_hppa_tlb_fill_success(env
, addr
& TARGET_PAGE_MASK
,
242 phys
& TARGET_PAGE_MASK
, size
, type
, mmu_idx
);
243 /* Success! Store the translation into the QEMU TLB. */
244 tlb_set_page(cs
, addr
& TARGET_PAGE_MASK
, phys
& TARGET_PAGE_MASK
,
245 prot
, mmu_idx
, TARGET_PAGE_SIZE
);
249 /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
250 void HELPER(itlba
)(CPUHPPAState
*env
, target_ulong addr
, target_ureg reg
)
252 hppa_tlb_entry
*empty
= NULL
;
255 /* Zap any old entries covering ADDR; notice empty entries on the way. */
256 for (i
= 0; i
< ARRAY_SIZE(env
->tlb
); ++i
) {
257 hppa_tlb_entry
*ent
= &env
->tlb
[i
];
258 if (ent
->va_b
<= addr
&& addr
<= ent
->va_e
) {
259 if (ent
->entry_valid
) {
260 hppa_flush_tlb_ent(env
, ent
);
268 /* If we didn't see an empty entry, evict one. */
270 empty
= hppa_alloc_tlb_ent(env
);
273 /* Note that empty->entry_valid == 0 already. */
274 empty
->va_b
= addr
& TARGET_PAGE_MASK
;
275 empty
->va_e
= empty
->va_b
+ TARGET_PAGE_SIZE
- 1;
276 empty
->pa
= extract32(reg
, 5, 20) << TARGET_PAGE_BITS
;
277 trace_hppa_tlb_itlba(env
, empty
, empty
->va_b
, empty
->va_e
, empty
->pa
);
280 /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
281 void HELPER(itlbp
)(CPUHPPAState
*env
, target_ulong addr
, target_ureg reg
)
283 hppa_tlb_entry
*ent
= hppa_find_tlb(env
, addr
);
285 if (unlikely(ent
== NULL
)) {
286 qemu_log_mask(LOG_GUEST_ERROR
, "ITLBP not following ITLBA\n");
290 ent
->access_id
= extract32(reg
, 1, 18);
291 ent
->u
= extract32(reg
, 19, 1);
292 ent
->ar_pl2
= extract32(reg
, 20, 2);
293 ent
->ar_pl1
= extract32(reg
, 22, 2);
294 ent
->ar_type
= extract32(reg
, 24, 3);
295 ent
->b
= extract32(reg
, 27, 1);
296 ent
->d
= extract32(reg
, 28, 1);
297 ent
->t
= extract32(reg
, 29, 1);
298 ent
->entry_valid
= 1;
299 trace_hppa_tlb_itlbp(env
, ent
, ent
->access_id
, ent
->u
, ent
->ar_pl2
,
300 ent
->ar_pl1
, ent
->ar_type
, ent
->b
, ent
->d
, ent
->t
);
303 /* Purge (Insn/Data) TLB. This is explicitly page-based, and is
304 synchronous across all processors. */
305 static void ptlb_work(CPUState
*cpu
, run_on_cpu_data data
)
307 CPUHPPAState
*env
= cpu
->env_ptr
;
308 target_ulong addr
= (target_ulong
) data
.target_ptr
;
309 hppa_tlb_entry
*ent
= hppa_find_tlb(env
, addr
);
311 if (ent
&& ent
->entry_valid
) {
312 hppa_flush_tlb_ent(env
, ent
);
316 void HELPER(ptlb
)(CPUHPPAState
*env
, target_ulong addr
)
318 CPUState
*src
= env_cpu(env
);
320 trace_hppa_tlb_ptlb(env
);
321 run_on_cpu_data data
= RUN_ON_CPU_TARGET_PTR(addr
);
325 async_run_on_cpu(cpu
, ptlb_work
, data
);
328 async_safe_run_on_cpu(src
, ptlb_work
, data
);
331 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
332 number of pages/entries (we choose all), and is local to the cpu. */
333 void HELPER(ptlbe
)(CPUHPPAState
*env
)
335 trace_hppa_tlb_ptlbe(env
);
336 memset(env
->tlb
, 0, sizeof(env
->tlb
));
337 tlb_flush_by_mmuidx(env_cpu(env
), 0xf);
340 void cpu_hppa_change_prot_id(CPUHPPAState
*env
)
342 if (env
->psw
& PSW_P
) {
343 tlb_flush_by_mmuidx(env_cpu(env
), 0xf);
347 void HELPER(change_prot_id
)(CPUHPPAState
*env
)
349 cpu_hppa_change_prot_id(env
);
352 target_ureg
HELPER(lpa
)(CPUHPPAState
*env
, target_ulong addr
)
357 excp
= hppa_get_physical_address(env
, addr
, MMU_KERNEL_IDX
, 0,
360 if (env
->psw
& PSW_Q
) {
361 /* ??? Needs tweaking for hppa64. */
362 env
->cr
[CR_IOR
] = addr
;
363 env
->cr
[CR_ISR
] = addr
>> 32;
365 if (excp
== EXCP_DTLB_MISS
) {
366 excp
= EXCP_NA_DTLB_MISS
;
368 trace_hppa_tlb_lpa_failed(env
, addr
);
369 hppa_dynamic_excp(env
, excp
, GETPC());
371 trace_hppa_tlb_lpa_success(env
, addr
, phys
);
375 /* Return the ar_type of the TLB at VADDR, or -1. */
376 int hppa_artype_for_page(CPUHPPAState
*env
, target_ulong vaddr
)
378 hppa_tlb_entry
*ent
= hppa_find_tlb(env
, vaddr
);
379 return ent
? ent
->ar_type
: -1;