/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
23 #include "exec/exec-all.h"
24 #include "exec/helper-proto.h"
25 #include "hw/core/cpu.h"
28 static HPPATLBEntry
*hppa_find_tlb(CPUHPPAState
*env
, vaddr addr
)
32 for (i
= 0; i
< ARRAY_SIZE(env
->tlb
); ++i
) {
33 HPPATLBEntry
*ent
= &env
->tlb
[i
];
34 if (ent
->itree
.start
<= addr
&& addr
<= ent
->itree
.last
) {
35 trace_hppa_tlb_find_entry(env
, ent
+ i
, ent
->entry_valid
,
36 ent
->itree
.start
, ent
->itree
.last
,
41 trace_hppa_tlb_find_entry_not_found(env
, addr
);
45 static void hppa_flush_tlb_ent(CPUHPPAState
*env
, HPPATLBEntry
*ent
,
46 bool force_flush_btlb
)
48 CPUState
*cs
= env_cpu(env
);
50 if (!ent
->entry_valid
) {
54 trace_hppa_tlb_flush_ent(env
, ent
, ent
->itree
.start
,
55 ent
->itree
.last
, ent
->pa
);
57 tlb_flush_range_by_mmuidx(cs
, ent
->itree
.start
,
58 ent
->itree
.last
- ent
->itree
.start
+ 1,
59 HPPA_MMU_FLUSH_MASK
, TARGET_LONG_BITS
);
61 /* never clear BTLBs, unless forced to do so. */
62 if (ent
< &env
->tlb
[HPPA_BTLB_ENTRIES
] && !force_flush_btlb
) {
66 memset(ent
, 0, sizeof(*ent
));
67 ent
->itree
.start
= -1;
70 static HPPATLBEntry
*hppa_alloc_tlb_ent(CPUHPPAState
*env
)
75 if (env
->tlb_last
< HPPA_BTLB_ENTRIES
|| env
->tlb_last
>= ARRAY_SIZE(env
->tlb
)) {
76 i
= HPPA_BTLB_ENTRIES
;
77 env
->tlb_last
= HPPA_BTLB_ENTRIES
+ 1;
85 hppa_flush_tlb_ent(env
, ent
, false);
89 int hppa_get_physical_address(CPUHPPAState
*env
, vaddr addr
, int mmu_idx
,
90 int type
, hwaddr
*pphys
, int *pprot
,
91 HPPATLBEntry
**tlb_entry
)
94 int prot
, r_prot
, w_prot
, x_prot
, priv
;
102 /* Virtual translation disabled. Direct map virtual to physical. */
103 if (mmu_idx
== MMU_PHYS_IDX
) {
105 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
109 /* Find a valid tlb entry that matches the virtual address. */
110 ent
= hppa_find_tlb(env
, addr
);
111 if (ent
== NULL
|| !ent
->entry_valid
) {
114 ret
= (type
== PAGE_EXEC
) ? EXCP_ITLB_MISS
: EXCP_DTLB_MISS
;
122 /* We now know the physical address. */
123 phys
= ent
->pa
+ (addr
- ent
->itree
.start
);
125 /* Map TLB access_rights field to QEMU protection. */
126 priv
= MMU_IDX_TO_PRIV(mmu_idx
);
127 r_prot
= (priv
<= ent
->ar_pl1
) * PAGE_READ
;
128 w_prot
= (priv
<= ent
->ar_pl2
) * PAGE_WRITE
;
129 x_prot
= (ent
->ar_pl2
<= priv
&& priv
<= ent
->ar_pl1
) * PAGE_EXEC
;
130 switch (ent
->ar_type
) {
131 case 0: /* read-only: data page */
134 case 1: /* read/write: dynamic data page */
135 prot
= r_prot
| w_prot
;
137 case 2: /* read/execute: normal code page */
138 prot
= r_prot
| x_prot
;
140 case 3: /* read/write/execute: dynamic code page */
141 prot
= r_prot
| w_prot
| x_prot
;
143 default: /* execute: promote to privilege level type & 3 */
148 /* access_id == 0 means public page and no check is performed */
149 if (ent
->access_id
&& MMU_IDX_TO_P(mmu_idx
)) {
150 /* If bits [31:1] match, and bit 0 is set, suppress write. */
151 int match
= ent
->access_id
* 2 + 1;
153 if (match
== env
->cr
[CR_PID1
] || match
== env
->cr
[CR_PID2
] ||
154 match
== env
->cr
[CR_PID3
] || match
== env
->cr
[CR_PID4
]) {
155 prot
&= PAGE_READ
| PAGE_EXEC
;
156 if (type
== PAGE_WRITE
) {
163 /* No guest access type indicates a non-architectural access from
164 within QEMU. Bypass checks for access, D, B and T bits. */
169 if (unlikely(!(prot
& type
))) {
170 /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
171 ret
= (type
& PAGE_EXEC
) ? EXCP_IMP
: EXCP_DMAR
;
175 /* In reverse priority order, check for conditions which raise faults.
176 As we go, remove PROT bits that cover the condition we want to check.
177 In this way, the resulting PROT will force a re-check of the
178 architectural TLB entry for the next access. */
179 if (unlikely(!ent
->d
)) {
180 if (type
& PAGE_WRITE
) {
181 /* The D bit is not set -- TLB Dirty Bit Fault. */
182 ret
= EXCP_TLB_DIRTY
;
184 prot
&= PAGE_READ
| PAGE_EXEC
;
186 if (unlikely(ent
->b
)) {
187 if (type
& PAGE_WRITE
) {
188 /* The B bit is set -- Data Memory Break Fault. */
191 prot
&= PAGE_READ
| PAGE_EXEC
;
193 if (unlikely(ent
->t
)) {
194 if (!(type
& PAGE_EXEC
)) {
195 /* The T bit is set -- Page Reference Fault. */
204 trace_hppa_tlb_get_physical_address(env
, ret
, prot
, addr
, phys
);
208 hwaddr
hppa_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
210 HPPACPU
*cpu
= HPPA_CPU(cs
);
214 /* If the (data) mmu is disabled, bypass translation. */
215 /* ??? We really ought to know if the code mmu is disabled too,
216 in order to get the correct debugging dumps. */
217 if (!(cpu
->env
.psw
& PSW_D
)) {
221 excp
= hppa_get_physical_address(&cpu
->env
, addr
, MMU_KERNEL_IDX
, 0,
224 /* Since we're translating for debugging, the only error that is a
225 hard error is no translation at all. Otherwise, while a real cpu
226 access might not have permission, the debugger does. */
227 return excp
== EXCP_DTLB_MISS
? -1 : phys
;
230 bool hppa_cpu_tlb_fill(CPUState
*cs
, vaddr addr
, int size
,
231 MMUAccessType type
, int mmu_idx
,
232 bool probe
, uintptr_t retaddr
)
234 HPPACPU
*cpu
= HPPA_CPU(cs
);
235 CPUHPPAState
*env
= &cpu
->env
;
237 int prot
, excp
, a_prot
;
252 excp
= hppa_get_physical_address(env
, addr
, mmu_idx
,
253 a_prot
, &phys
, &prot
, &ent
);
254 if (unlikely(excp
>= 0)) {
258 trace_hppa_tlb_fill_excp(env
, addr
, size
, type
, mmu_idx
);
259 /* Failure. Raise the indicated exception. */
260 cs
->exception_index
= excp
;
261 if (cpu
->env
.psw
& PSW_Q
) {
262 /* ??? Needs tweaking for hppa64. */
263 cpu
->env
.cr
[CR_IOR
] = addr
;
264 cpu
->env
.cr
[CR_ISR
] = addr
>> 32;
266 cpu_loop_exit_restore(cs
, retaddr
);
269 trace_hppa_tlb_fill_success(env
, addr
& TARGET_PAGE_MASK
,
270 phys
& TARGET_PAGE_MASK
, size
, type
, mmu_idx
);
271 /* Success! Store the translation into the QEMU TLB. */
272 tlb_set_page(cs
, addr
& TARGET_PAGE_MASK
, phys
& TARGET_PAGE_MASK
,
273 prot
, mmu_idx
, TARGET_PAGE_SIZE
<< (ent
? 2 * ent
->page_size
: 0));
277 /* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
278 void HELPER(itlba
)(CPUHPPAState
*env
, target_ulong addr
, target_ureg reg
)
280 HPPATLBEntry
*empty
= NULL
;
283 /* Zap any old entries covering ADDR; notice empty entries on the way. */
284 for (i
= HPPA_BTLB_ENTRIES
; i
< ARRAY_SIZE(env
->tlb
); ++i
) {
285 HPPATLBEntry
*ent
= &env
->tlb
[i
];
286 if (ent
->itree
.start
<= addr
&& addr
<= ent
->itree
.last
) {
287 if (ent
->entry_valid
) {
288 hppa_flush_tlb_ent(env
, ent
, false);
296 /* If we didn't see an empty entry, evict one. */
298 empty
= hppa_alloc_tlb_ent(env
);
301 /* Note that empty->entry_valid == 0 already. */
302 empty
->itree
.start
= addr
& TARGET_PAGE_MASK
;
303 empty
->itree
.last
= empty
->itree
.start
+ TARGET_PAGE_SIZE
- 1;
304 empty
->pa
= extract32(reg
, 5, 20) << TARGET_PAGE_BITS
;
305 trace_hppa_tlb_itlba(env
, empty
, empty
->itree
.start
,
306 empty
->itree
.last
, empty
->pa
);
309 static void set_access_bits(CPUHPPAState
*env
, HPPATLBEntry
*ent
, target_ureg reg
)
311 ent
->access_id
= extract32(reg
, 1, 18);
312 ent
->u
= extract32(reg
, 19, 1);
313 ent
->ar_pl2
= extract32(reg
, 20, 2);
314 ent
->ar_pl1
= extract32(reg
, 22, 2);
315 ent
->ar_type
= extract32(reg
, 24, 3);
316 ent
->b
= extract32(reg
, 27, 1);
317 ent
->d
= extract32(reg
, 28, 1);
318 ent
->t
= extract32(reg
, 29, 1);
319 ent
->entry_valid
= 1;
320 trace_hppa_tlb_itlbp(env
, ent
, ent
->access_id
, ent
->u
, ent
->ar_pl2
,
321 ent
->ar_pl1
, ent
->ar_type
, ent
->b
, ent
->d
, ent
->t
);
324 /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
325 void HELPER(itlbp
)(CPUHPPAState
*env
, target_ulong addr
, target_ureg reg
)
327 HPPATLBEntry
*ent
= hppa_find_tlb(env
, addr
);
329 if (unlikely(ent
== NULL
)) {
330 qemu_log_mask(LOG_GUEST_ERROR
, "ITLBP not following ITLBA\n");
334 set_access_bits(env
, ent
, reg
);
337 /* Purge (Insn/Data) TLB. This is explicitly page-based, and is
338 synchronous across all processors. */
339 static void ptlb_work(CPUState
*cpu
, run_on_cpu_data data
)
341 CPUHPPAState
*env
= cpu_env(cpu
);
342 target_ulong addr
= (target_ulong
) data
.target_ptr
;
343 HPPATLBEntry
*ent
= hppa_find_tlb(env
, addr
);
345 if (ent
&& ent
->entry_valid
) {
346 hppa_flush_tlb_ent(env
, ent
, false);
350 void HELPER(ptlb
)(CPUHPPAState
*env
, target_ulong addr
)
352 CPUState
*src
= env_cpu(env
);
354 trace_hppa_tlb_ptlb(env
);
355 run_on_cpu_data data
= RUN_ON_CPU_TARGET_PTR(addr
);
359 async_run_on_cpu(cpu
, ptlb_work
, data
);
362 async_safe_run_on_cpu(src
, ptlb_work
, data
);
365 /* Purge (Insn/Data) TLB entry. This affects an implementation-defined
366 number of pages/entries (we choose all), and is local to the cpu. */
367 void HELPER(ptlbe
)(CPUHPPAState
*env
)
369 trace_hppa_tlb_ptlbe(env
);
370 qemu_log_mask(CPU_LOG_MMU
, "FLUSH ALL TLB ENTRIES\n");
371 memset(&env
->tlb
[HPPA_BTLB_ENTRIES
], 0,
372 sizeof(env
->tlb
) - HPPA_BTLB_ENTRIES
* sizeof(env
->tlb
[0]));
373 env
->tlb_last
= HPPA_BTLB_ENTRIES
;
374 tlb_flush_by_mmuidx(env_cpu(env
), HPPA_MMU_FLUSH_MASK
);
377 void cpu_hppa_change_prot_id(CPUHPPAState
*env
)
379 tlb_flush_by_mmuidx(env_cpu(env
), HPPA_MMU_FLUSH_P_MASK
);
382 void HELPER(change_prot_id
)(CPUHPPAState
*env
)
384 cpu_hppa_change_prot_id(env
);
387 target_ureg
HELPER(lpa
)(CPUHPPAState
*env
, target_ulong addr
)
392 excp
= hppa_get_physical_address(env
, addr
, MMU_KERNEL_IDX
, 0,
395 if (env
->psw
& PSW_Q
) {
396 /* ??? Needs tweaking for hppa64. */
397 env
->cr
[CR_IOR
] = addr
;
398 env
->cr
[CR_ISR
] = addr
>> 32;
400 if (excp
== EXCP_DTLB_MISS
) {
401 excp
= EXCP_NA_DTLB_MISS
;
403 trace_hppa_tlb_lpa_failed(env
, addr
);
404 hppa_dynamic_excp(env
, excp
, GETPC());
406 trace_hppa_tlb_lpa_success(env
, addr
, phys
);
410 /* Return the ar_type of the TLB at VADDR, or -1. */
411 int hppa_artype_for_page(CPUHPPAState
*env
, target_ulong vaddr
)
413 HPPATLBEntry
*ent
= hppa_find_tlb(env
, vaddr
);
414 return ent
? ent
->ar_type
: -1;
418 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
419 * allow operating systems to modify the Block TLB (BTLB) entries.
420 * For implementation details see page 1-13 in
421 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
423 void HELPER(diag_btlb
)(CPUHPPAState
*env
)
425 unsigned int phys_page
, len
, slot
;
426 int mmu_idx
= cpu_mmu_index(env
, 0);
427 uintptr_t ra
= GETPC();
433 /* BTLBs are not supported on 64-bit CPUs */
434 env
->gr
[28] = -1; /* nonexistent procedure */
437 env
->gr
[28] = 0; /* PDC_OK */
439 switch (env
->gr
[25]) {
441 /* return BTLB parameters */
442 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
443 vaddr
= probe_access(env
, env
->gr
[24], 4 * sizeof(target_ulong
),
444 MMU_DATA_STORE
, mmu_idx
, ra
);
446 env
->gr
[28] = -10; /* invalid argument */
448 vaddr
[0] = cpu_to_be32(1);
449 vaddr
[1] = cpu_to_be32(16 * 1024);
450 vaddr
[2] = cpu_to_be32(HPPA_BTLB_FIXED
);
451 vaddr
[3] = cpu_to_be32(HPPA_BTLB_VARIABLE
);
455 /* insert BTLB entry */
456 virt_page
= env
->gr
[24]; /* upper 32 bits */
458 virt_page
|= env
->gr
[23]; /* lower 32 bits */
459 phys_page
= env
->gr
[22];
462 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
463 "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
465 (long long) virt_page
<< TARGET_PAGE_BITS
,
466 (long long) (virt_page
+ len
) << TARGET_PAGE_BITS
,
467 (long long) virt_page
, phys_page
, len
, slot
);
468 if (slot
< HPPA_BTLB_ENTRIES
) {
469 btlb
= &env
->tlb
[slot
];
470 /* force flush of possibly existing BTLB entry */
471 hppa_flush_tlb_ent(env
, btlb
, true);
472 /* create new BTLB entry */
473 btlb
->itree
.start
= virt_page
<< TARGET_PAGE_BITS
;
474 btlb
->itree
.last
= btlb
->itree
.start
+ len
* TARGET_PAGE_SIZE
- 1;
475 btlb
->pa
= phys_page
<< TARGET_PAGE_BITS
;
476 set_access_bits(env
, btlb
, env
->gr
[20]);
480 env
->gr
[28] = -10; /* invalid argument */
484 /* Purge BTLB entry */
486 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
488 if (slot
< HPPA_BTLB_ENTRIES
) {
489 btlb
= &env
->tlb
[slot
];
490 hppa_flush_tlb_ent(env
, btlb
, true);
492 env
->gr
[28] = -10; /* invalid argument */
496 /* Purge all BTLB entries */
497 qemu_log_mask(CPU_LOG_MMU
, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
498 for (slot
= 0; slot
< HPPA_BTLB_ENTRIES
; slot
++) {
499 btlb
= &env
->tlb
[slot
];
500 hppa_flush_tlb_ent(env
, btlb
, true);
504 env
->gr
[28] = -2; /* nonexistent option */