/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"
#include "trace.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code. */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

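    /* The entry maps 4^page_size contiguous target pages; flush each
       page it covers from the QEMU TLB. */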
    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX. */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

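/* Allocate the next TLB slot in simple round-robin order; the previous
   occupant, if any, is flushed before the slot is reused. */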
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

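    /* Lower mmu_idx values are more privileged (MMU_KERNEL_IDX is 0), so
       comparing mmu_idx against the ar_pl fields grants an access exactly
       when the current privilege level is at least as privileged as the
       level the entry requires. */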
    /* Map TLB access_rights field to QEMU protection. */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* ??? Check PSW_P and ent->access_prot. This can remove PAGE_WRITE. */

    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC ? EXCP_IMP :
               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure. Raise the indicated exception. */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success! Store the translation into the QEMU TLB. */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way. */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one. */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already. */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
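    /* Bits [24:5] of REG hold the physical page number. */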
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL || ent->entry_valid)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

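    /* Unpack the protection word: access id, the U bit, the two
       privilege-level fields, the access-rights type, and the
       B/D/T fault-control bits. */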
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
   synchronous across all processors. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    trace_hppa_tlb_ptlb(env);
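    /* Queue the purge on every other cpu asynchronously; running the same
       work on the source cpu as "safe" work then waits for all cpus to
       reach a quiescent point, making the purge effectively synchronous. */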
    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
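    /* As above, do not flush MMU_PHYS_IDX. */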
    tlb_flush_by_mmuidx(src, 0xf);
}

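/* Implement LPA (Load Physical Address): translate ADDR through the
   kernel mmu index with no access check, raising a non-access TLB miss
   fault if no translation exists. */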
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */