/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"
#include "trace.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
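
/*
 * Overview (editorial note, derived from the code below): system-mode
 * emulation keeps a small software TLB in env->tlb, a fixed-size array
 * searched linearly by hppa_find_tlb() and refilled round-robin by
 * hppa_alloc_tlb_ent().  Successful translations are additionally
 * cached in QEMU's own per-mmu-index TLB via tlb_set_page();
 * MMU_PHYS_IDX is reserved for untranslated (physical) accesses and is
 * deliberately never flushed here.
 */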
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    /* An entry covers 4^page_size pages, each of TARGET_PAGE_SIZE.  */
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    /* Park the cleared entry at an unmatchable address.  */
    ent->va_b = -1;
}

static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    /* Evict entries in simple round-robin order.  */
    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

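/*
 * Translate ADDR for access TYPE (a single PAGE_* bit, or 0 for a
 * non-architectural access from within QEMU) under MMU_IDX.  On
 * success, returns -1 and stores the physical address and the allowed
 * QEMU page protections through PPHYS/PPROT; on failure, returns the
 * EXCP_* number the caller should raise.
 */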
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

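    /*
     * PA-RISC privilege levels are numerically inverted: 0 is the most
     * privileged.  For example, an entry with ar_pl1 = 3 and ar_pl2 = 0
     * yields r_prot for every mmu_idx, w_prot only for mmu_idx 0, and
     * x_prot for mmu_idx 0..3; the ar_type switch below then decides
     * which of those rights the page type actually grants.
     */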
    /* Map TLB access_rights field to QEMU protection.  */
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

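    /*
     * Worked example for the protection-id check below: access_id 0x123
     * gives match == 0x247, i.e. 0x123 in bits [31:1] with bit 0 (the
     * write-disable bit) set.  Only if one of PID1..PID4 holds exactly
     * that value is the page's write permission stripped.
     */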
    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }
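    /*
     * Example of the prot-stripping above: reading a page whose D bit
     * is clear succeeds, but PROT has lost PAGE_WRITE, so the read-only
     * translation is what gets cached; a later write then re-enters
     * tlb_fill and raises EXCP_TLB_DIRTY rather than hitting a stale
     * writable QEMU TLB entry.
     */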

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    /* Reduce the MMU access type to the single PAGE_* bit to check.  */
    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}
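
/*
 * PA 1.1 guests insert a TLB entry in two steps: ITLBA supplies the
 * virtual range and physical page and leaves the entry invalid, then
 * ITLBP supplies the access-rights fields and validates it.  An ITLBP
 * with no preceding ITLBA is a guest error, logged in HELPER(itlbp).
 */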
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    /* Bits [24:5] of REG hold the physical page number.  */
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);
    CPUState *cpu;

    trace_hppa_tlb_ptlb(env);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}
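
/*
 * Design note (editorial reading of the code above): remote vCPUs run
 * the purge as ordinary async work, while the requesting vCPU queues it
 * as "safe" work, which QEMU executes only while no vCPU is inside its
 * execution loop; that is what makes the purge appear synchronous
 * across processors, as the comment above requires.
 */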

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
    /* 0xf covers the four privilege mmu indexes, not MMU_PHYS_IDX.  */
    tlb_flush_by_mmuidx(src, 0xf);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    /* Translations cached while PSW_P is set depend on the PID
       registers, so a protection-id change must flush them.  */
    if (env->psw & PSW_P) {
        CPUState *src = CPU(hppa_env_get_cpu(env));
        tlb_flush_by_mmuidx(src, 0xf);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */