/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "qom/cpu.h"
#include "trace.h"

#ifdef CONFIG_USER_ONLY
int hppa_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int size, int rw, int mmu_idx)
{
    HPPACPU *cpu = HPPA_CPU(cs);

    /* ??? Test between data page fault and data memory protection trap,
       which would affect si_code.  */
    cs->exception_index = EXCP_DMP;
    cpu->env.cr[CR_IOR] = address;
    return 1;
}
#else
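/* Scan the software TLB (env->tlb) for an entry whose virtual address
   range covers ADDR; return NULL if nothing matches.  */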
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent)
{
    CPUState *cs = CPU(hppa_env_get_cpu(env));
    unsigned i, n = 1 << (2 * ent->page_size);
    uint64_t addr = ent->va_b;

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

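    /* The entry covers 1 << (2 * page_size), i.e. 4 ** page_size, target
       pages; drop each of them from the QEMU softmmu TLB individually.  */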
    for (i = 0; i < n; ++i, addr += TARGET_PAGE_SIZE) {
        /* Do not flush MMU_PHYS_IDX.  */
        tlb_flush_page_by_mmuidx(cs, addr, 0xf);
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

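/* Evict the TLB entry selected by a simple round-robin pointer
   (env->tlb_last) and return it, already flushed and invalid.  */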
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i = env->tlb_last;

    env->tlb_last = (i == ARRAY_SIZE(env->tlb) - 1 ? 0 : i + 1);
    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent);
    return ent;
}

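/* Translate ADDR for an access of type TYPE (a PAGE_* bit, or 0 for a
   non-architectural access from within QEMU).  Returns -1 on success or
   an EXCP_* number identifying the fault to raise; *pphys and *pprot are
   filled in either way.  */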
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot;
    hppa_tlb_entry *ent;
    int ret = -1;

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr & ~TARGET_PAGE_MASK);

    /* Map TLB access_rights field to QEMU protection.  */
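    /* A smaller mmu_idx is a more privileged level (0 being the most
       privileged), so reads and writes are granted when the current level
       is at least as privileged as ar_pl1/ar_pl2 requires, and execution
       is granted when it lies within the [ar_pl2, ar_pl1] window.  */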
    r_prot = (mmu_idx <= ent->ar_pl1) * PAGE_READ;
    w_prot = (mmu_idx <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= mmu_idx && mmu_idx <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC ? EXCP_IMP :
               prot & PAGE_READ ? EXCP_DMP : EXCP_DMAR);
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType type, int mmu_idx, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot);
    if (unlikely(excp >= 0)) {
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu->env_ptr;
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    trace_hppa_tlb_ptlb(env);

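    /* Broadcast the purge: queue it on every other vCPU, then run it on
       this vCPU as "safe" work, serialising the flush against the other
       vCPUs' execution.  */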
    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    CPUState *src = CPU(hppa_env_get_cpu(env));

    trace_hppa_tlb_ptlbe(env);
    memset(env->tlb, 0, sizeof(env->tlb));
    tlb_flush_by_mmuidx(src, 0xf);
}

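/* A change of protection ID can invalidate previously cached translations,
   so drop everything except MMU_PHYS_IDX from the QEMU TLB.  This only
   matters while the PSW P bit enables protection-ID checking.  */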
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    if (env->psw & PSW_P) {
        CPUState *src = CPU(hppa_env_get_cpu(env));
        tlb_flush_by_mmuidx(src, 0xf);
    }
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

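/* LPA: translate ADDR as a non-access reference and return the physical
   address.  A failed translation raises a fault here, with a data TLB
   miss reported as a non-access TLB miss.  */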
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}
#endif /* CONFIG_USER_ONLY */