/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

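/* Scan the TLB array for an entry whose virtual range covers ADDR.
   Validity is left for the caller to check; returns NULL if nothing
   matches.  */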
static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->va_b, ent->va_e, ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

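/* Invalidate ENT and drop any cached QEMU softmmu translations that it
   covers.  Block TLB (BTLB) slots are only cleared when FORCE_FLUSH_BTLB
   is set.  */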
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->va_b,
                              ent->va_e - ent->va_b + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so.  */
    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
        return;
    }

    memset(ent, 0, sizeof(*ent));
    ent->va_b = -1;
}

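/* Pick the next dynamic TLB slot to reuse, cycling tlb_last round-robin
   and never touching the BTLB slots at the start of the array.  The
   chosen entry is flushed before being returned.  */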
static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    hppa_tlb_entry *ent;
    uint32_t i;

    if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) {
        i = HPPA_BTLB_ENTRIES;
        env->tlb_last = HPPA_BTLB_ENTRIES + 1;
    } else {
        i = env->tlb_last;
        env->tlb_last++;
    }

    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent, false);
    return ent;
}

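/* Translate virtual address ADDR for an access of TYPE using MMU_IDX.
   On success returns -1 and fills *PPHYS and *PPROT; otherwise returns
   the EXCP_* number to raise.  If TLB_ENTRY is non-NULL it receives the
   matching architectural TLB entry, when one exists.  */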
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              hppa_tlb_entry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    hppa_tlb_entry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->va_b);

    /* Map TLB access_rights field to QEMU protection.  */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

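/* Debug (e.g. gdbstub) translation of a virtual address to physical.  */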
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation.  */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps.  */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does.  */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

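/* Softmmu fault handler: translate ADDR and either install the mapping
   in the QEMU TLB or raise the architectural exception (unless we are
   merely probing).  */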
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    hppa_tlb_entry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception.  */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
    return true;
}

/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only.  */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.  */
    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent, false);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one.  */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}

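/* Decode the protection word REG (access id, access rights, B/D/T bits)
   into ENT and mark the entry valid.  */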
static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection.  Note this is PA 1.1 only.  */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    set_access_bits(env, ent, reg);
}

/* Purge (Insn/Data) TLB.  This is explicitly page-based, and is
   synchronous across all processors.  */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    target_ulong addr = (target_ulong) data.target_ptr;
    hppa_tlb_entry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent, false);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    trace_hppa_tlb_ptlb(env);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry.  This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu.  */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
    env->tlb_last = HPPA_BTLB_ENTRIES;
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

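/* The protection-id (PID) control registers have changed; drop cached
   translations in the mmu indexes that perform access-id checks.  */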
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

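/* Implement the LPA (Load Physical Address) instruction.  */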
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64.  */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1.  */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    hppa_tlb_entry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;

#ifdef TARGET_HPPA64
    /* BTLBs are not supported on 64-bit CPUs */
    env->gr[28] = -1; /* nonexistent procedure */
    return;
#endif
    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            /* force flush of possibly existing BTLB entry */
            hppa_flush_tlb_ent(env, btlb, true);
            /* create new BTLB entry */
            btlb->va_b = virt_page << TARGET_PAGE_BITS;
            btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}