/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

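/* Return the internal TLB entry covering ADDR, searching both the fixed
   BTLB slots and the normal TLB slots; return NULL if nothing matches. */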
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    int i;

    for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) {
        HPPATLBEntry *ent = &env->tlb[i];
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                      ent->itree.start, ent->itree.last,
                                      ent->pa);
            return ent;
        }
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

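/* Invalidate ENT: flush its address range from the QEMU TLB and clear the
   entry.  BTLB entries are left in place unless FORCE_FLUSH_BTLB is set. */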
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* never clear BTLBs, unless forced to do so. */
    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
        return;
    }

    memset(ent, 0, sizeof(*ent));
    ent->itree.start = -1;
}

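/* Choose a victim slot among the non-BTLB entries (simple round-robin via
   tlb_last), flush whatever it currently holds, and return it for reuse. */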
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent;
    uint32_t i;

    if (env->tlb_last < HPPA_BTLB_ENTRIES ||
        env->tlb_last >= ARRAY_SIZE(env->tlb)) {
        i = HPPA_BTLB_ENTRIES;
        env->tlb_last = HPPA_BTLB_ENTRIES + 1;
    } else {
        i = env->tlb_last;
        env->tlb_last++;
    }

    ent = &env->tlb[i];

    hppa_flush_tlb_ent(env, ent, false);
    return ent;
}

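/* Translate ADDR for an access of type TYPE at privilege level MMU_IDX.
   Return -1 on success, filling in *PPHYS and *PPROT, or the exception
   number to raise.  If TLB_ENTRY is non-NULL, it is set to the matching
   TLB entry, when one exists. */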
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

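/* Debug translation hook: translate ADDR with no access type, so that the
   permission, D, B and T checks above are bypassed. */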
hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all.  Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

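/* TCG tlb_fill hook: translate ADDR for the given access, raise the
   architectural exception on failure (unless probing), otherwise install
   the translation into the QEMU TLB. */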
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure. Raise the indicated exception. */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success! Store the translation into the QEMU TLB. */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK, prot,
                 mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way. */
    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
        HPPATLBEntry *ent = &env->tlb[i];
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            if (ent->entry_valid) {
                hppa_flush_tlb_ent(env, ent, false);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one. */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already. */
    empty->itree.start = addr & TARGET_PAGE_MASK;
    empty->itree.last = empty->itree.start + TARGET_PAGE_SIZE - 1;
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->itree.start,
                         empty->itree.last, empty->pa);
}

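/* Fill in the protection and access-rights fields of ENT from REG, the
   ITLBP-style protection word, and mark the entry valid. */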
static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent,
                            target_ureg reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, addr);

    if (unlikely(ent == NULL)) {
        qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
        return;
    }

    set_access_bits(env, ent, reg);
}

/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
   synchronous across all processors. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    target_ulong addr = (target_ulong) data.target_ptr;
    HPPATLBEntry *ent = hppa_find_tlb(env, addr);

    if (ent && ent->entry_valid) {
        hppa_flush_tlb_ent(env, ent, false);
    }
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;
    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
    env->tlb_last = HPPA_BTLB_ENTRIES;
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

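/* The protection ID registers have changed; discard the QEMU TLB entries
   that were installed with protection-ID checking enabled. */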
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

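/* Load Physical Address: translate ADDR in the kernel context and return
   the physical address, raising a dynamic exception if no translation
   exists. */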
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;

#ifdef TARGET_HPPA64
    /* BTLBs are not supported on 64-bit CPUs */
    env->gr[28] = -1; /* nonexistent procedure */
    return;
#endif
    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24]; /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23]; /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            /* force flush of possibly existing BTLB entry */
            hppa_flush_tlb_ent(env, btlb, true);
            /* create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}