target/hppa: Populate an interval tree with valid tlb entries

target/hppa/mem_helper.c
/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

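/* Return the TLB entry from the interval tree whose range covers ADDR,
   or NULL if no entry maps that address. */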
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

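/* Invalidate ENT: flush the QEMU TLB pages it covers and, unless it is
   a BTLB entry and FORCE_FLUSH_BTLB is clear, remove it from the
   interval tree; ordinary (non-BTLB) entries go back on the unused list. */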
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

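/* Flush every TLB entry whose virtual range overlaps [VA_B, VA_E]. */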
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

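/* Allocate a TLB entry: take the head of the unused list, or evict the
   round-robin victim (never a BTLB slot) when the list is empty. */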
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t i = env->tlb_last;

        if (i < HPPA_BTLB_ENTRIES || i >= ARRAY_SIZE(env->tlb)) {
            i = HPPA_BTLB_ENTRIES;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

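/* Translate ADDR for MMU_IDX and access TYPE. On success return -1 and
   fill *PPHYS / *PPROT; on failure return the EXCP_* fault to raise.
   If TLB_ENTRY is non-NULL, it is set to the matching hppa TLB entry. */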
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return addr;
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

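/* TCG tlb_fill hook: translate ADDR and install the result into the
   QEMU softmmu TLB, or raise the architectural fault on failure. */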
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure. Raise the indicated exception. */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success!  Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask.  We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

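/* Fill in the protection and access-rights fields of ENT from REG,
   mark it valid, and insert it into the interval tree. */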
static void set_access_bits(CPUHPPAState *env, HPPATLBEntry *ent, target_ureg reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
   synchronous across all processors. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    target_ulong addr = (target_ulong) data.target_ptr;

    hppa_flush_tlb_range(env, addr, addr);
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

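/* Purge all non-BTLB entries, rebuilding the unused list and the
   interval tree (which retains only the valid BTLB entries). */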
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[HPPA_BTLB_ENTRIES], 0,
           sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0]));
    env->tlb_last = HPPA_BTLB_ENTRIES;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[HPPA_BTLB_ENTRIES];
    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < HPPA_BTLB_ENTRIES; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

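/* Called when the protection-id (PID) registers change: flush the QEMU
   TLB for the mmu indexes that are subject to access-id checks. */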
void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

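/* LPA (Load Physical Address): translate ADDR as a non-access data
   reference; on failure raise the fault, promoting a DTLB miss to a
   non-access DTLB miss. */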
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;

#ifdef TARGET_HPPA64
    /* BTLBs are not supported on 64-bit CPUs */
    env->gr[28] = -1; /* nonexistent procedure */
    return;
#endif
    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}