/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"

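/*
 * Convert a PA 2.0 absolute address to a physical address, W=1 (wide).
 * Addresses with bits [61:58] == 0xf select either the I/O space
 * (bits [57:54] nonzero) or the PDC space (bits [57:54] zero);
 * everything else is ordinary memory.
 */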
hwaddr hppa_abs_to_phys_pa2_w1(vaddr addr)
{
    if (likely(extract64(addr, 58, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 62);
    }
    if (extract64(addr, 54, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(62, 2);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 54)) | MAKE_64BIT_MASK(60, 4);
}

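/*
 * As above, but for W=0 (narrow) mode, where the absolute address is
 * only 32 bits wide and the space selectors sit at bits [31:28]
 * and [27:24].
 */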
hwaddr hppa_abs_to_phys_pa2_w0(vaddr addr)
{
    if (likely(extract32(addr, 28, 4) != 0xf)) {
        /* Memory address space */
        return addr & MAKE_64BIT_MASK(0, 32);
    }
    if (extract32(addr, 24, 4) != 0) {
        /* I/O address space */
        return addr | MAKE_64BIT_MASK(32, 32);
    }
    /* PDC address space */
    return (addr & MAKE_64BIT_MASK(0, 24)) | MAKE_64BIT_MASK(60, 4);
}

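/*
 * Pick the absolute-to-physical mapping for the current cpu:
 * PA 1.x addresses are used as-is; PA 2.0 depends on PSW.W.
 */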
static hwaddr hppa_abs_to_phys(CPUHPPAState *env, vaddr addr)
{
    if (!hppa_is_pa20(env)) {
        return addr;
    } else if (env->psw & PSW_W) {
        return hppa_abs_to_phys_pa2_w1(addr);
    } else {
        return hppa_abs_to_phys_pa2_w0(addr);
    }
}

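/* Look up the hppa TLB entry covering ADDR in the interval tree, if any. */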
static HPPATLBEntry *hppa_find_tlb(CPUHPPAState *env, vaddr addr)
{
    IntervalTreeNode *i = interval_tree_iter_first(&env->tlb_root, addr, addr);

    if (i) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);
        trace_hppa_tlb_find_entry(env, ent, ent->entry_valid,
                                  ent->itree.start, ent->itree.last, ent->pa);
        return ent;
    }
    trace_hppa_tlb_find_entry_not_found(env, addr);
    return NULL;
}

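/*
 * Invalidate one hppa TLB entry: flush the covered range from the QEMU
 * softmmu TLB, remove the entry from the interval tree, and return it
 * to the unused list.  BTLB entries only get the softmmu flush unless
 * FORCE_FLUSH_BTLB is set, and are never placed on the unused list.
 */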
static void hppa_flush_tlb_ent(CPUHPPAState *env, HPPATLBEntry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);
    bool is_btlb;

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->itree.start,
                             ent->itree.last, ent->pa);

    tlb_flush_range_by_mmuidx(cs, ent->itree.start,
                              ent->itree.last - ent->itree.start + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* Never clear BTLBs, unless forced to do so. */
    is_btlb = ent < &env->tlb[HPPA_BTLB_ENTRIES(env)];
    if (is_btlb && !force_flush_btlb) {
        return;
    }

    interval_tree_remove(&ent->itree, &env->tlb_root);
    memset(ent, 0, sizeof(*ent));

    if (!is_btlb) {
        ent->unused_next = env->tlb_unused;
        env->tlb_unused = ent;
    }
}

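/* Invalidate every hppa TLB entry that overlaps [VA_B, VA_E]. */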
static void hppa_flush_tlb_range(CPUHPPAState *env, vaddr va_b, vaddr va_e)
{
    IntervalTreeNode *i, *n;

    i = interval_tree_iter_first(&env->tlb_root, va_b, va_e);
    for (; i ; i = n) {
        HPPATLBEntry *ent = container_of(i, HPPATLBEntry, itree);

        /*
         * Find the next entry now: In the normal case the current entry
         * will be removed, but in the BTLB case it will remain.
         */
        n = interval_tree_iter_next(i, va_b, va_e);
        hppa_flush_tlb_ent(env, ent, false);
    }
}

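/*
 * Allocate a TLB entry: take one from the unused list if possible,
 * otherwise evict the next non-BTLB slot in round-robin order.
 */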
static HPPATLBEntry *hppa_alloc_tlb_ent(CPUHPPAState *env)
{
    HPPATLBEntry *ent = env->tlb_unused;

    if (ent == NULL) {
        uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
        uint32_t i = env->tlb_last;

        if (i < btlb_entries || i >= ARRAY_SIZE(env->tlb)) {
            i = btlb_entries;
        }
        env->tlb_last = i + 1;

        ent = &env->tlb[i];
        hppa_flush_tlb_ent(env, ent, false);
    }

    env->tlb_unused = ent->unused_next;
    return ent;
}

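/*
 * Translate ADDR for an access of TYPE (a PAGE_* bit, or 0 for a
 * non-architectural access) under MMU_IDX.  On success return -1 and
 * fill in *PPHYS and *PPROT; on failure return the EXCP_* code that
 * the caller should raise.  If TLB_ENTRY is non-NULL, it receives the
 * matching hppa TLB entry, if any.
 */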
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              HPPATLBEntry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    HPPATLBEntry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled. Direct map virtual to physical. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address. */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address. */
    phys = ent->pa + (addr - ent->itree.start);

    /* Map TLB access_rights field to QEMU protection. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if (ent->access_id && MMU_IDX_TO_P(mmu_idx)) {
        /* If bits [31:1] match, and bit 0 is set, suppress write. */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU. Bypass checks for access, D, B and T bits. */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault. */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access. */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault. */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault. */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault. */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys = hppa_abs_to_phys(env, phys);
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}

hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    hwaddr phys;
    int prot, excp;

    /* If the (data) mmu is disabled, bypass translation. */
    /* ??? We really ought to know if the code mmu is disabled too,
       in order to get the correct debugging dumps. */
    if (!(cpu->env.psw & PSW_D)) {
        return hppa_abs_to_phys(&cpu->env, addr);
    }

    excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);

    /* Since we're translating for debugging, the only error that is a
       hard error is no translation at all. Otherwise, while a real cpu
       access might not have permission, the debugger does. */
    return excp == EXCP_DTLB_MISS ? -1 : phys;
}

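/*
 * TCG softmmu fault handler: translate ADDR, install the page into the
 * QEMU TLB on success, otherwise record IOR/ISR (if PSW_Q) and raise
 * the fault, or simply return false when probing.
 */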
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    HPPATLBEntry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure. Raise the indicated exception. */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);

    /*
     * Success! Store the translation into the QEMU TLB.
     * Note that we always install a single-page entry, because that
     * is what works best with softmmu -- anything else will trigger
     * the large page protection mask. We do not require this,
     * because we record the large page here in the hppa tlb.
     */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

/* Insert (Insn/Data) TLB Address. Note this is PA 1.1 only. */
void HELPER(itlba_pa11)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent;

    /* Zap any old entries covering ADDR. */
    addr &= TARGET_PAGE_MASK;
    hppa_flush_tlb_range(env, addr, addr + TARGET_PAGE_SIZE - 1);

    ent = env->tlb_partial;
    if (ent == NULL) {
        ent = hppa_alloc_tlb_ent(env);
        env->tlb_partial = ent;
    }

    /* Note that ent->entry_valid == 0 already. */
    ent->itree.start = addr;
    ent->itree.last = addr + TARGET_PAGE_SIZE - 1;
    ent->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
}

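/*
 * Unpack the PA 1.1 protection/access-rights word REG into ENT, mark
 * the entry valid, and insert it into the interval tree.
 */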
static void set_access_bits_pa11(CPUHPPAState *env, HPPATLBEntry *ent,
                                 target_ureg reg)
{
    ent->access_id = extract32(reg, 1, 18);
    ent->u = extract32(reg, 19, 1);
    ent->ar_pl2 = extract32(reg, 20, 2);
    ent->ar_pl1 = extract32(reg, 22, 2);
    ent->ar_type = extract32(reg, 24, 3);
    ent->b = extract32(reg, 27, 1);
    ent->d = extract32(reg, 28, 1);
    ent->t = extract32(reg, 29, 1);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2,
                         ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t);
}

/* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */
void HELPER(itlbp_pa11)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    HPPATLBEntry *ent = env->tlb_partial;

    if (ent) {
        env->tlb_partial = NULL;
        if (ent->itree.start <= addr && addr <= ent->itree.last) {
            set_access_bits_pa11(env, ent, reg);
            return;
        }
    }
    qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n");
}

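/*
 * Insert a PA 2.0 TLB entry covering virtual address VA_B.  R1 carries
 * the physical page number plus the page size encoding in its low 4
 * bits; R2 carries the protection and attribute bits.
 */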
static void itlbt_pa20(CPUHPPAState *env, target_ureg r1,
                       target_ureg r2, vaddr va_b)
{
    HPPATLBEntry *ent;
    vaddr va_e;
    uint64_t va_size;
    int mask_shift;

    mask_shift = 2 * (r1 & 0xf);
    va_size = TARGET_PAGE_SIZE << mask_shift;
    va_b &= -va_size;
    va_e = va_b + va_size - 1;

    hppa_flush_tlb_range(env, va_b, va_e);
    ent = hppa_alloc_tlb_ent(env);

    ent->itree.start = va_b;
    ent->itree.last = va_e;
    ent->pa = (r1 << 7) & (TARGET_PAGE_MASK << mask_shift);
    ent->t = extract64(r2, 61, 1);
    ent->d = extract64(r2, 60, 1);
    ent->b = extract64(r2, 59, 1);
    ent->ar_type = extract64(r2, 56, 3);
    ent->ar_pl1 = extract64(r2, 54, 2);
    ent->ar_pl2 = extract64(r2, 52, 2);
    ent->u = extract64(r2, 51, 1);
    /* o = bit 50 */
    /* p = bit 49 */
    ent->access_id = extract64(r2, 1, 31);
    ent->entry_valid = 1;

    interval_tree_insert(&ent->itree, &env->tlb_root);
    trace_hppa_tlb_itlba(env, ent, ent->itree.start, ent->itree.last, ent->pa);
    trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u,
                         ent->ar_pl2, ent->ar_pl1, ent->ar_type,
                         ent->b, ent->d, ent->t);
}

void HELPER(idtlbt_pa20)(CPUHPPAState *env, target_ureg r1, target_ureg r2)
{
    vaddr va_b = deposit64(env->cr[CR_IOR], 32, 32, env->cr[CR_ISR]);
    itlbt_pa20(env, r1, r2, va_b);
}

void HELPER(iitlbt_pa20)(CPUHPPAState *env, target_ureg r1, target_ureg r2)
{
    vaddr va_b = deposit64(env->cr[CR_IIAOQ], 32, 32, env->cr[CR_IIASQ]);
    itlbt_pa20(env, r1, r2, va_b);
}

/* Purge (Insn/Data) TLB. This is explicitly page-based, and is
   synchronous across all processors. */
static void ptlb_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUHPPAState *env = cpu_env(cpu);
    target_ulong addr = (target_ulong) data.target_ptr;

    hppa_flush_tlb_range(env, addr, addr);
}

void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr)
{
    CPUState *src = env_cpu(env);
    CPUState *cpu;

    trace_hppa_tlb_ptlb(env);
    run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr);

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, ptlb_work, data);
        }
    }
    async_safe_run_on_cpu(src, ptlb_work, data);
}

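/*
 * Purge all non-BTLB entries from the hppa TLB and rebuild the interval
 * tree so that it contains only the still-valid BTLB entries.
 */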
void hppa_ptlbe(CPUHPPAState *env)
{
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);
    uint32_t i;

    /* Zap the (non-btlb) tlb entries themselves. */
    memset(&env->tlb[btlb_entries], 0,
           sizeof(env->tlb) - btlb_entries * sizeof(env->tlb[0]));
    env->tlb_last = btlb_entries;
    env->tlb_partial = NULL;

    /* Put them all onto the unused list. */
    env->tlb_unused = &env->tlb[btlb_entries];
    for (i = btlb_entries; i < ARRAY_SIZE(env->tlb) - 1; ++i) {
        env->tlb[i].unused_next = &env->tlb[i + 1];
    }

    /* Re-initialize the interval tree with only the btlb entries. */
    memset(&env->tlb_root, 0, sizeof(env->tlb_root));
    for (i = 0; i < btlb_entries; ++i) {
        if (env->tlb[i].entry_valid) {
            interval_tree_insert(&env->tlb[i].itree, &env->tlb_root);
        }
    }

    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK);
}

/* Purge (Insn/Data) TLB entry. This affects an implementation-defined
   number of pages/entries (we choose all), and is local to the cpu. */
void HELPER(ptlbe)(CPUHPPAState *env)
{
    trace_hppa_tlb_ptlbe(env);
    qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n");
    hppa_ptlbe(env);
}

void cpu_hppa_change_prot_id(CPUHPPAState *env)
{
    tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_P_MASK);
}

void HELPER(change_prot_id)(CPUHPPAState *env)
{
    cpu_hppa_change_prot_id(env);
}

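/*
 * LPA: Load Physical Address.  Probe the translation of ADDR and return
 * the physical address; if there is no translation, raise a non-access
 * data TLB miss fault instead.
 */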
target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr)
{
    hwaddr phys;
    int prot, excp;

    excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0,
                                     &phys, &prot, NULL);
    if (excp >= 0) {
        if (env->psw & PSW_Q) {
            /* ??? Needs tweaking for hppa64. */
            env->cr[CR_IOR] = addr;
            env->cr[CR_ISR] = addr >> 32;
        }
        if (excp == EXCP_DTLB_MISS) {
            excp = EXCP_NA_DTLB_MISS;
        }
        trace_hppa_tlb_lpa_failed(env, addr);
        hppa_dynamic_excp(env, excp, GETPC());
    }
    trace_hppa_tlb_lpa_success(env, addr, phys);
    return phys;
}

/* Return the ar_type of the TLB at VADDR, or -1. */
int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr)
{
    HPPATLBEntry *ent = hppa_find_tlb(env, vaddr);
    return ent ? ent->ar_type : -1;
}

/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    HPPATLBEntry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;
    uint32_t btlb_entries = HPPA_BTLB_ENTRIES(env);

    /* BTLBs are not supported on 64-bit CPUs */
    if (btlb_entries == 0) {
        env->gr[28] = -1; /* nonexistent procedure */
        return;
    }

    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(PA10_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(PA10_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                      "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                      "into slot %d\n",
                      (long long) virt_page << TARGET_PAGE_BITS,
                      (long long) (virt_page + len) << TARGET_PAGE_BITS,
                      (long long) virt_page, phys_page, len, slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];

            /* Force flush of possibly existing BTLB entry. */
            hppa_flush_tlb_ent(env, btlb, true);

            /* Create new BTLB entry */
            btlb->itree.start = virt_page << TARGET_PAGE_BITS;
            btlb->itree.last = btlb->itree.start + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            set_access_bits_pa11(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                      slot);
        if (slot < btlb_entries) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < btlb_entries; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}