/*
 * HPPA memory access helper routines
 *
 * Copyright (c) 2017 Helge Deller
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/helper-proto.h"
#include "hw/core/cpu.h"
#include "trace.h"
650cdb2a RH |
28 | static hppa_tlb_entry *hppa_find_tlb(CPUHPPAState *env, vaddr addr) |
29 | { | |
30 | int i; | |
31 | ||
32 | for (i = 0; i < ARRAY_SIZE(env->tlb); ++i) { | |
33 | hppa_tlb_entry *ent = &env->tlb[i]; | |
8d6ae7fb | 34 | if (ent->va_b <= addr && addr <= ent->va_e) { |
23c3d569 SS |
35 | trace_hppa_tlb_find_entry(env, ent + i, ent->entry_valid, |
36 | ent->va_b, ent->va_e, ent->pa); | |
650cdb2a RH |
37 | return ent; |
38 | } | |
39 | } | |
23c3d569 | 40 | trace_hppa_tlb_find_entry_not_found(env, addr); |
650cdb2a RH |
41 | return NULL; |
42 | } | |
43 | ||
/*
 * Invalidate one architectural TLB entry and evict the QEMU softmmu
 * translations covering its VA range.  Block-TLB (BTLB) slots — the
 * first HPPA_BTLB_ENTRIES entries of env->tlb — are preserved unless
 * force_flush_btlb is set.  No-op for entries not marked valid.
 */
static void hppa_flush_tlb_ent(CPUHPPAState *env, hppa_tlb_entry *ent,
                               bool force_flush_btlb)
{
    CPUState *cs = env_cpu(env);

    if (!ent->entry_valid) {
        return;
    }

    trace_hppa_tlb_flush_ent(env, ent, ent->va_b, ent->va_e, ent->pa);

    /* Drop cached softmmu translations for the entry's whole VA span. */
    tlb_flush_range_by_mmuidx(cs, ent->va_b,
                              ent->va_e - ent->va_b + 1,
                              HPPA_MMU_FLUSH_MASK, TARGET_LONG_BITS);

    /* never clear BTLBs, unless forced to do so. */
    if (ent < &env->tlb[HPPA_BTLB_ENTRIES] && !force_flush_btlb) {
        return;
    }

    memset(ent, 0, sizeof(*ent));
    /* va_b == -1 (all ones) guarantees the range test in
       hppa_find_tlb never matches this cleared slot. */
    ent->va_b = -1;
}
67 | ||
68 | static hppa_tlb_entry *hppa_alloc_tlb_ent(CPUHPPAState *env) | |
69 | { | |
70 | hppa_tlb_entry *ent; | |
fa824d99 HD |
71 | uint32_t i; |
72 | ||
73 | if (env->tlb_last < HPPA_BTLB_ENTRIES || env->tlb_last >= ARRAY_SIZE(env->tlb)) { | |
74 | i = HPPA_BTLB_ENTRIES; | |
75 | env->tlb_last = HPPA_BTLB_ENTRIES + 1; | |
76 | } else { | |
77 | i = env->tlb_last; | |
78 | env->tlb_last++; | |
79 | } | |
8d6ae7fb | 80 | |
8d6ae7fb RH |
81 | ent = &env->tlb[i]; |
82 | ||
fa824d99 | 83 | hppa_flush_tlb_ent(env, ent, false); |
8d6ae7fb RH |
84 | return ent; |
85 | } | |
86 | ||
/*
 * Translate virtual address ADDR under MMU_IDX for an access of TYPE
 * (PAGE_READ, PAGE_WRITE or PAGE_EXEC; TYPE == 0 denotes a
 * non-architectural access from within QEMU that bypasses permission,
 * D, B and T checks).  On return *PPHYS holds the physical address
 * and *PPROT the permitted protection bits; if TLB_ENTRY is non-NULL
 * it receives the matching TLB entry (or NULL if none).  Returns -1
 * on success, otherwise the EXCP_* number the caller should raise.
 */
int hppa_get_physical_address(CPUHPPAState *env, vaddr addr, int mmu_idx,
                              int type, hwaddr *pphys, int *pprot,
                              hppa_tlb_entry **tlb_entry)
{
    hwaddr phys;
    int prot, r_prot, w_prot, x_prot, priv;
    hppa_tlb_entry *ent;
    int ret = -1;

    if (tlb_entry) {
        *tlb_entry = NULL;
    }

    /* Virtual translation disabled.  Direct map virtual to physical.  */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        goto egress;
    }

    /* Find a valid tlb entry that matches the virtual address.  */
    ent = hppa_find_tlb(env, addr);
    if (ent == NULL || !ent->entry_valid) {
        phys = 0;
        prot = 0;
        ret = (type == PAGE_EXEC) ? EXCP_ITLB_MISS : EXCP_DTLB_MISS;
        goto egress;
    }

    if (tlb_entry) {
        *tlb_entry = ent;
    }

    /* We now know the physical address.  */
    phys = ent->pa + (addr - ent->va_b);

    /* Map TLB access_rights field to QEMU protection.  */
    /* Lower privilege number means more privilege; an access is allowed
       when the current level is at least as privileged as the entry's
       PL1/PL2 bounds require. */
    priv = MMU_IDX_TO_PRIV(mmu_idx);
    r_prot = (priv <= ent->ar_pl1) * PAGE_READ;
    w_prot = (priv <= ent->ar_pl2) * PAGE_WRITE;
    x_prot = (ent->ar_pl2 <= priv && priv <= ent->ar_pl1) * PAGE_EXEC;
    switch (ent->ar_type) {
    case 0: /* read-only: data page */
        prot = r_prot;
        break;
    case 1: /* read/write: dynamic data page */
        prot = r_prot | w_prot;
        break;
    case 2: /* read/execute: normal code page */
        prot = r_prot | x_prot;
        break;
    case 3: /* read/write/execute: dynamic code page */
        prot = r_prot | w_prot | x_prot;
        break;
    default: /* execute: promote to privilege level type & 3 */
        prot = x_prot;
        break;
    }

    /* access_id == 0 means public page and no check is performed */
    if ((env->psw & PSW_P) && ent->access_id) {
        /* If bits [31:1] match, and bit 0 is set, suppress write.  */
        int match = ent->access_id * 2 + 1;

        if (match == env->cr[CR_PID1] || match == env->cr[CR_PID2] ||
            match == env->cr[CR_PID3] || match == env->cr[CR_PID4]) {
            prot &= PAGE_READ | PAGE_EXEC;
            if (type == PAGE_WRITE) {
                ret = EXCP_DMPI;
                goto egress;
            }
        }
    }

    /* No guest access type indicates a non-architectural access from
       within QEMU.  Bypass checks for access, D, B and T bits.  */
    if (type == 0) {
        goto egress;
    }

    if (unlikely(!(prot & type))) {
        /* The access isn't allowed -- Inst/Data Memory Protection Fault.  */
        ret = (type & PAGE_EXEC) ? EXCP_IMP : EXCP_DMAR;
        goto egress;
    }

    /* In reverse priority order, check for conditions which raise faults.
       As we go, remove PROT bits that cover the condition we want to check.
       In this way, the resulting PROT will force a re-check of the
       architectural TLB entry for the next access.  */
    if (unlikely(!ent->d)) {
        if (type & PAGE_WRITE) {
            /* The D bit is not set -- TLB Dirty Bit Fault.  */
            ret = EXCP_TLB_DIRTY;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->b)) {
        if (type & PAGE_WRITE) {
            /* The B bit is set -- Data Memory Break Fault.  */
            ret = EXCP_DMB;
        }
        prot &= PAGE_READ | PAGE_EXEC;
    }
    if (unlikely(ent->t)) {
        if (!(type & PAGE_EXEC)) {
            /* The T bit is set -- Page Reference Fault.  */
            ret = EXCP_PAGE_REF;
        }
        prot &= PAGE_EXEC;
    }

 egress:
    *pphys = phys;
    *pprot = prot;
    trace_hppa_tlb_get_physical_address(env, ret, prot, addr, phys);
    return ret;
}
205 | ||
813dff13 HD |
206 | hwaddr hppa_cpu_get_phys_page_debug(CPUState *cs, vaddr addr) |
207 | { | |
650cdb2a RH |
208 | HPPACPU *cpu = HPPA_CPU(cs); |
209 | hwaddr phys; | |
210 | int prot, excp; | |
211 | ||
212 | /* If the (data) mmu is disabled, bypass translation. */ | |
213 | /* ??? We really ought to know if the code mmu is disabled too, | |
214 | in order to get the correct debugging dumps. */ | |
215 | if (!(cpu->env.psw & PSW_D)) { | |
216 | return addr; | |
217 | } | |
218 | ||
219 | excp = hppa_get_physical_address(&cpu->env, addr, MMU_KERNEL_IDX, 0, | |
fa824d99 | 220 | &phys, &prot, NULL); |
650cdb2a RH |
221 | |
222 | /* Since we're translating for debugging, the only error that is a | |
223 | hard error is no translation at all. Otherwise, while a real cpu | |
224 | access might not have permission, the debugger does. */ | |
225 | return excp == EXCP_DTLB_MISS ? -1 : phys; | |
813dff13 HD |
226 | } |
227 | ||
/*
 * Softmmu fill hook: translate ADDR for the given access TYPE and
 * install the result in the QEMU TLB.  Returns true on success.
 * On failure: if PROBE, returns false; otherwise raises the guest
 * exception and does not return (cpu_loop_exit_restore unwinds).
 */
bool hppa_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                       MMUAccessType type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    HPPACPU *cpu = HPPA_CPU(cs);
    CPUHPPAState *env = &cpu->env;
    hppa_tlb_entry *ent;
    int prot, excp, a_prot;
    hwaddr phys;

    /* Map the QEMU access type onto the PAGE_* bit we must hold. */
    switch (type) {
    case MMU_INST_FETCH:
        a_prot = PAGE_EXEC;
        break;
    case MMU_DATA_STORE:
        a_prot = PAGE_WRITE;
        break;
    default:
        a_prot = PAGE_READ;
        break;
    }

    excp = hppa_get_physical_address(env, addr, mmu_idx,
                                     a_prot, &phys, &prot, &ent);
    if (unlikely(excp >= 0)) {
        if (probe) {
            return false;
        }
        trace_hppa_tlb_fill_excp(env, addr, size, type, mmu_idx);
        /* Failure.  Raise the indicated exception. */
        cs->exception_index = excp;
        if (cpu->env.psw & PSW_Q) {
            /* Record the faulting offset/space for the handler. */
            /* ??? Needs tweaking for hppa64. */
            cpu->env.cr[CR_IOR] = addr;
            cpu->env.cr[CR_ISR] = addr >> 32;
        }
        cpu_loop_exit_restore(cs, retaddr);
    }

    trace_hppa_tlb_fill_success(env, addr & TARGET_PAGE_MASK,
                                phys & TARGET_PAGE_MASK, size, type, mmu_idx);
    /* Success!  Store the translation into the QEMU TLB.  A BTLB entry
       may span multiple pages, hence the page_size scaling. */
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE << (ent ? 2 * ent->page_size : 0));
    return true;
}
274 | ||
/* Insert (Insn/Data) TLB Address.  Note this is PA 1.1 only. */
void HELPER(itlba)(CPUHPPAState *env, target_ulong addr, target_ureg reg)
{
    hppa_tlb_entry *empty = NULL;
    int i;

    /* Zap any old entries covering ADDR; notice empty entries on the way.
       Only non-BTLB slots are considered — ITLBA never touches BTLBs. */
    for (i = HPPA_BTLB_ENTRIES; i < ARRAY_SIZE(env->tlb); ++i) {
        hppa_tlb_entry *ent = &env->tlb[i];
        if (ent->va_b <= addr && addr <= ent->va_e) {
            if (ent->entry_valid) {
                /* After this flush the entry is cleared, so it is
                   itself a candidate for reuse below. */
                hppa_flush_tlb_ent(env, ent, false);
            }
            if (!empty) {
                empty = ent;
            }
        }
    }

    /* If we didn't see an empty entry, evict one. */
    if (empty == NULL) {
        empty = hppa_alloc_tlb_ent(env);
    }

    /* Note that empty->entry_valid == 0 already.  The entry only
       becomes valid once ITLBP supplies the access bits. */
    empty->va_b = addr & TARGET_PAGE_MASK;
    empty->va_e = empty->va_b + TARGET_PAGE_SIZE - 1;
    /* Physical page number lives in bits [24:5] of the operand. */
    empty->pa = extract32(reg, 5, 20) << TARGET_PAGE_BITS;
    trace_hppa_tlb_itlba(env, empty, empty->va_b, empty->va_e, empty->pa);
}
305 | ||
fa824d99 | 306 | static void set_access_bits(CPUHPPAState *env, hppa_tlb_entry *ent, target_ureg reg) |
8d6ae7fb | 307 | { |
8d6ae7fb RH |
308 | ent->access_id = extract32(reg, 1, 18); |
309 | ent->u = extract32(reg, 19, 1); | |
310 | ent->ar_pl2 = extract32(reg, 20, 2); | |
311 | ent->ar_pl1 = extract32(reg, 22, 2); | |
312 | ent->ar_type = extract32(reg, 24, 3); | |
313 | ent->b = extract32(reg, 27, 1); | |
314 | ent->d = extract32(reg, 28, 1); | |
315 | ent->t = extract32(reg, 29, 1); | |
316 | ent->entry_valid = 1; | |
23c3d569 SS |
317 | trace_hppa_tlb_itlbp(env, ent, ent->access_id, ent->u, ent->ar_pl2, |
318 | ent->ar_pl1, ent->ar_type, ent->b, ent->d, ent->t); | |
8d6ae7fb | 319 | } |
63300a00 | 320 | |
fa824d99 HD |
321 | /* Insert (Insn/Data) TLB Protection. Note this is PA 1.1 only. */ |
322 | void HELPER(itlbp)(CPUHPPAState *env, target_ulong addr, target_ureg reg) | |
323 | { | |
324 | hppa_tlb_entry *ent = hppa_find_tlb(env, addr); | |
325 | ||
326 | if (unlikely(ent == NULL)) { | |
327 | qemu_log_mask(LOG_GUEST_ERROR, "ITLBP not following ITLBA\n"); | |
328 | return; | |
329 | } | |
330 | ||
331 | set_access_bits(env, ent, reg); | |
332 | } | |
333 | ||
63300a00 RH |
334 | /* Purge (Insn/Data) TLB. This is explicitly page-based, and is |
335 | synchronous across all processors. */ | |
336 | static void ptlb_work(CPUState *cpu, run_on_cpu_data data) | |
337 | { | |
b77af26e | 338 | CPUHPPAState *env = cpu_env(cpu); |
63300a00 RH |
339 | target_ulong addr = (target_ulong) data.target_ptr; |
340 | hppa_tlb_entry *ent = hppa_find_tlb(env, addr); | |
341 | ||
342 | if (ent && ent->entry_valid) { | |
fa824d99 | 343 | hppa_flush_tlb_ent(env, ent, false); |
63300a00 RH |
344 | } |
345 | } | |
346 | ||
347 | void HELPER(ptlb)(CPUHPPAState *env, target_ulong addr) | |
348 | { | |
25f32708 | 349 | CPUState *src = env_cpu(env); |
63300a00 | 350 | CPUState *cpu; |
23c3d569 | 351 | trace_hppa_tlb_ptlb(env); |
63300a00 RH |
352 | run_on_cpu_data data = RUN_ON_CPU_TARGET_PTR(addr); |
353 | ||
354 | CPU_FOREACH(cpu) { | |
355 | if (cpu != src) { | |
356 | async_run_on_cpu(cpu, ptlb_work, data); | |
357 | } | |
358 | } | |
359 | async_safe_run_on_cpu(src, ptlb_work, data); | |
360 | } | |
361 | ||
362 | /* Purge (Insn/Data) TLB entry. This affects an implementation-defined | |
363 | number of pages/entries (we choose all), and is local to the cpu. */ | |
364 | void HELPER(ptlbe)(CPUHPPAState *env) | |
365 | { | |
23c3d569 | 366 | trace_hppa_tlb_ptlbe(env); |
fa824d99 HD |
367 | qemu_log_mask(CPU_LOG_MMU, "FLUSH ALL TLB ENTRIES\n"); |
368 | memset(&env->tlb[HPPA_BTLB_ENTRIES], 0, | |
369 | sizeof(env->tlb) - HPPA_BTLB_ENTRIES * sizeof(env->tlb[0])); | |
370 | env->tlb_last = HPPA_BTLB_ENTRIES; | |
88b7ad10 | 371 | tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK); |
63300a00 | 372 | } |
2dfcca9f | 373 | |
d5de20bd SS |
374 | void cpu_hppa_change_prot_id(CPUHPPAState *env) |
375 | { | |
376 | if (env->psw & PSW_P) { | |
88b7ad10 | 377 | tlb_flush_by_mmuidx(env_cpu(env), HPPA_MMU_FLUSH_MASK); |
d5de20bd SS |
378 | } |
379 | } | |
380 | ||
381 | void HELPER(change_prot_id)(CPUHPPAState *env) | |
382 | { | |
383 | cpu_hppa_change_prot_id(env); | |
384 | } | |
385 | ||
2dfcca9f RH |
386 | target_ureg HELPER(lpa)(CPUHPPAState *env, target_ulong addr) |
387 | { | |
388 | hwaddr phys; | |
389 | int prot, excp; | |
390 | ||
391 | excp = hppa_get_physical_address(env, addr, MMU_KERNEL_IDX, 0, | |
fa824d99 | 392 | &phys, &prot, NULL); |
2dfcca9f RH |
393 | if (excp >= 0) { |
394 | if (env->psw & PSW_Q) { | |
395 | /* ??? Needs tweaking for hppa64. */ | |
396 | env->cr[CR_IOR] = addr; | |
397 | env->cr[CR_ISR] = addr >> 32; | |
398 | } | |
399 | if (excp == EXCP_DTLB_MISS) { | |
400 | excp = EXCP_NA_DTLB_MISS; | |
401 | } | |
23c3d569 | 402 | trace_hppa_tlb_lpa_failed(env, addr); |
2dfcca9f RH |
403 | hppa_dynamic_excp(env, excp, GETPC()); |
404 | } | |
23c3d569 | 405 | trace_hppa_tlb_lpa_success(env, addr, phys); |
2dfcca9f RH |
406 | return phys; |
407 | } | |
43e05652 RH |
408 | |
409 | /* Return the ar_type of the TLB at VADDR, or -1. */ | |
410 | int hppa_artype_for_page(CPUHPPAState *env, target_ulong vaddr) | |
411 | { | |
412 | hppa_tlb_entry *ent = hppa_find_tlb(env, vaddr); | |
413 | return ent ? ent->ar_type : -1; | |
414 | } | |
cf6b28d4 HD |
415 | |
/*
 * diag_btlb() emulates the PDC PDC_BLOCK_TLB firmware call to
 * allow operating systems to modify the Block TLB (BTLB) entries.
 * For implementation details see page 1-13 in
 * https://parisc.wiki.kernel.org/images-parisc/e/ef/Pdc11-v0.96-Ch1-procs.pdf
 *
 * PDC calling convention (as used here): the subfunction is in gr25,
 * arguments in gr24..gr19, and the status code is returned in gr28.
 */
void HELPER(diag_btlb)(CPUHPPAState *env)
{
    unsigned int phys_page, len, slot;
    int mmu_idx = cpu_mmu_index(env, 0);
    uintptr_t ra = GETPC();
    hppa_tlb_entry *btlb;
    uint64_t virt_page;
    uint32_t *vaddr;

#ifdef TARGET_HPPA64
    /* BTLBs are not supported on 64-bit CPUs */
    env->gr[28] = -1; /* nonexistent procedure */
    return;
#endif
    env->gr[28] = 0; /* PDC_OK */

    switch (env->gr[25]) {
    case 0:
        /* return BTLB parameters */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INFO\n");
        /* The guest passes a buffer for four 32-bit result words in gr24;
           probe it writable before filling it in. */
        vaddr = probe_access(env, env->gr[24], 4 * sizeof(target_ulong),
                             MMU_DATA_STORE, mmu_idx, ra);
        if (vaddr == NULL) {
            env->gr[28] = -10; /* invalid argument */
        } else {
            /* min_size, max_size, fixed-range slots, variable-range slots;
               all stored big-endian as seen by the guest. */
            vaddr[0] = cpu_to_be32(1);
            vaddr[1] = cpu_to_be32(16 * 1024);
            vaddr[2] = cpu_to_be32(HPPA_BTLB_FIXED);
            vaddr[3] = cpu_to_be32(HPPA_BTLB_VARIABLE);
        }
        break;
    case 1:
        /* insert BTLB entry */
        virt_page = env->gr[24];        /* upper 32 bits */
        virt_page <<= 32;
        virt_page |= env->gr[23];       /* lower 32 bits */
        phys_page = env->gr[22];
        len = env->gr[21];              /* length in pages */
        slot = env->gr[19];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_INSERT "
                    "0x%08llx-0x%08llx: vpage 0x%llx for phys page 0x%04x len %d "
                    "into slot %d\n",
                    (long long) virt_page << TARGET_PAGE_BITS,
                    (long long) (virt_page + len) << TARGET_PAGE_BITS,
                    (long long) virt_page, phys_page, len, slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            /* force flush of possibly existing BTLB entry */
            hppa_flush_tlb_ent(env, btlb, true);
            /* create new BTLB entry */
            btlb->va_b = virt_page << TARGET_PAGE_BITS;
            btlb->va_e = btlb->va_b + len * TARGET_PAGE_SIZE - 1;
            btlb->pa = phys_page << TARGET_PAGE_BITS;
            /* Protection word comes in gr20; set_access_bits also marks
               the entry valid. */
            set_access_bits(env, btlb, env->gr[20]);
            btlb->t = 0;
            btlb->d = 1;
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 2:
        /* Purge BTLB entry */
        slot = env->gr[22];
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE slot %d\n",
                                    slot);
        if (slot < HPPA_BTLB_ENTRIES) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        } else {
            env->gr[28] = -10; /* invalid argument */
        }
        break;
    case 3:
        /* Purge all BTLB entries */
        qemu_log_mask(CPU_LOG_MMU, "PDC_BLOCK_TLB: PDC_BTLB_PURGE_ALL\n");
        for (slot = 0; slot < HPPA_BTLB_ENTRIES; slot++) {
            btlb = &env->tlb[slot];
            hppa_flush_tlb_ent(env, btlb, true);
        }
        break;
    default:
        env->gr[28] = -2; /* nonexistent option */
        break;
    }
}