]> git.proxmox.com Git - mirror_qemu.git/blob - target/alpha/helper.c
target/alpha: Convert to CPUClass::tlb_fill
[mirror_qemu.git] / target / alpha / helper.c
1 /*
2 * Alpha emulation cpu helpers for qemu.
3 *
4 * Copyright (c) 2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
18 */
19
20 #include "qemu/osdep.h"
21
22 #include "cpu.h"
23 #include "exec/exec-all.h"
24 #include "fpu/softfloat.h"
25 #include "exec/helper-proto.h"
26 #include "qemu/qemu-print.h"
27
28
/*
 * CONVERT_BIT: relocate the bit selected by the single-bit mask SRC in X
 * to the position of the single-bit mask DST, by scaling with the ratio
 * of the two masks.  Both masks must be compile-time constants.
 *
 * Fix: parenthesize every use of SRC and DST so the macro stays correct
 * if an argument is ever an expression (e.g. "A | B" or "1 << n").
 */
#define CONVERT_BIT(X, SRC, DST) \
    ((SRC) > (DST)                              \
     ? ((X) / ((SRC) / (DST))) & (DST)          \
     : ((X) & (SRC)) * ((DST) / (SRC)))
31
32 uint64_t cpu_alpha_load_fpcr (CPUAlphaState *env)
33 {
34 return (uint64_t)env->fpcr << 32;
35 }
36
/* Store a new guest FPCR value and refresh the derived caches:
   the exception-enable mask, the dynamic rounding mode used by
   softfloat, and the flush-to-zero flags. */
void cpu_alpha_store_fpcr (CPUAlphaState *env, uint64_t val)
{
    /* The architectural FPCR occupies bits 63:32 of the written value. */
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Collect the five trap-disable bits, each repositioned onto the
       bit position of its corresponding exception-status bit. */
    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    env->fpcr = fpcr;
    /* An exception is enabled exactly when its disable bit is clear. */
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    /* Cache the dynamic rounding mode as a softfloat rounding constant.
       't' is reused as a scratch here; its mask value is dead by now. */
    switch (fpcr & FPCR_DYN_MASK) {
    case FPCR_DYN_NORMAL:
    default:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    /* Outputs flush to zero only when underflow traps are disabled AND
       underflow-to-zero is requested; DNZ independently flushes denormal
       inputs. */
    env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
}
71
/* TCG helper: read the guest-visible FPCR (see cpu_alpha_load_fpcr). */
uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}
76
/* TCG helper: write the FPCR and its caches (see cpu_alpha_store_fpcr). */
void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}
81
82 static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
83 {
84 #ifndef CONFIG_USER_ONLY
85 if (env->flags & ENV_FLAG_PAL_MODE) {
86 if (reg >= 8 && reg <= 14) {
87 return &env->shadow[reg - 8];
88 } else if (reg == 25) {
89 return &env->shadow[7];
90 }
91 }
92 #endif
93 return &env->ir[reg];
94 }
95
/* Read integer register REG, honouring the PALmode shadow bank. */
uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}
100
/* Write integer register REG, honouring the PALmode shadow bank. */
void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}
105
106 #if defined(CONFIG_USER_ONLY)
/* CPUClass::tlb_fill hook, user-mode variant: there is no MMU to walk,
   so every fault is raised to the guest as a memory-management fault.
   Note cpu_loop_exit_restore() does not return (the missing return
   statement is intentional), so 'probe' is effectively ignored here. */
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    /* Faulting virtual address, reported to the guest in trap_arg0. */
    cpu->env.trap_arg0 = address;
    cpu_loop_exit_restore(cs, retaddr);
}
117 #else
/* Returns the OSF/1 entMM failure indication, or -1 on success.
 *
 * Translate virtual address ADDR by walking the three-level page table
 * exactly like Unix PALcode would.  PROT_NEED is a PAGE_* mask of the
 * access rights required (0 for a debug probe); MMU_IDX selects the
 * privilege level (or MMU_PHYS_IDX for an identity mapping).  On return,
 * *PPHYS holds the physical address and *PPROT the PAGE_* rights granted.
 */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_long saddr = addr;           /* signed view for range checks */
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    /* Default failure: access-control violation. */
    int ret = MM_K_ACV;

    /* Handle physical accesses. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit. */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage. */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active. */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses. */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled. */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does. */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */

    /* L1 page table read.  Each level indexes by 10 VA bits. */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        /* Invalid intermediate PTE: translation-not-valid fault. */
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        /* Kernel-read denied on the table itself: ACV (default ret). */
        goto exit;
    }
    /* PFN lives in the high 32 bits of the PTE. */
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read. */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations.  The PTE_K?E bits are shifted by mmu_idx
       to select the kernel/executive/supervisor/user permission bits. */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  The FO{R,W,E} bits sit one
       position below the corresponding PAGE_* bit, hence the shift. */
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}
241
242 hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
243 {
244 AlphaCPU *cpu = ALPHA_CPU(cs);
245 target_ulong phys;
246 int prot, fail;
247
248 fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
249 return (fail >= 0 ? -1 : phys);
250 }
251
/* CPUClass::tlb_fill hook, system-emulation variant.  Translate ADDR,
   install the mapping in the TLB and return true on success.  On failure:
   if PROBE, return false; otherwise deliver an MM fault to the guest
   (cpu_loop_exit_restore does not return). */
bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    /* 1 << access_type maps MMU_DATA_LOAD/STORE/INST_FETCH onto
       PAGE_READ/PAGE_WRITE/PAGE_EXEC respectively. */
    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        /* OSF/1 entMM arguments: VA, failure code, access kind
           (-1 for ifetch, else the MMUAccessType value). */
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_INST_FETCH ? -1 : access_type);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
278
/* Legacy softmmu entry point, kept during the CPUClass::tlb_fill
   conversion: forward to the hook with probe=false. */
void tlb_fill(CPUState *cs, target_ulong addr, int size,
              MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
{
    alpha_cpu_tlb_fill(cs, addr, size, access_type, mmu_idx, false, retaddr);
}
284 #endif /* USER_ONLY */
285
/* Deliver the pending exception in cs->exception_index: log it, map it
   to the PALcode vector offset, and redirect execution to PALBR+offset
   in PALmode (system emulation; user-mode only does the logging). */
void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        /* Monotonic count of interrupts taken, for log correlation. */
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    /* Mark the exception as consumed. */
    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
    /* Map the exception to its PALcode entry-point offset. */
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged. Each entry point gets
           64 bytes to do its job. */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened. Emulate real hardware in
       that the low bit of the PC indicates PALmode. */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point. */
    env->pc = env->palbr + i;

    /* Switch to PALmode. */
    env->flags |= ENV_FLAG_PAL_MODE;
#endif /* !USER_ONLY */
}
397
/* Check whether any requested interrupt is deliverable at the current
   processor status IPL, and if so deliver the highest-priority one.
   Returns true if an interrupt was taken. */
bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode. */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL. */
    /* ??? This hard-codes the OSF/1 interrupt levels. */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        /* IPL <= 3: device interrupts (and everything above) allowed. */
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        /* Machine checks are not maskable below IPL 7; at IPL 7
           (no case) nothing is delivered. */
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}
441
/* Dump CPU state (monitor "info registers"): PC/PS, the 31 integer
   registers with their Linux ABI names, lock state, and — when
   CPU_DUMP_FPU is requested — the 31 FP registers. */
void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
    /* Index i -> OSF/Linux calling-convention name for register $i. */
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    qemu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    /* Three registers per output line. */
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "IR%02d %s " TARGET_FMT_lx "%c", i,
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "FIR%02d %016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
    }
    qemu_fprintf(f, "\n");
}
473
474 /* This should only be called from translate, via gen_excp.
475 We expect that ENV->PC has already been updated. */
476 void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
477 {
478 AlphaCPU *cpu = alpha_env_get_cpu(env);
479 CPUState *cs = CPU(cpu);
480
481 cs->exception_index = excp;
482 env->error_code = error;
483 cpu_loop_exit(cs);
484 }
485
/* This may be called from any of the helpers to set up EXCEPTION_INDEX.
   RETADDR is the host return address inside the TB (0 if the guest PC
   is already correct); EXCP/ERROR become exception_index/error_code. */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        /* Recover the guest PC from the host return address... */
        cpu_restore_state(cs, retaddr, true);
        /* Floating-point exceptions (our only users) point to the next PC. */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}
502
503 void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
504 int exc, uint64_t mask)
505 {
506 env->trap_arg0 = exc;
507 env->trap_arg1 = mask;
508 dynamic_excp(env, retaddr, EXCP_ARITH, 0);
509 }