/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
25 #include "softfloat.h"
28 uint64_t cpu_alpha_load_fpcr (CPUAlphaState
*env
)
33 t
= env
->fpcr_exc_status
;
36 if (t
& float_flag_invalid
) {
39 if (t
& float_flag_divbyzero
) {
42 if (t
& float_flag_overflow
) {
45 if (t
& float_flag_underflow
) {
48 if (t
& float_flag_inexact
) {
53 t
= env
->fpcr_exc_mask
;
54 if (t
& float_flag_invalid
) {
57 if (t
& float_flag_divbyzero
) {
60 if (t
& float_flag_overflow
) {
63 if (t
& float_flag_underflow
) {
66 if (t
& float_flag_inexact
) {
70 switch (env
->fpcr_dyn_round
) {
71 case float_round_nearest_even
:
74 case float_round_down
:
80 case float_round_to_zero
:
81 r
|= FPCR_DYN_CHOPPED
;
98 void cpu_alpha_store_fpcr (CPUAlphaState
*env
, uint64_t val
)
103 if (val
& FPCR_INV
) {
104 t
|= float_flag_invalid
;
106 if (val
& FPCR_DZE
) {
107 t
|= float_flag_divbyzero
;
109 if (val
& FPCR_OVF
) {
110 t
|= float_flag_overflow
;
112 if (val
& FPCR_UNF
) {
113 t
|= float_flag_underflow
;
115 if (val
& FPCR_INE
) {
116 t
|= float_flag_inexact
;
118 env
->fpcr_exc_status
= t
;
121 if (val
& FPCR_INVD
) {
122 t
|= float_flag_invalid
;
124 if (val
& FPCR_DZED
) {
125 t
|= float_flag_divbyzero
;
127 if (val
& FPCR_OVFD
) {
128 t
|= float_flag_overflow
;
130 if (val
& FPCR_UNFD
) {
131 t
|= float_flag_underflow
;
133 if (val
& FPCR_INED
) {
134 t
|= float_flag_inexact
;
136 env
->fpcr_exc_mask
= t
;
138 switch (val
& FPCR_DYN_MASK
) {
139 case FPCR_DYN_CHOPPED
:
140 t
= float_round_to_zero
;
143 t
= float_round_down
;
145 case FPCR_DYN_NORMAL
:
146 t
= float_round_nearest_even
;
152 env
->fpcr_dyn_round
= t
;
154 env
->fpcr_flush_to_zero
155 = (val
& (FPCR_UNDZ
|FPCR_UNFD
)) == (FPCR_UNDZ
|FPCR_UNFD
);
157 env
->fpcr_dnz
= (val
& FPCR_DNZ
) != 0;
158 env
->fpcr_dnod
= (val
& FPCR_DNOD
) != 0;
159 env
->fpcr_undz
= (val
& FPCR_UNDZ
) != 0;
162 uint64_t helper_load_fpcr(CPUAlphaState
*env
)
164 return cpu_alpha_load_fpcr(env
);
167 void helper_store_fpcr(CPUAlphaState
*env
, uint64_t val
)
169 cpu_alpha_store_fpcr(env
, val
);
172 #if defined(CONFIG_USER_ONLY)
173 int cpu_alpha_handle_mmu_fault(CPUAlphaState
*env
, target_ulong address
,
176 env
->exception_index
= EXCP_MMFAULT
;
177 env
->trap_arg0
= address
;
181 void swap_shadow_regs(CPUAlphaState
*env
)
183 uint64_t i0
, i1
, i2
, i3
, i4
, i5
, i6
, i7
;
194 env
->ir
[8] = env
->shadow
[0];
195 env
->ir
[9] = env
->shadow
[1];
196 env
->ir
[10] = env
->shadow
[2];
197 env
->ir
[11] = env
->shadow
[3];
198 env
->ir
[12] = env
->shadow
[4];
199 env
->ir
[13] = env
->shadow
[5];
200 env
->ir
[14] = env
->shadow
[6];
201 env
->ir
[25] = env
->shadow
[7];
213 /* Returns the OSF/1 entMM failure indication, or -1 on success. */
214 static int get_physical_address(CPUAlphaState
*env
, target_ulong addr
,
215 int prot_need
, int mmu_idx
,
216 target_ulong
*pphys
, int *pprot
)
218 target_long saddr
= addr
;
219 target_ulong phys
= 0;
220 target_ulong L1pte
, L2pte
, L3pte
;
221 target_ulong pt
, index
;
225 /* Ensure that the virtual address is properly sign-extended from
226 the last implemented virtual address bit. */
227 if (saddr
>> TARGET_VIRT_ADDR_SPACE_BITS
!= saddr
>> 63) {
231 /* Translate the superpage. */
232 /* ??? When we do more than emulate Unix PALcode, we'll need to
233 determine which KSEG is actually active. */
234 if (saddr
< 0 && ((saddr
>> 41) & 3) == 2) {
235 /* User-space cannot access KSEG addresses. */
236 if (mmu_idx
!= MMU_KERNEL_IDX
) {
240 /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
241 We would not do this if the 48-bit KSEG is enabled. */
242 phys
= saddr
& ((1ull << 40) - 1);
243 phys
|= (saddr
& (1ull << 40)) << 3;
245 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
250 /* Interpret the page table exactly like PALcode does. */
254 /* L1 page table read. */
255 index
= (addr
>> (TARGET_PAGE_BITS
+ 20)) & 0x3ff;
256 L1pte
= ldq_phys(pt
+ index
*8);
258 if (unlikely((L1pte
& PTE_VALID
) == 0)) {
262 if (unlikely((L1pte
& PTE_KRE
) == 0)) {
265 pt
= L1pte
>> 32 << TARGET_PAGE_BITS
;
267 /* L2 page table read. */
268 index
= (addr
>> (TARGET_PAGE_BITS
+ 10)) & 0x3ff;
269 L2pte
= ldq_phys(pt
+ index
*8);
271 if (unlikely((L2pte
& PTE_VALID
) == 0)) {
275 if (unlikely((L2pte
& PTE_KRE
) == 0)) {
278 pt
= L2pte
>> 32 << TARGET_PAGE_BITS
;
280 /* L3 page table read. */
281 index
= (addr
>> TARGET_PAGE_BITS
) & 0x3ff;
282 L3pte
= ldq_phys(pt
+ index
*8);
284 phys
= L3pte
>> 32 << TARGET_PAGE_BITS
;
285 if (unlikely((L3pte
& PTE_VALID
) == 0)) {
290 #if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
291 # error page bits out of date
294 /* Check access violations. */
295 if (L3pte
& (PTE_KRE
<< mmu_idx
)) {
296 prot
|= PAGE_READ
| PAGE_EXEC
;
298 if (L3pte
& (PTE_KWE
<< mmu_idx
)) {
301 if (unlikely((prot
& prot_need
) == 0 && prot_need
)) {
305 /* Check fault-on-operation violations. */
306 prot
&= ~(L3pte
>> 1);
308 if (unlikely((prot
& prot_need
) == 0)) {
309 ret
= (prot_need
& PAGE_EXEC
? MM_K_FOE
:
310 prot_need
& PAGE_WRITE
? MM_K_FOW
:
311 prot_need
& PAGE_READ
? MM_K_FOR
: -1);
320 target_phys_addr_t
cpu_get_phys_page_debug(CPUAlphaState
*env
, target_ulong addr
)
325 fail
= get_physical_address(env
, addr
, 0, 0, &phys
, &prot
);
326 return (fail
>= 0 ? -1 : phys
);
329 int cpu_alpha_handle_mmu_fault(CPUAlphaState
*env
, target_ulong addr
, int rw
,
335 fail
= get_physical_address(env
, addr
, 1 << rw
, mmu_idx
, &phys
, &prot
);
336 if (unlikely(fail
>= 0)) {
337 env
->exception_index
= EXCP_MMFAULT
;
338 env
->trap_arg0
= addr
;
339 env
->trap_arg1
= fail
;
340 env
->trap_arg2
= (rw
== 2 ? -1 : rw
);
344 tlb_set_page(env
, addr
& TARGET_PAGE_MASK
, phys
& TARGET_PAGE_MASK
,
345 prot
, mmu_idx
, TARGET_PAGE_SIZE
);
348 #endif /* USER_ONLY */
350 void do_interrupt (CPUAlphaState
*env
)
352 int i
= env
->exception_index
;
354 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
356 const char *name
= "<unknown>";
365 case EXCP_SMP_INTERRUPT
:
366 name
= "smp_interrupt";
368 case EXCP_CLK_INTERRUPT
:
369 name
= "clk_interrupt";
371 case EXCP_DEV_INTERRUPT
:
372 name
= "dev_interrupt";
399 qemu_log("INT %6d: %s(%#x) pc=%016" PRIx64
" sp=%016" PRIx64
"\n",
400 ++count
, name
, env
->error_code
, env
->pc
, env
->ir
[IR_SP
]);
403 env
->exception_index
= -1;
405 #if !defined(CONFIG_USER_ONLY)
413 case EXCP_SMP_INTERRUPT
:
416 case EXCP_CLK_INTERRUPT
:
419 case EXCP_DEV_INTERRUPT
:
439 /* There are 64 entry points for both privileged and unprivileged,
440 with bit 0x80 indicating unprivileged. Each entry point gets
441 64 bytes to do its job. */
443 i
= 0x2000 + (i
- 0x80) * 64;
449 cpu_abort(env
, "Unhandled CPU exception");
452 /* Remember where the exception happened. Emulate real hardware in
453 that the low bit of the PC indicates PALmode. */
454 env
->exc_addr
= env
->pc
| env
->pal_mode
;
456 /* Continue execution at the PALcode entry point. */
457 env
->pc
= env
->palbr
+ i
;
459 /* Switch to PALmode. */
460 if (!env
->pal_mode
) {
462 swap_shadow_regs(env
);
464 #endif /* !USER_ONLY */
467 void cpu_dump_state (CPUAlphaState
*env
, FILE *f
, fprintf_function cpu_fprintf
,
470 static const char *linux_reg_names
[] = {
471 "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
472 "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
473 "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
474 "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
478 cpu_fprintf(f
, " PC " TARGET_FMT_lx
" PS %02x\n",
480 for (i
= 0; i
< 31; i
++) {
481 cpu_fprintf(f
, "IR%02d %s " TARGET_FMT_lx
" ", i
,
482 linux_reg_names
[i
], env
->ir
[i
]);
484 cpu_fprintf(f
, "\n");
487 cpu_fprintf(f
, "lock_a " TARGET_FMT_lx
" lock_v " TARGET_FMT_lx
"\n",
488 env
->lock_addr
, env
->lock_value
);
490 for (i
= 0; i
< 31; i
++) {
491 cpu_fprintf(f
, "FIR%02d " TARGET_FMT_lx
" ", i
,
492 *((uint64_t *)(&env
->fir
[i
])));
494 cpu_fprintf(f
, "\n");
496 cpu_fprintf(f
, "\n");
499 void do_restore_state(CPUAlphaState
*env
, void *retaddr
)
501 uintptr_t pc
= (uintptr_t)retaddr
;
503 TranslationBlock
*tb
= tb_find_pc(pc
);
505 cpu_restore_state(tb
, env
, pc
);
510 /* This should only be called from translate, via gen_excp.
511 We expect that ENV->PC has already been updated. */
512 void QEMU_NORETURN
helper_excp(CPUAlphaState
*env
, int excp
, int error
)
514 env
->exception_index
= excp
;
515 env
->error_code
= error
;
519 /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
520 void QEMU_NORETURN
dynamic_excp(CPUAlphaState
*env
, void *retaddr
,
523 env
->exception_index
= excp
;
524 env
->error_code
= error
;
525 do_restore_state(env
, retaddr
);
529 void QEMU_NORETURN
arith_excp(CPUAlphaState
*env
, void *retaddr
,
530 int exc
, uint64_t mask
)
532 env
->trap_arg0
= exc
;
533 env
->trap_arg1
= mask
;
534 dynamic_excp(env
, retaddr
, EXCP_ARITH
, 0);