/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
20 #include "qemu/osdep.h"
23 #include "exec/exec-all.h"
24 #include "exec/page-protection.h"
25 #include "fpu/softfloat-types.h"
26 #include "exec/helper-proto.h"
27 #include "qemu/qemu-print.h"
30 #define CONVERT_BIT(X, SRC, DST) \
31 (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))
33 uint64_t cpu_alpha_load_fpcr(CPUAlphaState
*env
)
35 return (uint64_t)env
->fpcr
<< 32;
38 void cpu_alpha_store_fpcr(CPUAlphaState
*env
, uint64_t val
)
40 static const uint8_t rm_map
[] = {
41 [FPCR_DYN_NORMAL
>> FPCR_DYN_SHIFT
] = float_round_nearest_even
,
42 [FPCR_DYN_CHOPPED
>> FPCR_DYN_SHIFT
] = float_round_to_zero
,
43 [FPCR_DYN_MINUS
>> FPCR_DYN_SHIFT
] = float_round_down
,
44 [FPCR_DYN_PLUS
>> FPCR_DYN_SHIFT
] = float_round_up
,
47 uint32_t fpcr
= val
>> 32;
50 /* Record the raw value before adjusting for linux-user. */
53 #ifdef CONFIG_USER_ONLY
55 * Override some of these bits with the contents of ENV->SWCR.
56 * In system mode, some of these would trap to the kernel, at
57 * which point the kernel's handler would emulate and apply
58 * the software exception mask.
60 uint32_t soft_fpcr
= alpha_ieee_swcr_to_fpcr(env
->swcr
) >> 32;
61 fpcr
|= soft_fpcr
& (FPCR_STATUS_MASK
| FPCR_DNZ
);
64 * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
65 * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
66 * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
68 t
|= CONVERT_BIT(soft_fpcr
, FPCR_INVD
, FPCR_IOV
);
71 t
|= CONVERT_BIT(fpcr
, FPCR_INED
, FPCR_INE
);
72 t
|= CONVERT_BIT(fpcr
, FPCR_UNFD
, FPCR_UNF
);
73 t
|= CONVERT_BIT(fpcr
, FPCR_OVFD
, FPCR_OVF
);
74 t
|= CONVERT_BIT(fpcr
, FPCR_DZED
, FPCR_DZE
);
75 t
|= CONVERT_BIT(fpcr
, FPCR_INVD
, FPCR_INV
);
77 env
->fpcr_exc_enable
= ~t
& FPCR_STATUS_MASK
;
79 env
->fpcr_dyn_round
= rm_map
[(fpcr
& FPCR_DYN_MASK
) >> FPCR_DYN_SHIFT
];
80 env
->fp_status
.flush_inputs_to_zero
= (fpcr
& FPCR_DNZ
) != 0;
82 t
= (fpcr
& FPCR_UNFD
) && (fpcr
& FPCR_UNDZ
);
83 #ifdef CONFIG_USER_ONLY
84 t
|= (env
->swcr
& SWCR_MAP_UMZ
) != 0;
86 env
->fpcr_flush_to_zero
= t
;
89 uint64_t helper_load_fpcr(CPUAlphaState
*env
)
91 return cpu_alpha_load_fpcr(env
);
94 void helper_store_fpcr(CPUAlphaState
*env
, uint64_t val
)
96 cpu_alpha_store_fpcr(env
, val
);
99 static uint64_t *cpu_alpha_addr_gr(CPUAlphaState
*env
, unsigned reg
)
101 #ifndef CONFIG_USER_ONLY
102 if (env
->flags
& ENV_FLAG_PAL_MODE
) {
103 if (reg
>= 8 && reg
<= 14) {
104 return &env
->shadow
[reg
- 8];
105 } else if (reg
== 25) {
106 return &env
->shadow
[7];
110 return &env
->ir
[reg
];
113 uint64_t cpu_alpha_load_gr(CPUAlphaState
*env
, unsigned reg
)
115 return *cpu_alpha_addr_gr(env
, reg
);
118 void cpu_alpha_store_gr(CPUAlphaState
*env
, unsigned reg
, uint64_t val
)
120 *cpu_alpha_addr_gr(env
, reg
) = val
;
123 #if defined(CONFIG_USER_ONLY)
124 void alpha_cpu_record_sigsegv(CPUState
*cs
, vaddr address
,
125 MMUAccessType access_type
,
126 bool maperr
, uintptr_t retaddr
)
128 CPUAlphaState
*env
= cpu_env(cs
);
129 target_ulong mmcsr
, cause
;
131 /* Assuming !maperr, infer the missing protection. */
132 switch (access_type
) {
146 g_assert_not_reached();
149 if (address
< BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS
- 1)) {
150 /* Userspace address, therefore page not mapped. */
153 /* Kernel or invalid address. */
158 /* Record the arguments that PALcode would give to the kernel. */
159 env
->trap_arg0
= address
;
160 env
->trap_arg1
= mmcsr
;
161 env
->trap_arg2
= cause
;
164 /* Returns the OSF/1 entMM failure indication, or -1 on success. */
165 static int get_physical_address(CPUAlphaState
*env
, target_ulong addr
,
166 int prot_need
, int mmu_idx
,
167 target_ulong
*pphys
, int *pprot
)
169 CPUState
*cs
= env_cpu(env
);
170 target_long saddr
= addr
;
171 target_ulong phys
= 0;
172 target_ulong L1pte
, L2pte
, L3pte
;
173 target_ulong pt
, index
;
177 /* Handle physical accesses. */
178 if (mmu_idx
== MMU_PHYS_IDX
) {
180 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
185 /* Ensure that the virtual address is properly sign-extended from
186 the last implemented virtual address bit. */
187 if (saddr
>> TARGET_VIRT_ADDR_SPACE_BITS
!= saddr
>> 63) {
191 /* Translate the superpage. */
192 /* ??? When we do more than emulate Unix PALcode, we'll need to
193 determine which KSEG is actually active. */
194 if (saddr
< 0 && ((saddr
>> 41) & 3) == 2) {
195 /* User-space cannot access KSEG addresses. */
196 if (mmu_idx
!= MMU_KERNEL_IDX
) {
200 /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
201 We would not do this if the 48-bit KSEG is enabled. */
202 phys
= saddr
& ((1ull << 40) - 1);
203 phys
|= (saddr
& (1ull << 40)) << 3;
205 prot
= PAGE_READ
| PAGE_WRITE
| PAGE_EXEC
;
210 /* Interpret the page table exactly like PALcode does. */
214 /* TODO: rather than using ldq_phys() to read the page table we should
215 * use address_space_ldq() so that we can handle the case when
216 * the page table read gives a bus fault, rather than ignoring it.
217 * For the existing code the zero data that ldq_phys will return for
218 * an access to invalid memory will result in our treating the page
219 * table as invalid, which may even be the right behaviour.
222 /* L1 page table read. */
223 index
= (addr
>> (TARGET_PAGE_BITS
+ 20)) & 0x3ff;
224 L1pte
= ldq_phys(cs
->as
, pt
+ index
*8);
226 if (unlikely((L1pte
& PTE_VALID
) == 0)) {
230 if (unlikely((L1pte
& PTE_KRE
) == 0)) {
233 pt
= L1pte
>> 32 << TARGET_PAGE_BITS
;
235 /* L2 page table read. */
236 index
= (addr
>> (TARGET_PAGE_BITS
+ 10)) & 0x3ff;
237 L2pte
= ldq_phys(cs
->as
, pt
+ index
*8);
239 if (unlikely((L2pte
& PTE_VALID
) == 0)) {
243 if (unlikely((L2pte
& PTE_KRE
) == 0)) {
246 pt
= L2pte
>> 32 << TARGET_PAGE_BITS
;
248 /* L3 page table read. */
249 index
= (addr
>> TARGET_PAGE_BITS
) & 0x3ff;
250 L3pte
= ldq_phys(cs
->as
, pt
+ index
*8);
252 phys
= L3pte
>> 32 << TARGET_PAGE_BITS
;
253 if (unlikely((L3pte
& PTE_VALID
) == 0)) {
258 #if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
259 # error page bits out of date
262 /* Check access violations. */
263 if (L3pte
& (PTE_KRE
<< mmu_idx
)) {
264 prot
|= PAGE_READ
| PAGE_EXEC
;
266 if (L3pte
& (PTE_KWE
<< mmu_idx
)) {
269 if (unlikely((prot
& prot_need
) == 0 && prot_need
)) {
273 /* Check fault-on-operation violations. */
274 prot
&= ~(L3pte
>> 1);
276 if (unlikely((prot
& prot_need
) == 0)) {
277 ret
= (prot_need
& PAGE_EXEC
? MM_K_FOE
:
278 prot_need
& PAGE_WRITE
? MM_K_FOW
:
279 prot_need
& PAGE_READ
? MM_K_FOR
: -1);
288 hwaddr
alpha_cpu_get_phys_page_debug(CPUState
*cs
, vaddr addr
)
293 fail
= get_physical_address(cpu_env(cs
), addr
, 0, 0, &phys
, &prot
);
294 return (fail
>= 0 ? -1 : phys
);
297 bool alpha_cpu_tlb_fill(CPUState
*cs
, vaddr addr
, int size
,
298 MMUAccessType access_type
, int mmu_idx
,
299 bool probe
, uintptr_t retaddr
)
301 CPUAlphaState
*env
= cpu_env(cs
);
305 fail
= get_physical_address(env
, addr
, 1 << access_type
,
306 mmu_idx
, &phys
, &prot
);
307 if (unlikely(fail
>= 0)) {
311 cs
->exception_index
= EXCP_MMFAULT
;
312 env
->trap_arg0
= addr
;
313 env
->trap_arg1
= fail
;
314 env
->trap_arg2
= (access_type
== MMU_DATA_LOAD
? 0ull :
315 access_type
== MMU_DATA_STORE
? 1ull :
316 /* access_type == MMU_INST_FETCH */ -1ull);
317 cpu_loop_exit_restore(cs
, retaddr
);
320 tlb_set_page(cs
, addr
& TARGET_PAGE_MASK
, phys
& TARGET_PAGE_MASK
,
321 prot
, mmu_idx
, TARGET_PAGE_SIZE
);
325 void alpha_cpu_do_interrupt(CPUState
*cs
)
327 CPUAlphaState
*env
= cpu_env(cs
);
328 int i
= cs
->exception_index
;
330 if (qemu_loglevel_mask(CPU_LOG_INT
)) {
332 const char *name
= "<unknown>";
341 case EXCP_SMP_INTERRUPT
:
342 name
= "smp_interrupt";
344 case EXCP_CLK_INTERRUPT
:
345 name
= "clk_interrupt";
347 case EXCP_DEV_INTERRUPT
:
348 name
= "dev_interrupt";
369 qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
370 PRIx64
" sp=%016" PRIx64
"\n",
371 ++count
, name
, env
->error_code
, cs
->cpu_index
,
372 env
->pc
, env
->ir
[IR_SP
]);
375 cs
->exception_index
= -1;
384 case EXCP_SMP_INTERRUPT
:
387 case EXCP_CLK_INTERRUPT
:
390 case EXCP_DEV_INTERRUPT
:
410 /* There are 64 entry points for both privileged and unprivileged,
411 with bit 0x80 indicating unprivileged. Each entry point gets
412 64 bytes to do its job. */
414 i
= 0x2000 + (i
- 0x80) * 64;
420 cpu_abort(cs
, "Unhandled CPU exception");
423 /* Remember where the exception happened. Emulate real hardware in
424 that the low bit of the PC indicates PALmode. */
425 env
->exc_addr
= env
->pc
| (env
->flags
& ENV_FLAG_PAL_MODE
);
427 /* Continue execution at the PALcode entry point. */
428 env
->pc
= env
->palbr
+ i
;
430 /* Switch to PALmode. */
431 env
->flags
|= ENV_FLAG_PAL_MODE
;
434 bool alpha_cpu_exec_interrupt(CPUState
*cs
, int interrupt_request
)
436 CPUAlphaState
*env
= cpu_env(cs
);
439 /* We never take interrupts while in PALmode. */
440 if (env
->flags
& ENV_FLAG_PAL_MODE
) {
444 /* Fall through the switch, collecting the highest priority
445 interrupt that isn't masked by the processor status IPL. */
446 /* ??? This hard-codes the OSF/1 interrupt levels. */
447 switch ((env
->flags
>> ENV_FLAG_PS_SHIFT
) & PS_INT_MASK
) {
449 if (interrupt_request
& CPU_INTERRUPT_HARD
) {
450 idx
= EXCP_DEV_INTERRUPT
;
454 if (interrupt_request
& CPU_INTERRUPT_TIMER
) {
455 idx
= EXCP_CLK_INTERRUPT
;
459 if (interrupt_request
& CPU_INTERRUPT_SMP
) {
460 idx
= EXCP_SMP_INTERRUPT
;
464 if (interrupt_request
& CPU_INTERRUPT_MCHK
) {
469 cs
->exception_index
= idx
;
471 alpha_cpu_do_interrupt(cs
);
477 #endif /* !CONFIG_USER_ONLY */
479 void alpha_cpu_dump_state(CPUState
*cs
, FILE *f
, int flags
)
481 static const char linux_reg_names
[31][4] = {
482 "v0", "t0", "t1", "t2", "t3", "t4", "t5", "t6",
483 "t7", "s0", "s1", "s2", "s3", "s4", "s5", "fp",
484 "a0", "a1", "a2", "a3", "a4", "a5", "t8", "t9",
485 "t10", "t11", "ra", "t12", "at", "gp", "sp"
487 CPUAlphaState
*env
= cpu_env(cs
);
490 qemu_fprintf(f
, "PC " TARGET_FMT_lx
" PS %02x\n",
491 env
->pc
, extract32(env
->flags
, ENV_FLAG_PS_SHIFT
, 8));
492 for (i
= 0; i
< 31; i
++) {
493 qemu_fprintf(f
, "%-8s" TARGET_FMT_lx
"%c",
494 linux_reg_names
[i
], cpu_alpha_load_gr(env
, i
),
495 (i
% 3) == 2 ? '\n' : ' ');
498 qemu_fprintf(f
, "lock_a " TARGET_FMT_lx
" lock_v " TARGET_FMT_lx
"\n",
499 env
->lock_addr
, env
->lock_value
);
501 if (flags
& CPU_DUMP_FPU
) {
502 for (i
= 0; i
< 31; i
++) {
503 qemu_fprintf(f
, "f%-7d%016" PRIx64
"%c", i
, env
->fir
[i
],
504 (i
% 3) == 2 ? '\n' : ' ');
506 qemu_fprintf(f
, "fpcr %016" PRIx64
"\n", cpu_alpha_load_fpcr(env
));
508 qemu_fprintf(f
, "\n");
511 /* This should only be called from translate, via gen_excp.
512 We expect that ENV->PC has already been updated. */
513 G_NORETURN
void helper_excp(CPUAlphaState
*env
, int excp
, int error
)
515 CPUState
*cs
= env_cpu(env
);
517 cs
->exception_index
= excp
;
518 env
->error_code
= error
;
522 /* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
523 G_NORETURN
void dynamic_excp(CPUAlphaState
*env
, uintptr_t retaddr
,
526 CPUState
*cs
= env_cpu(env
);
528 cs
->exception_index
= excp
;
529 env
->error_code
= error
;
531 cpu_restore_state(cs
, retaddr
);
532 /* Floating-point exceptions (our only users) point to the next PC. */
538 G_NORETURN
void arith_excp(CPUAlphaState
*env
, uintptr_t retaddr
,
539 int exc
, uint64_t mask
)
541 env
->trap_arg0
= exc
;
542 env
->trap_arg1
= mask
;
543 dynamic_excp(env
, retaddr
, EXCP_ARITH
, 0);