/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/log.h"
#include "cpu.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "fpu/softfloat-types.h"
#include "exec/helper-proto.h"
#include "qemu/qemu-print.h"

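/*
 * Relocate a single flag bit from position SRC to position DST, where
 * both arguments are one-bit masks.  Dividing or multiplying by the
 * (power of two) ratio of the masks moves the bit without needing a
 * variable shift count.
 */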
#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

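/*
 * All architecturally visible FPCR bits live in the upper 32 bits of
 * the 64-bit register; env->fpcr caches just that half.
 */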
uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
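    /* Map the FPCR dynamic rounding-mode field to softfloat rounding modes. */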
    static const uint8_t rm_map[] = {
        [FPCR_DYN_NORMAL >> FPCR_DYN_SHIFT] = float_round_nearest_even,
        [FPCR_DYN_CHOPPED >> FPCR_DYN_SHIFT] = float_round_to_zero,
        [FPCR_DYN_MINUS >> FPCR_DYN_SHIFT] = float_round_down,
        [FPCR_DYN_PLUS >> FPCR_DYN_SHIFT] = float_round_up,
    };

    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    /* Record the raw value before adjusting for linux-user. */
    env->fpcr = fpcr;

#ifdef CONFIG_USER_ONLY
    /*
     * Override some of these bits with the contents of ENV->SWCR.
     * In system mode, some of these would trap to the kernel, at
     * which point the kernel's handler would emulate and apply
     * the software exception mask.
     */
    uint32_t soft_fpcr = alpha_ieee_swcr_to_fpcr(env->swcr) >> 32;
    fpcr |= soft_fpcr & (FPCR_STATUS_MASK | FPCR_DNZ);

    /*
     * The IOV exception is disabled by the kernel with SWCR_TRAP_ENABLE_INV,
     * which got mapped by alpha_ieee_swcr_to_fpcr to FPCR_INVD.
     * Add FPCR_IOV to fpcr_exc_enable so that it is handled identically.
     */
    t |= CONVERT_BIT(soft_fpcr, FPCR_INVD, FPCR_IOV);
#endif

    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

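    /*
     * t now holds the trap-disable bits relocated onto the corresponding
     * status-bit positions, so its complement is the set of enabled
     * exceptions.
     */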
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    env->fpcr_dyn_round = rm_map[(fpcr & FPCR_DYN_MASK) >> FPCR_DYN_SHIFT];
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;

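    /*
     * Flush denormal results to zero when underflow traps are disabled
     * (UNFD) and underflow-to-zero (UNDZ) is requested.
     */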
    t = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
#ifdef CONFIG_USER_ONLY
    t |= (env->swcr & SWCR_MAP_UMZ) != 0;
#endif
    env->fpcr_flush_to_zero = t;
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}

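/*
 * In PALmode, registers r8-r14 and r25 are replaced by their shadow
 * copies; every other access goes to the normal integer register file.
 */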
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->flags & ENV_FLAG_PAL_MODE) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}

#if defined(CONFIG_USER_ONLY)
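/*
 * Record a SIGSEGV as the trap arguments that PALcode would hand to the
 * kernel: a0 = faulting address, a1 = MMCSR fault code, a2 = cause
 * (-1 = execute, 0 = read, 1 = write).
 */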
void alpha_cpu_record_sigsegv(CPUState *cs, vaddr address,
                              MMUAccessType access_type,
                              bool maperr, uintptr_t retaddr)
{
    CPUAlphaState *env = cpu_env(cs);
    target_ulong mmcsr, cause;

    /* Assuming !maperr, infer the missing protection. */
    switch (access_type) {
    case MMU_DATA_LOAD:
        mmcsr = MM_K_FOR;
        cause = 0;
        break;
    case MMU_DATA_STORE:
        mmcsr = MM_K_FOW;
        cause = 1;
        break;
    case MMU_INST_FETCH:
        mmcsr = MM_K_FOE;
        cause = -1;
        break;
    default:
        g_assert_not_reached();
    }
    if (maperr) {
        if (address < BIT_ULL(TARGET_VIRT_ADDR_SPACE_BITS - 1)) {
            /* Userspace address, therefore page not mapped. */
            mmcsr = MM_K_TNV;
        } else {
            /* Kernel or invalid address. */
            mmcsr = MM_K_ACV;
        }
    }

    /* Record the arguments that PALcode would give to the kernel. */
    env->trap_arg0 = address;
    env->trap_arg1 = mmcsr;
    env->trap_arg2 = cause;
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success.  */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = env_cpu(env);
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit.  */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage.  */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active.  */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses.  */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled.  */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does.  */

    pt = env->ptbr;

    /* TODO: rather than using ldq_phys() to read the page table we should
     * use address_space_ldq() so that we can handle the case when
     * the page table read gives a bus fault, rather than ignoring it.
     * For the existing code the zero data that ldq_phys will return for
     * an access to invalid memory will result in our treating the page
     * table as invalid, which may even be the right behaviour.
     */

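    /*
     * Three-level table walk: each level is indexed by 10 bits of the
     * virtual address, and each PTE keeps its PFN in bits 63:32, hence
     * the ">> 32 << TARGET_PAGE_BITS" extraction below.
     */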
    /* L1 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read.  */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read.  */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

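    /*
     * The user-mode enable bits sit one position above the kernel ones
     * in the PTE, so PTE_KRE << mmu_idx / PTE_KWE << mmu_idx select the
     * enable bits for the current mode (kernel = 0, user = 1).
     */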
    /* Check access violations.  */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations.  */
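    /* FOR/FOW/FOE occupy PTE bits 1:3; shifting right by one aligns them
       with PAGE_READ/PAGE_WRITE/PAGE_EXEC (guaranteed by the check above),
       so they can clear prot directly.  */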
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

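/*
 * Debug translation (e.g. for the gdbstub): walk the tables as the
 * kernel with no particular access required; -1 signals failure.
 */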
hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(cpu_env(cs), addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

bool alpha_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                        MMUAccessType access_type, int mmu_idx,
                        bool probe, uintptr_t retaddr)
{
    CPUAlphaState *env = cpu_env(cs);
    target_ulong phys;
    int prot, fail;

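    /*
     * 1 << access_type maps MMU_DATA_LOAD/MMU_DATA_STORE/MMU_INST_FETCH
     * (0/1/2) onto PAGE_READ/PAGE_WRITE/PAGE_EXEC (1/2/4).
     */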
    fail = get_physical_address(env, addr, 1 << access_type,
                                mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        if (probe) {
            return false;
        }
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (access_type == MMU_DATA_LOAD ? 0ull :
                          access_type == MMU_DATA_STORE ? 1ull :
                          /* access_type == MMU_INST_FETCH */ -1ull);
        cpu_loop_exit_restore(cs, retaddr);
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}

void alpha_cpu_do_interrupt(CPUState *cs)
{
    CPUAlphaState *env = cpu_env(cs);
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

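    /* Convert the exception index into an offset from the PALcode base;
       these offsets match the entry points of the emulated Unix PALcode.  */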
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged.  Each entry point gets
           64 bytes to do its job.  */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened.  Emulate real hardware in
       that the low bit of the PC indicates PALmode.  */
    env->exc_addr = env->pc | (env->flags & ENV_FLAG_PAL_MODE);

    /* Continue execution at the PALcode entry point.  */
    env->pc = env->palbr + i;

    /* Switch to PALmode.  */
    env->flags |= ENV_FLAG_PAL_MODE;
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    CPUAlphaState *env = cpu_env(cs);
    int idx = -1;

    /* We never take interrupts while in PALmode.  */
    if (env->flags & ENV_FLAG_PAL_MODE) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL.  */
    /* ??? This hard-codes the OSF/1 interrupt levels.  */
    switch ((env->flags >> ENV_FLAG_PS_SHIFT) & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

#endif /* !CONFIG_USER_ONLY */

void alpha_cpu_dump_state(CPUState *cs, FILE *f, int flags)
{
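    /* Software calling-convention names for r0-r30 as used by Linux;
       r31 is the hardwired zero register and is not printed.  */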
    static const char linux_reg_names[31][4] = {
        "v0",  "t0",  "t1",  "t2",  "t3",  "t4",  "t5",  "t6",
        "t7",  "s0",  "s1",  "s2",  "s3",  "s4",  "s5",  "fp",
        "a0",  "a1",  "a2",  "a3",  "a4",  "a5",  "t8",  "t9",
        "t10", "t11", "ra",  "t12", "at",  "gp",  "sp"
    };
    CPUAlphaState *env = cpu_env(cs);
    int i;

    qemu_fprintf(f, "PC      " TARGET_FMT_lx " PS      %02x\n",
                 env->pc, extract32(env->flags, ENV_FLAG_PS_SHIFT, 8));
    for (i = 0; i < 31; i++) {
        qemu_fprintf(f, "%-8s" TARGET_FMT_lx "%c",
                     linux_reg_names[i], cpu_alpha_load_gr(env, i),
                     (i % 3) == 2 ? '\n' : ' ');
    }

    qemu_fprintf(f, "lock_a  " TARGET_FMT_lx " lock_v  " TARGET_FMT_lx "\n",
                 env->lock_addr, env->lock_value);

    if (flags & CPU_DUMP_FPU) {
        for (i = 0; i < 31; i++) {
            qemu_fprintf(f, "f%-7d%016" PRIx64 "%c", i, env->fir[i],
                         (i % 3) == 2 ? '\n' : ' ');
        }
        qemu_fprintf(f, "fpcr    %016" PRIx64 "\n", cpu_alpha_load_fpcr(env));
    }
    qemu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
G_NORETURN void helper_excp(CPUAlphaState *env, int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
G_NORETURN void dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                             int excp, int error)
{
    CPUState *cs = env_cpu(env);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC.  */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}

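/* Raise an arithmetic trap.  trap_arg0 carries the exception summary and
   trap_arg1 the register write mask, as the OSF/1 entArith entry expects.  */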
G_NORETURN void arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                           int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}