target-alpha/helper.c
/*
 * Alpha emulation cpu helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"

#include "cpu.h"
#include "exec/exec-all.h"
#include "fpu/softfloat.h"
#include "exec/helper-proto.h"


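/* Move the single bit selected by the mask SRC in X to the position of the
   single bit selected by the mask DST.  Used below to translate between the
   trap-disable and status bit layouts of the FPCR. */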
#define CONVERT_BIT(X, SRC, DST) \
    (SRC > DST ? (X) / (SRC / DST) & (DST) : ((X) & SRC) * (DST / SRC))

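/* All of the defined FPCR fields live in the upper 32 bits of the register,
   so only that half is kept in env->fpcr; the load/store helpers shift it
   into and out of architectural position. */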
uint64_t cpu_alpha_load_fpcr(CPUAlphaState *env)
{
    return (uint64_t)env->fpcr << 32;
}

void cpu_alpha_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    uint32_t fpcr = val >> 32;
    uint32_t t = 0;

    t |= CONVERT_BIT(fpcr, FPCR_INED, FPCR_INE);
    t |= CONVERT_BIT(fpcr, FPCR_UNFD, FPCR_UNF);
    t |= CONVERT_BIT(fpcr, FPCR_OVFD, FPCR_OVF);
    t |= CONVERT_BIT(fpcr, FPCR_DZED, FPCR_DZE);
    t |= CONVERT_BIT(fpcr, FPCR_INVD, FPCR_INV);

    env->fpcr = fpcr;
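
    /* The FPCR carries trap *disable* bits; T now holds them mapped onto the
       corresponding status bit positions, so its complement is the set of
       exceptions that should actually trap. */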
    env->fpcr_exc_enable = ~t & FPCR_STATUS_MASK;

    switch (fpcr & FPCR_DYN_MASK) {
    case FPCR_DYN_NORMAL:
    default:
        t = float_round_nearest_even;
        break;
    case FPCR_DYN_CHOPPED:
        t = float_round_to_zero;
        break;
    case FPCR_DYN_MINUS:
        t = float_round_down;
        break;
    case FPCR_DYN_PLUS:
        t = float_round_up;
        break;
    }
    env->fpcr_dyn_round = t;

    env->fpcr_flush_to_zero = (fpcr & FPCR_UNFD) && (fpcr & FPCR_UNDZ);
    env->fp_status.flush_inputs_to_zero = (fpcr & FPCR_DNZ) != 0;
}

uint64_t helper_load_fpcr(CPUAlphaState *env)
{
    return cpu_alpha_load_fpcr(env);
}

void helper_store_fpcr(CPUAlphaState *env, uint64_t val)
{
    cpu_alpha_store_fpcr(env, val);
}

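/* In PALmode, integer registers 8-14 and 25 are replaced by the PALshadow
   registers; resolve a register number to the storage that is live in the
   current mode. */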
static uint64_t *cpu_alpha_addr_gr(CPUAlphaState *env, unsigned reg)
{
#ifndef CONFIG_USER_ONLY
    if (env->pal_mode) {
        if (reg >= 8 && reg <= 14) {
            return &env->shadow[reg - 8];
        } else if (reg == 25) {
            return &env->shadow[7];
        }
    }
#endif
    return &env->ir[reg];
}

uint64_t cpu_alpha_load_gr(CPUAlphaState *env, unsigned reg)
{
    return *cpu_alpha_addr_gr(env, reg);
}

void cpu_alpha_store_gr(CPUAlphaState *env, unsigned reg, uint64_t val)
{
    *cpu_alpha_addr_gr(env, reg) = val;
}

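/* For user-only emulation there is no MMU to model: every fault is simply
   reported to the guest as EXCP_MMFAULT with the faulting address. */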
#if defined(CONFIG_USER_ONLY)
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                               int rw, int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);

    cs->exception_index = EXCP_MMFAULT;
    cpu->env.trap_arg0 = address;
    return 1;
}
#else
/* Returns the OSF/1 entMM failure indication, or -1 on success. */
static int get_physical_address(CPUAlphaState *env, target_ulong addr,
                                int prot_need, int mmu_idx,
                                target_ulong *pphys, int *pprot)
{
    CPUState *cs = CPU(alpha_env_get_cpu(env));
    target_long saddr = addr;
    target_ulong phys = 0;
    target_ulong L1pte, L2pte, L3pte;
    target_ulong pt, index;
    int prot = 0;
    int ret = MM_K_ACV;

    /* Handle physical accesses. */
    if (mmu_idx == MMU_PHYS_IDX) {
        phys = addr;
        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Ensure that the virtual address is properly sign-extended from
       the last implemented virtual address bit. */
    if (saddr >> TARGET_VIRT_ADDR_SPACE_BITS != saddr >> 63) {
        goto exit;
    }

    /* Translate the superpage. */
    /* ??? When we do more than emulate Unix PALcode, we'll need to
       determine which KSEG is actually active. */
    if (saddr < 0 && ((saddr >> 41) & 3) == 2) {
        /* User-space cannot access KSEG addresses. */
        if (mmu_idx != MMU_KERNEL_IDX) {
            goto exit;
        }

        /* For the benefit of the Typhoon chipset, move bit 40 to bit 43.
           We would not do this if the 48-bit KSEG is enabled. */
        phys = saddr & ((1ull << 40) - 1);
        phys |= (saddr & (1ull << 40)) << 3;

        prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
        ret = -1;
        goto exit;
    }

    /* Interpret the page table exactly like PALcode does. */

    pt = env->ptbr;

    /* L1 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 20)) & 0x3ff;
    L1pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L1pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L1pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L1pte >> 32 << TARGET_PAGE_BITS;

    /* L2 page table read. */
    index = (addr >> (TARGET_PAGE_BITS + 10)) & 0x3ff;
    L2pte = ldq_phys(cs->as, pt + index*8);

    if (unlikely((L2pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }
    if (unlikely((L2pte & PTE_KRE) == 0)) {
        goto exit;
    }
    pt = L2pte >> 32 << TARGET_PAGE_BITS;

    /* L3 page table read. */
    index = (addr >> TARGET_PAGE_BITS) & 0x3ff;
    L3pte = ldq_phys(cs->as, pt + index*8);

    phys = L3pte >> 32 << TARGET_PAGE_BITS;
    if (unlikely((L3pte & PTE_VALID) == 0)) {
        ret = MM_K_TNV;
        goto exit;
    }

#if PAGE_READ != 1 || PAGE_WRITE != 2 || PAGE_EXEC != 4
# error page bits out of date
#endif

    /* Check access violations. */
    if (L3pte & (PTE_KRE << mmu_idx)) {
        prot |= PAGE_READ | PAGE_EXEC;
    }
    if (L3pte & (PTE_KWE << mmu_idx)) {
        prot |= PAGE_WRITE;
    }
    if (unlikely((prot & prot_need) == 0 && prot_need)) {
        goto exit;
    }

    /* Check fault-on-operation violations. */
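    /* The fault-on bits (FOR/FOW/FOE) sit one bit position above the matching
       PAGE_READ/PAGE_WRITE/PAGE_EXEC values (hence the #error check above),
       so a single right shift lines them up with PROT. */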
    prot &= ~(L3pte >> 1);
    ret = -1;
    if (unlikely((prot & prot_need) == 0)) {
        ret = (prot_need & PAGE_EXEC ? MM_K_FOE :
               prot_need & PAGE_WRITE ? MM_K_FOW :
               prot_need & PAGE_READ ? MM_K_FOR : -1);
    }

 exit:
    *pphys = phys;
    *pprot = prot;
    return ret;
}

hwaddr alpha_cpu_get_phys_page_debug(CPUState *cs, vaddr addr)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(&cpu->env, addr, 0, 0, &phys, &prot);
    return (fail >= 0 ? -1 : phys);
}

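/* Softmmu slow-path entry: translate ADDR and install the mapping in the
   TLB, or record the OSF/1 MM fault arguments and signal EXCP_MMFAULT. */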
int alpha_cpu_handle_mmu_fault(CPUState *cs, vaddr addr, int rw,
                               int mmu_idx)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    target_ulong phys;
    int prot, fail;

    fail = get_physical_address(env, addr, 1 << rw, mmu_idx, &phys, &prot);
    if (unlikely(fail >= 0)) {
        cs->exception_index = EXCP_MMFAULT;
        env->trap_arg0 = addr;
        env->trap_arg1 = fail;
        env->trap_arg2 = (rw == 2 ? -1 : rw);
        return 1;
    }

    tlb_set_page(cs, addr & TARGET_PAGE_MASK, phys & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return 0;
}
#endif /* USER_ONLY */

void alpha_cpu_do_interrupt(CPUState *cs)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i = cs->exception_index;

    if (qemu_loglevel_mask(CPU_LOG_INT)) {
        static int count;
        const char *name = "<unknown>";

        switch (i) {
        case EXCP_RESET:
            name = "reset";
            break;
        case EXCP_MCHK:
            name = "mchk";
            break;
        case EXCP_SMP_INTERRUPT:
            name = "smp_interrupt";
            break;
        case EXCP_CLK_INTERRUPT:
            name = "clk_interrupt";
            break;
        case EXCP_DEV_INTERRUPT:
            name = "dev_interrupt";
            break;
        case EXCP_MMFAULT:
            name = "mmfault";
            break;
        case EXCP_UNALIGN:
            name = "unalign";
            break;
        case EXCP_OPCDEC:
            name = "opcdec";
            break;
        case EXCP_ARITH:
            name = "arith";
            break;
        case EXCP_FEN:
            name = "fen";
            break;
        case EXCP_CALL_PAL:
            name = "call_pal";
            break;
        }
        qemu_log("INT %6d: %s(%#x) cpu=%d pc=%016"
                 PRIx64 " sp=%016" PRIx64 "\n",
                 ++count, name, env->error_code, cs->cpu_index,
                 env->pc, env->ir[IR_SP]);
    }

    cs->exception_index = -1;

#if !defined(CONFIG_USER_ONLY)
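    /* Map the exception onto its entry-point offset from the PALcode base
       (PALBR), then resume execution in PALmode at that address. */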
    switch (i) {
    case EXCP_RESET:
        i = 0x0000;
        break;
    case EXCP_MCHK:
        i = 0x0080;
        break;
    case EXCP_SMP_INTERRUPT:
        i = 0x0100;
        break;
    case EXCP_CLK_INTERRUPT:
        i = 0x0180;
        break;
    case EXCP_DEV_INTERRUPT:
        i = 0x0200;
        break;
    case EXCP_MMFAULT:
        i = 0x0280;
        break;
    case EXCP_UNALIGN:
        i = 0x0300;
        break;
    case EXCP_OPCDEC:
        i = 0x0380;
        break;
    case EXCP_ARITH:
        i = 0x0400;
        break;
    case EXCP_FEN:
        i = 0x0480;
        break;
    case EXCP_CALL_PAL:
        i = env->error_code;
        /* There are 64 entry points for both privileged and unprivileged,
           with bit 0x80 indicating unprivileged. Each entry point gets
           64 bytes to do its job. */
        if (i & 0x80) {
            i = 0x2000 + (i - 0x80) * 64;
        } else {
            i = 0x1000 + i * 64;
        }
        break;
    default:
        cpu_abort(cs, "Unhandled CPU exception");
    }

    /* Remember where the exception happened. Emulate real hardware in
       that the low bit of the PC indicates PALmode. */
    env->exc_addr = env->pc | env->pal_mode;

    /* Continue execution at the PALcode entry point. */
    env->pc = env->palbr + i;

    /* Switch to PALmode. */
    env->pal_mode = 1;
#endif /* !USER_ONLY */
}

bool alpha_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int idx = -1;

    /* We never take interrupts while in PALmode. */
    if (env->pal_mode) {
        return false;
    }

    /* Fall through the switch, collecting the highest priority
       interrupt that isn't masked by the processor status IPL. */
    /* ??? This hard-codes the OSF/1 interrupt levels. */
    switch (env->ps & PS_INT_MASK) {
    case 0 ... 3:
        if (interrupt_request & CPU_INTERRUPT_HARD) {
            idx = EXCP_DEV_INTERRUPT;
        }
        /* FALLTHRU */
    case 4:
        if (interrupt_request & CPU_INTERRUPT_TIMER) {
            idx = EXCP_CLK_INTERRUPT;
        }
        /* FALLTHRU */
    case 5:
        if (interrupt_request & CPU_INTERRUPT_SMP) {
            idx = EXCP_SMP_INTERRUPT;
        }
        /* FALLTHRU */
    case 6:
        if (interrupt_request & CPU_INTERRUPT_MCHK) {
            idx = EXCP_MCHK;
        }
    }
    if (idx >= 0) {
        cs->exception_index = idx;
        env->error_code = 0;
        alpha_cpu_do_interrupt(cs);
        return true;
    }
    return false;
}

void alpha_cpu_dump_state(CPUState *cs, FILE *f, fprintf_function cpu_fprintf,
                          int flags)
{
    static const char *linux_reg_names[] = {
        "v0 ", "t0 ", "t1 ", "t2 ", "t3 ", "t4 ", "t5 ", "t6 ",
        "t7 ", "s0 ", "s1 ", "s2 ", "s3 ", "s4 ", "s5 ", "fp ",
        "a0 ", "a1 ", "a2 ", "a3 ", "a4 ", "a5 ", "t8 ", "t9 ",
        "t10", "t11", "ra ", "t12", "at ", "gp ", "sp ", "zero",
    };
    AlphaCPU *cpu = ALPHA_CPU(cs);
    CPUAlphaState *env = &cpu->env;
    int i;

    cpu_fprintf(f, " PC " TARGET_FMT_lx " PS %02x\n",
                env->pc, env->ps);
    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "IR%02d %s " TARGET_FMT_lx " ", i,
                    linux_reg_names[i], cpu_alpha_load_gr(env, i));
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }

    cpu_fprintf(f, "lock_a " TARGET_FMT_lx " lock_v " TARGET_FMT_lx "\n",
                env->lock_addr, env->lock_value);

    for (i = 0; i < 31; i++) {
        cpu_fprintf(f, "FIR%02d " TARGET_FMT_lx " ", i,
                    *((uint64_t *)(&env->fir[i])));
        if ((i % 3) == 2) {
            cpu_fprintf(f, "\n");
        }
    }
    cpu_fprintf(f, "\n");
}

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated. */
void QEMU_NORETURN helper_excp(CPUAlphaState *env, int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(cs);
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
void QEMU_NORETURN dynamic_excp(CPUAlphaState *env, uintptr_t retaddr,
                                int excp, int error)
{
    AlphaCPU *cpu = alpha_env_get_cpu(env);
    CPUState *cs = CPU(cpu);

    cs->exception_index = excp;
    env->error_code = error;
    if (retaddr) {
        cpu_restore_state(cs, retaddr);
        /* Floating-point exceptions (our only users) point to the next PC. */
        env->pc += 4;
    }
    cpu_loop_exit(cs);
}

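/* Raise an arithmetic trap.  EXC and MASK become trap_arg0 and trap_arg1,
   which the OSF/1 entArith handler receives as the exception summary and
   the register write mask. */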
void QEMU_NORETURN arith_excp(CPUAlphaState *env, uintptr_t retaddr,
                              int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(env, retaddr, EXCP_ARITH, 0);
}