/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "dyngen-exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"
#include "qemu-timer.h"

#define FP_STATUS (env->fp_status)

/*****************************************************************************/
/* Exceptions processing helpers */

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(env);
}

static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(GETPC());
    cpu_loop_exit(env);
}

static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}

uint64_t helper_load_pcc (void)
{
#ifndef CONFIG_USER_ONLY
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the RPCC,
       present it with a well-timed clock fixed at 250MHz.  */
    return (((uint64_t)env->pcc_ofs << 32)
            | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
#else
    /* In user-mode, vm_clock doesn't exist.  Just pass through the host cpu
       clock ticks.  Also, don't bother taking PCC_OFS into account.  */
    return (uint32_t)cpu_get_real_ticks();
#endif
}
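
/* A hedged aside on the PCC layout assumed above: on Alpha, RPCC returns
   a 64-bit value whose low 32 bits are the free-running cycle counter and
   whose high 32 bits are the per-process offset (PCC_OFS); software then
   computes process time as (low + high) mod 2^32.  That is why the helper
   only needs to splice the two halves together.  */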

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}
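
/* An aside on the overflow test above, not part of the original sources:
   for signed addition r = a + b, overflow occurs exactly when both
   operands have the same sign and the result's sign differs.
   (a ^ b ^ -1) has its MSB set when a and b agree in sign, and (a ^ r)
   has its MSB set when the result's sign flipped, so ANDing with the
   sign bit is nonzero precisely on overflow.  For example, with
   0x7fffffffffffffff + 1 the signs agree, the sum 0x8000000000000000
   is negative, and the test fires.  The subtraction variants below use
   the analogous predicate with the operand signs required to differ.  */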

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* The 128-bit product fits in 64 signed bits only when the high
       half is the sign extension of the low half.  */
    if (unlikely((int64_t)th != (int64_t)tl >> 63)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
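
/* A short note on the construction above: each bit i of MSKB selects
   byte lane i, and multiplying the extracted 0/1 bit by a lane of all
   ones expands it into a byte-wide mask without branches.  For example,
   byte_zap(0x1122334455667788, 0x01) clears only the low byte, giving
   0x1122334455667700.  */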

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            res |= 1 << i;
        }
    }
    return res;
}
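
/* Context, offered as a hedged aside: CMPBGE is the Alpha's byte-wise
   unsigned compare.  Its classic use is zero-byte scanning in string
   routines: cmpbge(0, x) sets bit i exactly when byte i of x is zero,
   so a nonzero result locates a NUL terminator eight bytes at a time.  */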

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb) {
            opr = opa - opb;
        } else {
            opr = opb - opa;
        }
        res += opr;
    }
    return res;
}

uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}
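
/* A hedged aside on the pack/unpack pairs above: PKLB/UNPKBL move the
   low bytes of two longwords between packed and unpacked layouts, so
   unpkbl(0x0000bbaa) == 0x000000bb000000aa and pklb inverts that;
   PKWB/UNPKBW do the same for the low bytes of four words.  */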

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }

        arith_excp(hw_exc, 1ull << regno);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}

/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        arith_excp(EXC_M_INV, 0);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
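
/* A hedged note on the exponent arithmetic above: IEEE single precision
   is biased by 127 while VAX F is biased by 128 with the hidden bit one
   place to the left, so a finite IEEE value converts by adding 2 to the
   exponent once it is widened into the 11-bit register field (and
   f_to_float32 below subtracts 2 on the way back).  The r = 1 result
   encodes a zero exponent with a nonzero fraction, which f_to_float32
   rejects as a reserved operand / dirty zero.  */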

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r =  (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r =  ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000)) {
        r |= 0x7ll << 59;
    }
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r =  (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f) {
            exp = 0x7ff;
        }
    } else {
        if (exp_low != 0x00) {
            exp |= 0x380;
        }
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}
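
/* A hedged note on the widening above: an S-format register value keeps
   the 8-bit memory exponent in an 11-bit field by replicating its MSB.
   0xff maps to 0x7ff so Inf/NaN stay exceptional, and a nonzero exponent
   with a clear MSB gains the 0x380 fill, which is exactly how the
   double-precision bias of 1023 absorbs the single-precision bias of
   127 (1023 - 127 = 0x380).  */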

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}
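
/* A worked example of the rounding path above, offered as a hedged
   aside: for a = 2.75, the dropped fraction bits land in the top of
   ROUND as 0b11..., so round > 1ull << 63 and round-to-nearest bumps
   ret from 2 to 3.  For a = 2.5 the dropped bits are exactly 0b10...,
   round == 1ull << 63, and ret += (ret & 1) leaves the even result 2
   unchanged -- round-half-to-even as IEEE requires.  */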

uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->intr_flag = 0;
    env->lock_addr = -1;
    if ((a & 1) == 0) {
        env->pal_mode = 0;
        swap_shadow_regs(env);
    }
}

void helper_tbia(void)
{
    tlb_flush(env, 1);
}

void helper_tbis(uint64_t p)
{
    tlb_flush_page(env, p);
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
uint64_t helper_ldl_phys(uint64_t p)
{
    return (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}

uint64_t helper_ldl_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = ldq_phys(p);
}

void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}

void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}

uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(p);
        if (old == env->lock_value) {
            stq_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
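
/* A hedged note on the pair above: LDx_L/STx_C are emulated by recording
   the locked physical address and the value seen at load time, then
   letting the conditional store succeed only if the same address still
   holds that value.  This is a value-based check rather than a true
   lock-flag snoop, so an ABA write sequence by another CPU would go
   undetected; that appears to be the trade-off taken here.  */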

static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
                                              int is_user, void *retaddr)
{
    uint64_t pc;
    uint32_t insn;

    do_restore_state(retaddr);

    pc = env->pc;
    insn = ldl_code(pc);

    env->trap_arg0 = addr;
    env->trap_arg1 = insn >> 26;                /* opcode */
    env->trap_arg2 = (insn >> 21) & 31;         /* dest regno */
    helper_excp(EXCP_UNALIGN, 0);
}

void QEMU_NORETURN cpu_unassigned_access(CPUState *env1,
                                         target_phys_addr_t addr, int is_write,
                                         int is_exec, int unused, int size)
{
    env = env1;
    env->trap_arg0 = addr;
    env->trap_arg1 = is_write;
    dynamic_excp(EXCP_MCHK, 0);
}

#include "softmmu_exec.h"

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c).  */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx);
    if (unlikely(ret != 0)) {
        do_restore_state(retaddr);
        /* Exception index and error code are already set */
        cpu_loop_exit(env);
    }
    env = saved_env;
}
#endif