/*
 * Alpha emulation cpu micro-operations helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"
#include "qemu-timer.h"

/*****************************************************************************/
/* Exception processing helpers */

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated. */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(env);
}

static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX. */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(GETPC());
    cpu_loop_exit(env);
}

static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}

uint64_t helper_load_pcc (void)
{
#ifndef CONFIG_USER_ONLY
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the RPCC,
       present it with a well-timed clock fixed at 250MHz. */
    return (((uint64_t)env->pcc_ofs << 32)
            | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
#else
    /* In user-mode, vm_clock doesn't exist. Just pass through the host cpu
       clock ticks. Also, don't bother taking PCC_OFS into account. */
    return (uint32_t)cpu_get_real_ticks();
#endif
}

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

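/* Signed overflow is detected with a branch-free bit trick: for addition,
   overflow occurs iff the operands have the same sign and the result's
   sign differs, so (A ^ B ^ -1) & (A ^ SUM) has its msb set exactly in
   that case.  The subtraction helpers below use the analogous test
   (A ^ B) & (RES ^ A).  */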
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
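    /* The test is done unsigned: th + 1 maps 0 to 1 and -1 to 0, so
       (th + 1) > 1 holds exactly when th is neither value.  */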
    if (unlikely((th + 1) > 1)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

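/* Expand each of the eight mask bits into a full byte: multiplying a
   0/1 bit by 0xff yields 0x00 or 0xff, so the byte-granular mask is
   built without any branches.  */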
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

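/* CMPBGE performs eight unsigned byte comparisons in parallel, setting
   one result bit per byte position where op1 >= op2; it is the basis of
   Alpha's byte-at-a-time string idioms.  */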
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

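/* PERR computes the sum of absolute differences of the eight byte
   lanes, the pixel-error primitive of the MVI multimedia extension.  */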
uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}

uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply. */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }

        arith_excp(hw_exc, 1ull << regno);
    }
}

/* Raise exceptions for ieee fp insns with software completion. */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}

/* Input remapping without software completion. Handle denormal-map-to-zero
   and trap for all other non-finite numbers. */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input. */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN. */
        /* ??? I'm not sure these exception bit flags are correct. I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use. */
        arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
    return val;
}

/* Similar, but does not trap for infinities. Used for comparisons. */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input. */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN. */
        arith_excp(EXC_M_INV, 0);
    }
    return val;
}

/* Input remapping with software completion enabled. All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation. */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
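/* The register image of an F float occupies 64 bits in a layout akin to
   the T float; the routines below convert between that image and the
   IEEE single-precision values QEMU uses to approximate VAX arithmetic
   (see the caveat above helper_addf).  */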
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

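/* The 32-bit memory image of an F float keeps its 16-bit halves in
   swapped order (a layout VAX inherited from the PDP-11); these two
   helpers shuffle the fields between the memory and register images.  */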
uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong. We should
   either implement VAX arithmetic properly or just signal invalid opcode. */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

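/* A G float is stored in memory as four 16-bit words in reversed
   order.  The permutation is its own inverse, which is why the two
   helpers below have identical bodies.  */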
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg. */
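/* The register image of an S float is the value widened to the T-float
   layout: the 8-bit memory exponent is expanded to 11 bits with the
   standard single-to-double remapping, much as the hardware LDS load
   performs.  */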
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result. This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised. */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit. */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left. There is no rounding to do. */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number. Here we must think
               about rounding the result. Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest. */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below. */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even. */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

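/* The three CVTTQ entry points differ only in rounding mode and in
   whether overflow/inexact are reported: the plain form uses the
   dynamic rounding mode with VI exceptions enabled, /C chops without
   raising them, and /SVIC chops while still reporting them.  */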
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->intr_flag = 0;
    env->lock_addr = -1;
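    /* Bit 0 of the target PC selects PALmode; when it is clear we
       return to native mode and swap the shadow registers back out.  */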
    if ((a & 1) == 0) {
        env->pal_mode = 0;
        swap_shadow_regs(env);
    }
}

void helper_tbia(void)
{
    tlb_flush(env, 1);
}

void helper_tbis(uint64_t p)
{
    tlb_flush_page(env, p);
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
uint64_t helper_ldl_phys(uint64_t p)
{
    return (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}

uint64_t helper_ldl_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = ldq_phys(p);
}

void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}

void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}

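/* Store-conditional: succeed only if the lock address still matches
   and memory still holds the value observed by the load-locked; the
   lock is cleared in either case.  */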
uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(p);
        if (old == env->lock_value) {
            stq_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
                                              int is_user, void *retaddr)
{
    uint64_t pc;
    uint32_t insn;

    do_restore_state(retaddr);

    pc = env->pc;
    insn = ldl_code(pc);

    env->trap_arg0 = addr;
    env->trap_arg1 = insn >> 26;        /* opcode */
    env->trap_arg2 = (insn >> 21) & 31; /* dest regno */
    helper_excp(EXCP_UNALIGN, 0);
}

void QEMU_NORETURN cpu_unassigned_access(CPUState *env1,
                                         target_phys_addr_t addr, int is_write,
                                         int is_exec, int unused, int size)
{
    env = env1;
    env->trap_arg0 = addr;
    env->trap_arg1 = is_write;
    dynamic_excp(EXCP_MCHK, 0);
}

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error. If retaddr is
   NULL, the function was called from C code, i.e. not from generated
   code or from helper.c. */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        do_restore_state(retaddr);
        /* Exception index and error code are already set */
        cpu_loop_exit(env);
    }
    env = saved_env;
}
#endif