/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"

/*****************************************************************************/
/* Exceptions processing helpers */
void helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_load_pcc (void)
{
    /* XXX: TODO */
    return 0;
}

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

static spinlock_t intr_cpu_lock = SPIN_LOCK_UNLOCKED;

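/*
 * RS and RC atomically read the per-CPU interrupt flag and then set
 * or clear it; the PALcode presumably uses this to detect that an
 * interrupt arrived in the middle of a restartable sequence.
 */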
uint64_t helper_rs(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 1;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

uint64_t helper_rc(void)
{
    uint64_t tmp;

    spin_lock(&intr_cpu_lock);
    tmp = env->intr_flag;
    env->intr_flag = 0;
    spin_unlock(&intr_cpu_lock);

    return tmp;
}

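/*
 * Signed overflow of a + b occurs exactly when both operands have the
 * same sign and the sum's sign differs, i.e. when the top bit of
 * (a ^ b ^ -1) & (a ^ (a + b)) is set; subtraction uses the analogous
 * test (a ^ b) & (a ^ (a - b)).
 */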
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

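/*
 * Clear every byte of op whose corresponding bit is set in mskb.
 * Multiplying each isolated mask bit by 0xFF expands it to a full
 * byte of ones, so the 64-bit mask is built without branches.
 */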
static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

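/*
 * Byte-manipulation helpers.  MSKxL/MSKxH clear the bytes that a
 * store of the given width would modify in the low/high quadword of
 * an unaligned access, and INSxL/INSxH shift the datum into position
 * for such a store.  Only the low three bits of the second operand
 * (the byte offset) are significant, as for the hardware instructions.
 */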
uint64_t helper_mskbl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x01 << (mask & 7));
}

uint64_t helper_insbl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x01 << (mask & 7)));
}

uint64_t helper_mskwl(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x03 << (mask & 7));
}

uint64_t helper_inswl(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x03 << (mask & 7)));
}

uint64_t helper_mskll(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0x0F << (mask & 7));
}

uint64_t helper_insll(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0x0F << (mask & 7)));
}

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_mskql(uint64_t val, uint64_t mask)
{
    return byte_zap(val, 0xFF << (mask & 7));
}

uint64_t helper_insql(uint64_t val, uint64_t mask)
{
    val <<= (mask & 7) * 8;
    return byte_zap(val, ~(0xFF << (mask & 7)));
}

uint64_t helper_mskwh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x03 << (mask & 7)) >> 8);
}

uint64_t helper_inswh(uint64_t val, uint64_t mask)
{
    /* A byte offset of zero selects nothing in the high quadword, and
       a shift count of 64 would be undefined behaviour in C.  */
    val = (mask & 7) ? (val >> (64 - ((mask & 7) * 8))) : 0;
    return byte_zap(val, ~((0x03 << (mask & 7)) >> 8));
}

uint64_t helper_msklh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0x0F << (mask & 7)) >> 8);
}

uint64_t helper_inslh(uint64_t val, uint64_t mask)
{
    /* See helper_inswh for the zero-offset special case.  */
    val = (mask & 7) ? (val >> (64 - ((mask & 7) * 8))) : 0;
    return byte_zap(val, ~((0x0F << (mask & 7)) >> 8));
}

uint64_t helper_mskqh(uint64_t val, uint64_t mask)
{
    return byte_zap(val, (0xFF << (mask & 7)) >> 8);
}

uint64_t helper_insqh(uint64_t val, uint64_t mask)
{
    /* See helper_inswh for the zero-offset special case.  */
    val = (mask & 7) ? (val >> (64 - ((mask & 7) * 8))) : 0;
    return byte_zap(val, ~((0xFF << (mask & 7)) >> 8));
}

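/*
 * CMPBGE compares the eight byte lanes of op1 and op2 as unsigned
 * values and returns one result bit per lane; together with ZAPNOT it
 * forms the basis of the Alpha string operations.
 */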
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

/* Floating point helpers */

/* F floating (VAX) */
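/*
 * F-floats are kept in the register file widened to the G-float
 * layout: sign in bit 63, an 11-bit exponent field and the fraction
 * aligned at bit 29.  The +2/-2 adjustments below rebias the 8-bit
 * exponent into that field; values with no F representation collapse
 * to a VAX dirty zero, which traps when used.
 */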
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
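/*
 * G-floats already use the register layout described above for
 * F-floats, so only the exponent handling differs: the full 11-bit
 * range is kept, with overflow and reserved operands treated the
 * same way.
 */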
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52);
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

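/*
 * The G-float memory image is the register image with its four 16-bit
 * words reversed.  Since the permutation is its own inverse, both
 * conversion directions apply the identical swap.
 */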
uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */
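/*
 * S-floats are IEEE singles stored widened in the register: the
 * float32 bits move to bits 63..62 and 58..29, and for finite numbers
 * whose exponent high bit is clear, bits 61..59 are forced to ones
 * (the 0x7ll << 59 term), implementing the 8-to-11-bit exponent
 * mapping.
 */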
static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    uint64_t r;

    a.f = fa;

    r = (((uint64_t)(a.l & 0xc0000000)) << 32) | (((uint64_t)(a.l & 0x3fffffff)) << 29);
    if (((a.l & 0x7f800000) != 0x7f800000) && (!(a.l & 0x40000000)))
        r |= 0x7ll << 59;
    return r;
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    /* Memory format is the same as float32 */
    float32 fa = s_to_float32(a);
    return *(uint32_t*)(&fa);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    /* Memory format is the same as float32 */
    return float32_to_s(*(float32*)(&a));
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}


/* Sign copy */
uint64_t helper_cpys(uint64_t a, uint64_t b)
{
    return (a & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpysn(uint64_t a, uint64_t b)
{
    return ((~a) & 0x8000000000000000ULL) | (b & ~0x8000000000000000ULL);
}

uint64_t helper_cpyse(uint64_t a, uint64_t b)
{
    return (a & 0xFFF0000000000000ULL) | (b & ~0xFFF0000000000000ULL);
}


/* Comparisons */
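/*
 * The FP compare instructions write 2.0 for true and +0.0 for false;
 * in T-float register format 2.0 is just bit 62 set, hence the
 * 0x4000000000000000 constant.  The single-operand cmpf helpers below
 * instead test the operand's raw sign and magnitude bits against zero.
 */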
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_is_nan(fa) || float64_is_nan(fb))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpfeq (uint64_t a)
{
    return !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfne (uint64_t a)
{
    return (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpflt (uint64_t a)
{
    return (a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfle (uint64_t a)
{
    return (a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfgt (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) && (a & 0x7FFFFFFFFFFFFFFFULL);
}

uint64_t helper_cmpfge (uint64_t a)
{
    return !(a & 0x8000000000000000ULL) || !(a & 0x7FFFFFFFFFFFFFFFULL);
}


/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvttq (uint64_t a)
{
    float64 fa = t_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

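/*
 * CVTLQ/CVTQL move a longword between its memory layout and the split
 * register layout (bits 63..62 and 58..29) used for 32-bit values in
 * the FPU.  Only the /V overflow check is implemented here; the /S
 * software-completion variant is still a stub.
 */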
uint64_t helper_cvtlq (uint64_t a)
{
    return (int64_t)((int32_t)((a >> 32) | ((a >> 29) & 0x3FFFFFFF)));
}

static inline uint64_t __helper_cvtql(uint64_t a, int s, int v)
{
    uint64_t r;

    r = ((uint64_t)(a & 0xC0000000)) << 32;
    r |= ((uint64_t)(a & 0x7FFFFFFF)) << 29;

    if (v && (int64_t)((int32_t)r) != (int64_t)r) {
        helper_excp(EXCP_ARITH, EXCP_ARITH_OVERFLOW);
    }
    if (s) {
        /* TODO */
    }
    return r;
}

uint64_t helper_cvtql (uint64_t a)
{
    return __helper_cvtql(a, 0, 0);
}

uint64_t helper_cvtqlv (uint64_t a)
{
    return __helper_cvtql(a, 0, 1);
}

uint64_t helper_cvtqlsv (uint64_t a)
{
    return __helper_cvtql(a, 1, 1);
}

/* PALcode support special instructions */
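/*
 * HW_REI and HW_RET return from PALcode: bit 0 of the target address
 * selects PAL mode and the remaining bits give the return PC.
 * Interrupt and memory-mapping state is not modelled yet (see the
 * XXX notes below).
 */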
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Once the PALcode is properly emulated, we should never see
 *      the HW_LD / HW_ST instructions at all.
 */
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* The page is not in the TLB: fill it. */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* The page is not in the TLB: fill it. */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

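/*
 * Load-locked records the locked address in env->lock; the matching
 * store-conditional helpers further down succeed only while that
 * address is still current.
 */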
void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

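/*
 * Store-conditional: perform the store and return 0 only when the
 * address still matches the one recorded by the last load-locked,
 * otherwise return 1.  The lock is then reset to 1, an address that
 * can never match.  Note that this single-CPU scheme does not detect
 * intervening stores to the locked location.
 */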
uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

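/*
 * Instantiate the softmmu load/store helpers for each access size:
 * SHIFT 0..3 selects byte, word, longword and quadword accesses.
 */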
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc, NULL);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif