/*
 * Alpha emulation cpu micro-operations helpers for qemu.
 *
 * Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"
#include "qemu-timer.h"

/*****************************************************************************/
/* Exceptions processing helpers */

/* This should only be called from translate, via gen_excp.
   We expect that ENV->PC has already been updated.  */
void QEMU_NORETURN helper_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit(env);
}

static void do_restore_state(void *retaddr)
{
    unsigned long pc = (unsigned long)retaddr;

    if (pc) {
        TranslationBlock *tb = tb_find_pc(pc);
        if (tb) {
            cpu_restore_state(tb, env, pc);
        }
    }
}

/* This may be called from any of the helpers to set up EXCEPTION_INDEX.  */
static void QEMU_NORETURN dynamic_excp(int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    do_restore_state(GETPC());
    cpu_loop_exit(env);
}

static void QEMU_NORETURN arith_excp(int exc, uint64_t mask)
{
    env->trap_arg0 = exc;
    env->trap_arg1 = mask;
    dynamic_excp(EXCP_ARITH, 0);
}
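
/* For arithmetic traps, trap_arg0 carries the exception summary bits
   (the EXC_M_* values) and trap_arg1 the destination-register write mask;
   the FP helpers below pass 1ull << regno for the latter, while the integer
   overflow helpers pass 0.  */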

uint64_t helper_load_pcc (void)
{
#ifndef CONFIG_USER_ONLY
    /* In system mode we have access to a decent high-resolution clock.
       In order to make OS-level time accounting work with the RPCC,
       present it with a well-timed clock fixed at 250MHz.  */
    return (((uint64_t)env->pcc_ofs << 32)
            | (uint32_t)(qemu_get_clock_ns(vm_clock) >> 2));
#else
    /* In user-mode, vm_clock doesn't exist.  Just pass through the host cpu
       clock ticks.  Also, don't bother taking PCC_OFS into account.  */
    return (uint32_t)cpu_get_real_ticks();
#endif
}
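
/* Arithmetic behind the 250MHz figure: vm_clock counts nanoseconds, so the
   right shift by two divides by 4, giving one PCC tick every 4ns, i.e. 250
   million ticks per second.  PCC_OFS is returned in the high 32 bits, which
   matches the architected RPCC result layout.  */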

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}
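
/* Signed overflow occurs exactly when both operands have the same sign and
   the result's sign differs from them.  (a ^ b ^ -1) has bit 63 set when the
   operand signs agree, and (a ^ result) has it set when the sign changed, so
   the conjunction tests bit 63 for overflow.  The same idiom, applied to
   bit 31, is used by the longword variants below.  */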

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        arith_excp(EXC_M_IOV, 0);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        arith_excp(EXC_M_IOV, 0);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}
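
/* byte_zap builds a per-byte mask by multiplying each mask bit by the 0xFF
   pattern of its byte lane, then clears the selected bytes.  ZAP clears the
   bytes whose mask bits are set; ZAPNOT keeps them.  For example,
   zapnot(x, 0x0f) keeps only the low four bytes, which is how a value is
   zero-extended from 32 to 64 bits.  */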

uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}
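
/* CMPBGE does an unsigned >= compare on each byte lane and collects the
   results into the low eight bits.  A classic use is locating a string
   terminator: cmpbge(0, x) sets bit i exactly when byte i of x is zero,
   since 0 >= b only holds for b == 0.  */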

uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}
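
/* The byte/word min/max helpers and PERR above belong to the MVI (motion
   video) extension: PERR accumulates the sum of absolute byte differences
   of its two operands, the inner loop of video motion-estimation code.  */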

uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }

        arith_excp(hw_exc, 1ull << regno);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}
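
/* With software completion (the /S qualifier), the softfloat exception flags
   are first accumulated into fpcr_exc_status; only the flags that are not
   masked by fpcr_exc_mask go on to raise an arithmetic trap, mirroring the
   FPCR trap-disable behaviour.  */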

/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        arith_excp(frac ? EXC_M_INV : EXC_M_FOV, 0);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                arith_excp(EXC_M_UNF, 0);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        arith_excp(EXC_M_INV, 0);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: rebase the exponent and keep the fraction.  */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}
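
/* float32_to_f expands an IEEE single into the 64-bit register image used
   for VAX F-format values (sign, rebased exponent and fraction repositioned
   into double-width fields); f_to_float32 reverses the mapping.  Inputs with
   no F representation (NaN, infinity, exponent overflow) are encoded as 1,
   and register images with a zero exponent field but nonzero significand are
   rejected as reserved operands with an OPCDEC exception.  */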

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            /* Normal number: rebase the exponent and keep the fraction.  */
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        dynamic_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}
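
/* The G-format memory representation differs from the register
   representation only by a swap of the four 16-bit words, so
   helper_g_to_memory and helper_memory_to_g are the same (self-inverse)
   permutation.  */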

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}
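
/* In the register file Alpha keeps S-format values expanded to T-format
   (double) layout; the helpers above implement the LDS/STS bit swizzle
   between the 32-bit memory image and that 64-bit register image.  */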

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}
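
/* The comparisons return 0x4000000000000000, which is 2.0 in T format;
   the Alpha architecture defines the "true" result of a floating-point
   compare as 2.0 and the "false" result as exactly 0.  */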

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}
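
/* Three CVTTQ flavours: helper_cvttq uses the dynamic rounding mode and
   reports overflow/inexact; the chopped (/C) form truncates and reports
   nothing; the /SVIC variant truncates but still reports, as needed for
   the software-completion form of the chopped conversion.  */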

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->intr_flag = 0;
    env->lock_addr = -1;
    if ((a & 1) == 0) {
        env->pal_mode = 0;
        swap_shadow_regs(env);
    }
}
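
/* Bit 0 of the hw_ret target encodes whether to stay in PALmode: when it is
   clear we drop back to native mode and swap the shadow registers back in.
   The return also clears the interrupt flag and drops any outstanding
   lock_addr reservation.  */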

void helper_tbia(void)
{
    tlb_flush(env, 1);
}

void helper_tbis(uint64_t p)
{
    tlb_flush_page(env, p);
}
#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)
uint64_t helper_ldl_phys(uint64_t p)
{
    return (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_phys(uint64_t p)
{
    return ldq_phys(p);
}

uint64_t helper_ldl_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = (int32_t)ldl_phys(p);
}

uint64_t helper_ldq_l_phys(uint64_t p)
{
    env->lock_addr = p;
    return env->lock_value = ldq_phys(p);
}

void helper_stl_phys(uint64_t p, uint64_t v)
{
    stl_phys(p, v);
}

void helper_stq_phys(uint64_t p, uint64_t v)
{
    stq_phys(p, v);
}

uint64_t helper_stl_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        int32_t old = ldl_phys(p);
        if (old == (int32_t)env->lock_value) {
            stl_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}

uint64_t helper_stq_c_phys(uint64_t p, uint64_t v)
{
    uint64_t ret = 0;

    if (p == env->lock_addr) {
        uint64_t old = ldq_phys(p);
        if (old == env->lock_value) {
            stq_phys(p, v);
            ret = 1;
        }
    }
    env->lock_addr = -1;

    return ret;
}
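
/* Load-locked/store-conditional is emulated by remembering the locked
   physical address and the value read; the conditional store succeeds only
   if the current memory contents still compare equal.  This is a value
   comparison rather than a real reservation, so an intervening store of an
   identical value (the ABA case) is not detected -- a known simplification
   of the emulation.  */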

static void QEMU_NORETURN do_unaligned_access(target_ulong addr, int is_write,
                                              int is_user, void *retaddr)
{
    uint64_t pc;
    uint32_t insn;

    do_restore_state(retaddr);

    pc = env->pc;
    insn = ldl_code(pc);

    env->trap_arg0 = addr;
    env->trap_arg1 = insn >> 26;                /* opcode */
    env->trap_arg2 = (insn >> 21) & 31;         /* dest regno */
    helper_excp(EXCP_UNALIGN, 0);
}

void QEMU_NORETURN cpu_unassigned_access(CPUState *env1,
                                         target_phys_addr_t addr, int is_write,
                                         int is_exec, int unused, int size)
{
    env = env1;
    env->trap_arg0 = addr;
    env->trap_arg1 = is_write;
    dynamic_excp(EXCP_MCHK, 0);
}

#define MMUSUFFIX _mmu
#define ALIGNED_ONLY

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* Try to fill the TLB and raise an exception on error.  If retaddr is
   NULL, the function was called from C code (i.e. not from generated
   code or from helper.c).  */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    CPUState *saved_env;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (unlikely(ret != 0)) {
        do_restore_state(retaddr);
        /* Exception index and error code are already set */
        cpu_loop_exit(env);
    }
    env = saved_env;
}
#endif