/*
 *  Alpha emulation cpu micro-operations helpers for qemu.
 *
 *  Copyright (c) 2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "exec.h"
#include "host-utils.h"
#include "softfloat.h"
#include "helper.h"
#include "qemu-timer.h"

/*****************************************************************************/
/* Exceptions processing helpers */
void QEMU_NORETURN helper_excp (int excp, int error)
{
    env->exception_index = excp;
    env->error_code = error;
    cpu_loop_exit();
}

uint64_t helper_load_pcc (void)
{
    /* ??? This isn't a timer for which we have any rate info. */
    return (uint32_t)cpu_get_real_ticks();
}

uint64_t helper_load_fpcr (void)
{
    return cpu_alpha_load_fpcr (env);
}

void helper_store_fpcr (uint64_t val)
{
    cpu_alpha_store_fpcr (env, val);
}

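/* Overflow-trapping integer arithmetic.  For the additions below, the test
   (a ^ b ^ -1) & (a ^ r) has its sign bit set exactly when both operands
   have the same sign and the result's sign differs; the subtractions use
   the complementary test (a ^ b) & (r ^ a).  Both are the standard
   two's-complement signed-overflow checks.  */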
uint64_t helper_addqv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 += op2;
    if (unlikely((tmp ^ op2 ^ (-1ULL)) & (tmp ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return op1;
}

uint64_t helper_addlv (uint64_t op1, uint64_t op2)
{
    uint64_t tmp = op1;
    op1 = (uint32_t)(op1 + op2);
    if (unlikely((tmp ^ op2 ^ (-1UL)) & (tmp ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return op1;
}

uint64_t helper_subqv (uint64_t op1, uint64_t op2)
{
    uint64_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1ULL << 63))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return res;
}

uint64_t helper_sublv (uint64_t op1, uint64_t op2)
{
    uint32_t res;
    res = op1 - op2;
    if (unlikely((op1 ^ op2) & (res ^ op1) & (1UL << 31))) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return res;
}

uint64_t helper_mullv (uint64_t op1, uint64_t op2)
{
    int64_t res = (int64_t)op1 * (int64_t)op2;

    if (unlikely((int32_t)res != res)) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return (int64_t)((int32_t)res);
}

uint64_t helper_mulqv (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    muls64(&tl, &th, op1, op2);
    /* If th != 0 && th != -1, then we had an overflow */
    if (unlikely((th + 1) > 1)) {
        helper_excp(EXCP_ARITH, EXC_M_IOV);
    }
    return tl;
}

uint64_t helper_umulh (uint64_t op1, uint64_t op2)
{
    uint64_t tl, th;

    mulu64(&tl, &th, op1, op2);
    return th;
}

uint64_t helper_ctpop (uint64_t arg)
{
    return ctpop64(arg);
}

uint64_t helper_ctlz (uint64_t arg)
{
    return clz64(arg);
}

uint64_t helper_cttz (uint64_t arg)
{
    return ctz64(arg);
}

static inline uint64_t byte_zap(uint64_t op, uint8_t mskb)
{
    uint64_t mask;

    mask = 0;
    mask |= ((mskb >> 0) & 1) * 0x00000000000000FFULL;
    mask |= ((mskb >> 1) & 1) * 0x000000000000FF00ULL;
    mask |= ((mskb >> 2) & 1) * 0x0000000000FF0000ULL;
    mask |= ((mskb >> 3) & 1) * 0x00000000FF000000ULL;
    mask |= ((mskb >> 4) & 1) * 0x000000FF00000000ULL;
    mask |= ((mskb >> 5) & 1) * 0x0000FF0000000000ULL;
    mask |= ((mskb >> 6) & 1) * 0x00FF000000000000ULL;
    mask |= ((mskb >> 7) & 1) * 0xFF00000000000000ULL;

    return op & ~mask;
}

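/* ZAP clears each byte of VAL whose corresponding bit in the low eight bits
   of MASK is set; ZAPNOT inverts the mask first, so it keeps exactly the
   selected bytes and clears the rest.  */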
uint64_t helper_zap(uint64_t val, uint64_t mask)
{
    return byte_zap(val, mask);
}

uint64_t helper_zapnot(uint64_t val, uint64_t mask)
{
    return byte_zap(val, ~mask);
}

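/* CMPBGE performs eight independent unsigned byte comparisons and returns an
   8-bit mask with bit i set when byte i of op1 is >= byte i of op2.  With a
   zero first operand it reports exactly the zero bytes of op2, which is the
   usual Alpha idiom for locating a NUL terminator in string code.  */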
uint64_t helper_cmpbge (uint64_t op1, uint64_t op2)
{
    uint8_t opa, opb, res;
    int i;

    res = 0;
    for (i = 0; i < 8; i++) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            res |= 1 << i;
    }
    return res;
}

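/* The byte/word min/max, pixel-error and pack/unpack helpers below implement
   the MVI (motion video) extension instructions: each one operates
   independently on the 8-bit or 16-bit lanes of the 64-bit operands.  */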
uint64_t helper_minub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_minuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_minsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa < opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxub8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxsb8 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int8_t opa, opb;
    uint8_t opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 8);
    }
    return res;
}

uint64_t helper_maxuw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint16_t opa, opb, opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_maxsw4 (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    int16_t opa, opb;
    uint16_t opr;
    int i;

    for (i = 0; i < 4; ++i) {
        opa = op1 >> (i * 16);
        opb = op2 >> (i * 16);
        opr = opa > opb ? opa : opb;
        res |= (uint64_t)opr << (i * 16);
    }
    return res;
}

uint64_t helper_perr (uint64_t op1, uint64_t op2)
{
    uint64_t res = 0;
    uint8_t opa, opb, opr;
    int i;

    for (i = 0; i < 8; ++i) {
        opa = op1 >> (i * 8);
        opb = op2 >> (i * 8);
        if (opa >= opb)
            opr = opa - opb;
        else
            opr = opb - opa;
        res += opr;
    }
    return res;
}

uint64_t helper_pklb (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 >> 24) & 0xff00);
}

uint64_t helper_pkwb (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 >> 8) & 0xff00)
            | ((op1 >> 16) & 0xff0000)
            | ((op1 >> 24) & 0xff000000));
}

uint64_t helper_unpkbl (uint64_t op1)
{
    return (op1 & 0xff) | ((op1 & 0xff00) << 24);
}

uint64_t helper_unpkbw (uint64_t op1)
{
    return ((op1 & 0xff)
            | ((op1 & 0xff00) << 8)
            | ((op1 & 0xff0000) << 16)
            | ((op1 & 0xff000000) << 24));
}

/* Floating point helpers */

void helper_setroundmode (uint32_t val)
{
    set_float_rounding_mode(val, &FP_STATUS);
}

void helper_setflushzero (uint32_t val)
{
    set_flush_to_zero(val, &FP_STATUS);
}

void helper_fp_exc_clear (void)
{
    set_float_exception_flags(0, &FP_STATUS);
}

uint32_t helper_fp_exc_get (void)
{
    return get_float_exception_flags(&FP_STATUS);
}

/* Raise exceptions for ieee fp insns without software completion.
   In that case there are no exceptions that don't trap; the mask
   doesn't apply.  */
void helper_fp_exc_raise(uint32_t exc, uint32_t regno)
{
    if (exc) {
        uint32_t hw_exc = 0;

        env->ipr[IPR_EXC_MASK] |= 1ull << regno;

        if (exc & float_flag_invalid) {
            hw_exc |= EXC_M_INV;
        }
        if (exc & float_flag_divbyzero) {
            hw_exc |= EXC_M_DZE;
        }
        if (exc & float_flag_overflow) {
            hw_exc |= EXC_M_FOV;
        }
        if (exc & float_flag_underflow) {
            hw_exc |= EXC_M_UNF;
        }
        if (exc & float_flag_inexact) {
            hw_exc |= EXC_M_INE;
        }
        helper_excp(EXCP_ARITH, hw_exc);
    }
}

/* Raise exceptions for ieee fp insns with software completion.  */
void helper_fp_exc_raise_s(uint32_t exc, uint32_t regno)
{
    if (exc) {
        env->fpcr_exc_status |= exc;

        exc &= ~env->fpcr_exc_mask;
        if (exc) {
            helper_fp_exc_raise(exc, regno);
        }
    }
}

/* Input remapping without software completion.  Handle denormal-map-to-zero
   and trap for all other non-finite numbers.  */
uint64_t helper_ieee_input(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff) {
        /* Infinity or NaN.  */
        /* ??? I'm not sure these exception bit flags are correct.  I do
           know that the Linux kernel, at least, doesn't rely on them and
           just emulates the insn to figure out what exception to use.  */
        helper_excp(EXCP_ARITH, frac ? EXC_M_INV : EXC_M_FOV);
    }
    return val;
}

/* Similar, but does not trap for infinities.  Used for comparisons.  */
uint64_t helper_ieee_input_cmp(uint64_t val)
{
    uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
    uint64_t frac = val & 0xfffffffffffffull;

    if (exp == 0) {
        if (frac != 0) {
            /* If DNZ is set flush denormals to zero on input.  */
            if (env->fpcr_dnz) {
                val &= 1ull << 63;
            } else {
                helper_excp(EXCP_ARITH, EXC_M_UNF);
            }
        }
    } else if (exp == 0x7ff && frac) {
        /* NaN.  */
        helper_excp(EXCP_ARITH, EXC_M_INV);
    }
    return val;
}

/* Input remapping with software completion enabled.  All we have to do
   is handle denormal-map-to-zero; all other inputs get exceptions as
   needed from the actual operation.  */
uint64_t helper_ieee_input_s(uint64_t val)
{
    if (env->fpcr_dnz) {
        uint32_t exp = (uint32_t)(val >> 52) & 0x7ff;
        if (exp == 0) {
            val &= 1ull << 63;
        }
    }
    return val;
}

/* F floating (VAX) */
static inline uint64_t float32_to_f(float32 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_FloatU a;

    a.f = fa;
    sig = ((uint64_t)a.l & 0x80000000) << 32;
    exp = (a.l >> 23) & 0xff;
    mant = ((uint64_t)a.l & 0x007fffff) << 29;

    if (exp == 255) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 253) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float32 f_to_float32(uint64_t a)
{
    uint32_t exp, mant_sig;
    CPU_FloatU r;

    exp = ((a >> 55) & 0x80) | ((a >> 52) & 0x7f);
    mant_sig = ((a >> 32) & 0x80000000) | ((a >> 29) & 0x007fffff);

    if (unlikely(!exp && mant_sig)) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.l = 0;
    } else {
        r.l = ((exp - 2) << 23) | mant_sig;
    }

    return r.f;
}

uint32_t helper_f_to_memory (uint64_t a)
{
    uint32_t r;
    r = (a & 0x00001fffe0000000ull) >> 13;
    r |= (a & 0x07ffe00000000000ull) >> 45;
    r |= (a & 0xc000000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_f (uint32_t a)
{
    uint64_t r;
    r = ((uint64_t)(a & 0x0000c000)) << 48;
    r |= ((uint64_t)(a & 0x003fffff)) << 45;
    r |= ((uint64_t)(a & 0xffff0000)) << 13;
    if (!(a & 0x00004000))
        r |= 0x7ll << 59;
    return r;
}

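/* F_floating values live in the register file as a 64-bit image:
   helper_memory_to_f expands the 32-bit VAX memory word into that image and
   helper_f_to_memory is the inverse shuffle.  float32_to_f/f_to_float32 map
   the same image to and from an IEEE single so the softfloat routines can be
   reused for the arithmetic; inputs that do not fit are encoded as VAX
   dirty-zero reserved operands, and f_to_float32 raises OPCDEC on them.  */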
/* ??? Emulating VAX arithmetic with IEEE arithmetic is wrong.  We should
   either implement VAX arithmetic properly or just signal invalid opcode.  */

uint64_t helper_addf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_subf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_mulf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_divf (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = f_to_float32(a);
    fb = f_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_sqrtf (uint64_t t)
{
    float32 ft, fr;

    ft = f_to_float32(t);
    fr = float32_sqrt(ft, &FP_STATUS);
    return float32_to_f(fr);
}


/* G floating (VAX) */
static inline uint64_t float64_to_g(float64 fa)
{
    uint64_t r, exp, mant, sig;
    CPU_DoubleU a;

    a.d = fa;
    sig = a.ll & 0x8000000000000000ull;
    exp = (a.ll >> 52) & 0x7ff;
    mant = a.ll & 0x000fffffffffffffull;

    if (exp == 2047) {
        /* NaN or infinity */
        r = 1; /* VAX dirty zero */
    } else if (exp == 0) {
        if (mant == 0) {
            /* Zero */
            r = 0;
        } else {
            /* Denormalized */
            r = sig | ((exp + 1) << 52) | mant;
        }
    } else {
        if (exp >= 2045) {
            /* Overflow */
            r = 1; /* VAX dirty zero */
        } else {
            r = sig | ((exp + 2) << 52) | mant;
        }
    }

    return r;
}

static inline float64 g_to_float64(uint64_t a)
{
    uint64_t exp, mant_sig;
    CPU_DoubleU r;

    exp = (a >> 52) & 0x7ff;
    mant_sig = a & 0x800fffffffffffffull;

    if (!exp && mant_sig) {
        /* Reserved operands / Dirty zero */
        helper_excp(EXCP_OPCDEC, 0);
    }

    if (exp < 3) {
        /* Underflow */
        r.ll = 0;
    } else {
        r.ll = ((exp - 2) << 52) | mant_sig;
    }

    return r.d;
}

uint64_t helper_g_to_memory (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_memory_to_g (uint64_t a)
{
    uint64_t r;
    r = (a & 0x000000000000ffffull) << 48;
    r |= (a & 0x00000000ffff0000ull) << 16;
    r |= (a & 0x0000ffff00000000ull) >> 16;
    r |= (a & 0xffff000000000000ull) >> 48;
    return r;
}

uint64_t helper_addg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_subg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_mulg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_divg (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = g_to_float64(a);
    fb = g_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_g(fr);
}

uint64_t helper_sqrtg (uint64_t a)
{
    float64 fa, fr;

    fa = g_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_g(fr);
}


/* S floating (single) */

/* Taken from linux/arch/alpha/kernel/traps.c, s_mem_to_reg.  */
static inline uint64_t float32_to_s_int(uint32_t fi)
{
    uint32_t frac = fi & 0x7fffff;
    uint32_t sign = fi >> 31;
    uint32_t exp_msb = (fi >> 30) & 1;
    uint32_t exp_low = (fi >> 23) & 0x7f;
    uint32_t exp;

    exp = (exp_msb << 10) | exp_low;
    if (exp_msb) {
        if (exp_low == 0x7f)
            exp = 0x7ff;
    } else {
        if (exp_low != 0x00)
            exp |= 0x380;
    }

    return (((uint64_t)sign << 63)
            | ((uint64_t)exp << 52)
            | ((uint64_t)frac << 29));
}

static inline uint64_t float32_to_s(float32 fa)
{
    CPU_FloatU a;
    a.f = fa;
    return float32_to_s_int(a.l);
}

static inline uint32_t s_to_float32_int(uint64_t a)
{
    return ((a >> 32) & 0xc0000000) | ((a >> 29) & 0x3fffffff);
}

static inline float32 s_to_float32(uint64_t a)
{
    CPU_FloatU r;
    r.l = s_to_float32_int(a);
    return r.f;
}

uint32_t helper_s_to_memory (uint64_t a)
{
    return s_to_float32_int(a);
}

uint64_t helper_memory_to_s (uint32_t a)
{
    return float32_to_s_int(a);
}

uint64_t helper_adds (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_add(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_subs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_sub(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_muls (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_mul(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_divs (uint64_t a, uint64_t b)
{
    float32 fa, fb, fr;

    fa = s_to_float32(a);
    fb = s_to_float32(b);
    fr = float32_div(fa, fb, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_sqrts (uint64_t a)
{
    float32 fa, fr;

    fa = s_to_float32(a);
    fr = float32_sqrt(fa, &FP_STATUS);
    return float32_to_s(fr);
}


/* T floating (double) */
static inline float64 t_to_float64(uint64_t a)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.ll = a;
    return r.d;
}

static inline uint64_t float64_to_t(float64 fa)
{
    /* Memory format is the same as float64 */
    CPU_DoubleU r;
    r.d = fa;
    return r.ll;
}

uint64_t helper_addt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_add(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_subt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_sub(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_mult (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_mul(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_divt (uint64_t a, uint64_t b)
{
    float64 fa, fb, fr;

    fa = t_to_float64(a);
    fb = t_to_float64(b);
    fr = float64_div(fa, fb, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_sqrtt (uint64_t a)
{
    float64 fa, fr;

    fa = t_to_float64(a);
    fr = float64_sqrt(fa, &FP_STATUS);
    return float64_to_t(fr);
}

/* Comparisons */
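/* Each comparison writes 2.0 (0x4000000000000000 in T/G register format) for
   true and +0.0 for false, following the Alpha FP compare convention.  */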
uint64_t helper_cmptun (uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_unordered_quiet(fa, fb, &FP_STATUS)) {
        return 0x4000000000000000ULL;
    } else {
        return 0;
    }
}

uint64_t helper_cmpteq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmptlt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = t_to_float64(a);
    fb = t_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgeq(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_eq_quiet(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpgle(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_le(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

uint64_t helper_cmpglt(uint64_t a, uint64_t b)
{
    float64 fa, fb;

    fa = g_to_float64(a);
    fb = g_to_float64(b);

    if (float64_lt(fa, fb, &FP_STATUS))
        return 0x4000000000000000ULL;
    else
        return 0;
}

/* Floating point format conversion */
uint64_t helper_cvtts (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = t_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_s(fr);
}

uint64_t helper_cvtst (uint64_t a)
{
    float32 fa;
    float64 fr;

    fa = s_to_float32(a);
    fr = float32_to_float64(fa, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqs (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_s(fr);
}

/* Implement float64 to uint64 conversion without saturation -- we must
   supply the truncated result.  This behaviour is used by the compiler
   to get unsigned conversion for free with the same instruction.

   The VI flag is set when overflow or inexact exceptions should be raised.  */

static inline uint64_t helper_cvttq_internal(uint64_t a, int roundmode, int VI)
{
    uint64_t frac, ret = 0;
    uint32_t exp, sign, exc = 0;
    int shift;

    sign = (a >> 63);
    exp = (uint32_t)(a >> 52) & 0x7ff;
    frac = a & 0xfffffffffffffull;

    if (exp == 0) {
        if (unlikely(frac != 0)) {
            goto do_underflow;
        }
    } else if (exp == 0x7ff) {
        exc = (frac ? float_flag_invalid : VI ? float_flag_overflow : 0);
    } else {
        /* Restore implicit bit.  */
        frac |= 0x10000000000000ull;

        shift = exp - 1023 - 52;
        if (shift >= 0) {
            /* In this case the number is so large that we must shift
               the fraction left.  There is no rounding to do.  */
            if (shift < 63) {
                ret = frac << shift;
                if (VI && (ret >> shift) != frac) {
                    exc = float_flag_overflow;
                }
            }
        } else {
            uint64_t round;

            /* In this case the number is smaller than the fraction as
               represented by the 52 bit number.  Here we must think
               about rounding the result.  Handle this by shifting the
               fractional part of the number into the high bits of ROUND.
               This will let us efficiently handle round-to-nearest.  */
            shift = -shift;
            if (shift < 63) {
                ret = frac >> shift;
                round = frac << (64 - shift);
            } else {
                /* The exponent is so small we shift out everything.
                   Leave a sticky bit for proper rounding below.  */
            do_underflow:
                round = 1;
            }

            if (round) {
                exc = (VI ? float_flag_inexact : 0);
                switch (roundmode) {
                case float_round_nearest_even:
                    if (round == (1ull << 63)) {
                        /* Fraction is exactly 0.5; round to even.  */
                        ret += (ret & 1);
                    } else if (round > (1ull << 63)) {
                        ret += 1;
                    }
                    break;
                case float_round_to_zero:
                    break;
                case float_round_up:
                    ret += 1 - sign;
                    break;
                case float_round_down:
                    ret += sign;
                    break;
                }
            }
        }
        if (sign) {
            ret = -ret;
        }
    }
    if (unlikely(exc)) {
        float_raise(exc, &FP_STATUS);
    }

    return ret;
}

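/* The three CVTTQ entry points differ only in the rounding mode passed to
   helper_cvttq_internal and in whether VI is set: helper_cvttq uses the
   dynamic rounding mode and raises overflow/inexact, helper_cvttq_c
   truncates and raises neither, and helper_cvttq_svic truncates but still
   raises them.  */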
uint64_t helper_cvttq(uint64_t a)
{
    return helper_cvttq_internal(a, FP_STATUS.float_rounding_mode, 1);
}

uint64_t helper_cvttq_c(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 0);
}

uint64_t helper_cvttq_svic(uint64_t a)
{
    return helper_cvttq_internal(a, float_round_to_zero, 1);
}

uint64_t helper_cvtqt (uint64_t a)
{
    float64 fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_t(fr);
}

uint64_t helper_cvtqf (uint64_t a)
{
    float32 fr = int64_to_float32(a, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgf (uint64_t a)
{
    float64 fa;
    float32 fr;

    fa = g_to_float64(a);
    fr = float64_to_float32(fa, &FP_STATUS);
    return float32_to_f(fr);
}

uint64_t helper_cvtgq (uint64_t a)
{
    float64 fa = g_to_float64(a);
    return float64_to_int64_round_to_zero(fa, &FP_STATUS);
}

uint64_t helper_cvtqg (uint64_t a)
{
    float64 fr;
    fr = int64_to_float64(a, &FP_STATUS);
    return float64_to_g(fr);
}

/* PALcode support special instructions */
#if !defined (CONFIG_USER_ONLY)
void helper_hw_rei (void)
{
    env->pc = env->ipr[IPR_EXC_ADDR] & ~3;
    env->ipr[IPR_EXC_ADDR] = env->ipr[IPR_EXC_ADDR] & 1;
    env->intr_flag = 0;
    env->lock_addr = -1;
    /* XXX: re-enable interrupts and memory mapping */
}

void helper_hw_ret (uint64_t a)
{
    env->pc = a & ~3;
    env->ipr[IPR_EXC_ADDR] = a & 1;
    env->intr_flag = 0;
    env->lock_addr = -1;
    /* XXX: re-enable interrupts and memory mapping */
}

uint64_t helper_mfpr (int iprn, uint64_t val)
{
    uint64_t tmp;

    if (cpu_alpha_mfpr(env, iprn, &tmp) == 0)
        val = tmp;

    return val;
}

void helper_mtpr (int iprn, uint64_t val)
{
    cpu_alpha_mtpr(env, iprn, val, NULL);
}

void helper_set_alt_mode (void)
{
    env->saved_mode = env->ps & 0xC;
    env->ps = (env->ps & ~0xC) | (env->ipr[IPR_ALT_MODE] & 0xC);
}

void helper_restore_mode (void)
{
    env->ps = (env->ps & ~0xC) | env->saved_mode;
}

#endif

/*****************************************************************************/
/* Softmmu support */
#if !defined (CONFIG_USER_ONLY)

/* XXX: the two following helpers are pure hacks.
 *      Hopefully, we emulate the PALcode, then we should never see
 *      HW_LD / HW_ST instructions.
 */
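/* Both helpers probe the softmmu TLB directly: on a hit the result is
   virtaddr plus the TLB entry's addend; on a miss tlb_fill() is called with
   the return address from GETPC() so the fault can be charged to the right
   guest instruction, and the lookup is retried.  */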
uint64_t helper_ld_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_read;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 0, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

uint64_t helper_st_virt_to_phys (uint64_t virtaddr)
{
    uint64_t tlb_addr, physaddr;
    int index, mmu_idx;
    void *retaddr;

    mmu_idx = cpu_mmu_index(env);
    index = (virtaddr >> TARGET_PAGE_BITS) & (CPU_TLB_SIZE - 1);
 redo:
    tlb_addr = env->tlb_table[mmu_idx][index].addr_write;
    if ((virtaddr & TARGET_PAGE_MASK) ==
        (tlb_addr & (TARGET_PAGE_MASK | TLB_INVALID_MASK))) {
        physaddr = virtaddr + env->tlb_table[mmu_idx][index].addend;
    } else {
        /* the page is not in the TLB : fill it */
        retaddr = GETPC();
        tlb_fill(virtaddr, 1, mmu_idx, retaddr);
        goto redo;
    }
    return physaddr;
}

void helper_ldl_raw(uint64_t t0, uint64_t t1)
{
    ldl_raw(t1, t0);
}

void helper_ldq_raw(uint64_t t0, uint64_t t1)
{
    ldq_raw(t1, t0);
}

void helper_ldl_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldl_raw(t1, t0);
}

void helper_ldq_l_raw(uint64_t t0, uint64_t t1)
{
    env->lock = t1;
    ldq_raw(t1, t0);
}

void helper_ldl_kernel(uint64_t t0, uint64_t t1)
{
    ldl_kernel(t1, t0);
}

void helper_ldq_kernel(uint64_t t0, uint64_t t1)
{
    ldq_kernel(t1, t0);
}

void helper_ldl_data(uint64_t t0, uint64_t t1)
{
    ldl_data(t1, t0);
}

void helper_ldq_data(uint64_t t0, uint64_t t1)
{
    ldq_data(t1, t0);
}

void helper_stl_raw(uint64_t t0, uint64_t t1)
{
    stl_raw(t1, t0);
}

void helper_stq_raw(uint64_t t0, uint64_t t1)
{
    stq_raw(t1, t0);
}

uint64_t helper_stl_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stl_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

uint64_t helper_stq_c_raw(uint64_t t0, uint64_t t1)
{
    uint64_t ret;

    if (t1 == env->lock) {
        stq_raw(t1, t0);
        ret = 0;
    } else
        ret = 1;

    env->lock = 1;

    return ret;
}

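/* The *_c_raw helpers above emulate store-conditional: the store is performed
   only if the target address still matches the address recorded by the
   corresponding load-locked helper.  env->lock is then set to 1, which cannot
   match an aligned load-locked address, so a further store-conditional fails
   until a new load-locked is executed.  */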
#define MMUSUFFIX _mmu

#define SHIFT 0
#include "softmmu_template.h"

#define SHIFT 1
#include "softmmu_template.h"

#define SHIFT 2
#include "softmmu_template.h"

#define SHIFT 3
#include "softmmu_template.h"

/* try to fill the TLB and return an exception if error. If retaddr is
   NULL, it means that the function was called in C code (i.e. not
   from generated code or from helper.c) */
/* XXX: fix it to restore all registers */
void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
{
    TranslationBlock *tb;
    CPUState *saved_env;
    unsigned long pc;
    int ret;

    /* XXX: hack to restore env in all cases, even if not called from
       generated code */
    saved_env = env;
    env = cpu_single_env;
    ret = cpu_alpha_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
    if (!likely(ret == 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            pc = (unsigned long)retaddr;
            tb = tb_find_pc(pc);
            if (likely(tb)) {
                /* the PC is inside the translated code. It means that we have
                   a virtual CPU fault */
                cpu_restore_state(tb, env, pc);
            }
        }
        /* Exception index and error code are already set */
        cpu_loop_exit();
    }
    env = saved_env;
}

#endif