target-ppc/op_helper.c
1 /*
2 * PowerPC emulation helpers for qemu.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
17 * License along with this library; if not, write to the Free Software
18 * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
19 */
20 #include "exec.h"
21 #include "host-utils.h"
22
23 #include "helper_regs.h"
24 #include "op_helper.h"
25
26 #define MEMSUFFIX _raw
27 #include "op_helper.h"
28 #include "op_helper_mem.h"
29 #if !defined(CONFIG_USER_ONLY)
30 #define MEMSUFFIX _user
31 #include "op_helper.h"
32 #include "op_helper_mem.h"
33 #define MEMSUFFIX _kernel
34 #include "op_helper.h"
35 #include "op_helper_mem.h"
36 #define MEMSUFFIX _hypv
37 #include "op_helper.h"
38 #include "op_helper_mem.h"
39 #endif
40
41 //#define DEBUG_OP
42 //#define DEBUG_EXCEPTIONS
43 //#define DEBUG_SOFTWARE_TLB
44
45 /*****************************************************************************/
46 /* Exceptions processing helpers */
47
48 void do_raise_exception_err (uint32_t exception, int error_code)
49 {
50 #if 0
51 printf("Raise exception %3x code : %d\n", exception, error_code);
52 #endif
53 env->exception_index = exception;
54 env->error_code = error_code;
55 cpu_loop_exit();
56 }
57
58 void do_raise_exception (uint32_t exception)
59 {
60 do_raise_exception_err(exception, 0);
61 }
62
63 void cpu_dump_EA (target_ulong EA);
64 void do_print_mem_EA (target_ulong EA)
65 {
66 cpu_dump_EA(EA);
67 }
68
69 /*****************************************************************************/
70 /* Registers load and stores */
71 void do_load_cr (void)
72 {
73 T0 = (env->crf[0] << 28) |
74 (env->crf[1] << 24) |
75 (env->crf[2] << 20) |
76 (env->crf[3] << 16) |
77 (env->crf[4] << 12) |
78 (env->crf[5] << 8) |
79 (env->crf[6] << 4) |
80 (env->crf[7] << 0);
81 }
82
83 void do_store_cr (uint32_t mask)
84 {
85 int i, sh;
86
87 for (i = 0, sh = 7; i < 8; i++, sh--) {
88 if (mask & (1 << sh))
89 env->crf[i] = (T0 >> (sh * 4)) & 0xFUL;
90 }
91 }
92
93 #if defined(TARGET_PPC64)
94 void do_store_pri (int prio)
95 {
96 env->spr[SPR_PPR] &= ~0x001C000000000000ULL;
97 env->spr[SPR_PPR] |= ((uint64_t)prio & 0x7) << 50;
98 }
99 #endif
100
101 target_ulong ppc_load_dump_spr (int sprn)
102 {
103 if (loglevel != 0) {
104 fprintf(logfile, "Read SPR %d %03x => " ADDRX "\n",
105 sprn, sprn, env->spr[sprn]);
106 }
107
108 return env->spr[sprn];
109 }
110
111 void ppc_store_dump_spr (int sprn, target_ulong val)
112 {
113 if (loglevel != 0) {
114 fprintf(logfile, "Write SPR %d %03x => " ADDRX " <= " ADDRX "\n",
115 sprn, sprn, env->spr[sprn], val);
116 }
117 env->spr[sprn] = val;
118 }
119
120 /*****************************************************************************/
121 /* Fixed point operations helpers */
122 void do_adde (void)
123 {
124 T2 = T0;
125 T0 += T1 + xer_ca;
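        /* With the original T0 saved in T2: an unsigned carry out of bit 31
         * occurred iff the 32-bit sum is below T2, or equal to T2 when a
         * carry-in of 1 was added (full wrap-around). */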
126 if (likely(!((uint32_t)T0 < (uint32_t)T2 ||
127 (xer_ca == 1 && (uint32_t)T0 == (uint32_t)T2)))) {
128 xer_ca = 0;
129 } else {
130 xer_ca = 1;
131 }
132 }
133
134 #if defined(TARGET_PPC64)
135 void do_adde_64 (void)
136 {
137 T2 = T0;
138 T0 += T1 + xer_ca;
139 if (likely(!((uint64_t)T0 < (uint64_t)T2 ||
140 (xer_ca == 1 && (uint64_t)T0 == (uint64_t)T2)))) {
141 xer_ca = 0;
142 } else {
143 xer_ca = 1;
144 }
145 }
146 #endif
147
148 void do_addmeo (void)
149 {
150 T1 = T0;
151 T0 += xer_ca + (-1);
152 xer_ov = ((uint32_t)T1 & ((uint32_t)T1 ^ (uint32_t)T0)) >> 31;
153 xer_so |= xer_ov;
154 if (likely(T1 != 0))
155 xer_ca = 1;
156 else
157 xer_ca = 0;
158 }
159
160 #if defined(TARGET_PPC64)
161 void do_addmeo_64 (void)
162 {
163 T1 = T0;
164 T0 += xer_ca + (-1);
165 xer_ov = ((uint64_t)T1 & ((uint64_t)T1 ^ (uint64_t)T0)) >> 63;
166 xer_so |= xer_ov;
167 if (likely(T1 != 0))
168 xer_ca = 1;
169 else
170 xer_ca = 0;
171 }
172 #endif
173
174 void do_divwo (void)
175 {
176 if (likely(!(((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
177 (int32_t)T1 == 0))) {
178 xer_ov = 0;
179 T0 = (int32_t)T0 / (int32_t)T1;
180 } else {
181 xer_ov = 1;
182 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
183 }
184 xer_so |= xer_ov;
185 }
186
187 #if defined(TARGET_PPC64)
188 void do_divdo (void)
189 {
190 if (likely(!(((int64_t)T0 == INT64_MIN && (int64_t)T1 == (int64_t)-1LL) ||
191 (int64_t)T1 == 0))) {
192 xer_ov = 0;
193 T0 = (int64_t)T0 / (int64_t)T1;
194 } else {
195 xer_ov = 1;
196 T0 = UINT64_MAX * ((uint64_t)T0 >> 63);
197 }
198 xer_so |= xer_ov;
199 }
200 #endif
201
202 void do_divwuo (void)
203 {
204 if (likely((uint32_t)T1 != 0)) {
205 xer_ov = 0;
206 T0 = (uint32_t)T0 / (uint32_t)T1;
207 } else {
208 xer_ov = 1;
209 xer_so = 1;
210 T0 = 0;
211 }
212 }
213
214 #if defined(TARGET_PPC64)
215 void do_divduo (void)
216 {
217 if (likely((uint64_t)T1 != 0)) {
218 xer_ov = 0;
219 T0 = (uint64_t)T0 / (uint64_t)T1;
220 } else {
221 xer_ov = 1;
222 xer_so = 1;
223 T0 = 0;
224 }
225 }
226 #endif
227
228 void do_mullwo (void)
229 {
230 int64_t res = (int64_t)T0 * (int64_t)T1;
231
232 if (likely((int32_t)res == res)) {
233 xer_ov = 0;
234 } else {
235 xer_ov = 1;
236 xer_so = 1;
237 }
238 T0 = (int32_t)res;
239 }
240
241 #if defined(TARGET_PPC64)
242 void do_mulldo (void)
243 {
244 int64_t th;
245 uint64_t tl;
246
247 muls64(&tl, (uint64_t *)&th, T0, T1);
248 T0 = (int64_t)tl;
249 /* If th != 0 && th != -1, then we had an overflow */
250 if (likely((uint64_t)(th + 1) <= 1)) {
251 xer_ov = 0;
252 } else {
253 xer_ov = 1;
254 }
255 xer_so |= xer_ov;
256 }
257 #endif
258
259 void do_nego (void)
260 {
261 if (likely((int32_t)T0 != INT32_MIN)) {
262 xer_ov = 0;
263 T0 = -(int32_t)T0;
264 } else {
265 xer_ov = 1;
266 xer_so = 1;
267 }
268 }
269
270 #if defined(TARGET_PPC64)
271 void do_nego_64 (void)
272 {
273 if (likely((int64_t)T0 != INT64_MIN)) {
274 xer_ov = 0;
275 T0 = -(int64_t)T0;
276 } else {
277 xer_ov = 1;
278 xer_so = 1;
279 }
280 }
281 #endif
282
283 void do_subfe (void)
284 {
285 T0 = T1 + ~T0 + xer_ca;
286 if (likely((uint32_t)T0 >= (uint32_t)T1 &&
287 (xer_ca == 0 || (uint32_t)T0 != (uint32_t)T1))) {
288 xer_ca = 0;
289 } else {
290 xer_ca = 1;
291 }
292 }
293
294 #if defined(TARGET_PPC64)
295 void do_subfe_64 (void)
296 {
297 T0 = T1 + ~T0 + xer_ca;
298 if (likely((uint64_t)T0 >= (uint64_t)T1 &&
299 (xer_ca == 0 || (uint64_t)T0 != (uint64_t)T1))) {
300 xer_ca = 0;
301 } else {
302 xer_ca = 1;
303 }
304 }
305 #endif
306
307 void do_subfmeo (void)
308 {
309 T1 = T0;
310 T0 = ~T0 + xer_ca - 1;
311 xer_ov = ((uint32_t)~T1 & ((uint32_t)~T1 ^ (uint32_t)T0)) >> 31;
312 xer_so |= xer_ov;
313 if (likely((uint32_t)T1 != UINT32_MAX))
314 xer_ca = 1;
315 else
316 xer_ca = 0;
317 }
318
319 #if defined(TARGET_PPC64)
320 void do_subfmeo_64 (void)
321 {
322 T1 = T0;
323 T0 = ~T0 + xer_ca - 1;
324 xer_ov = ((uint64_t)~T1 & ((uint64_t)~T1 ^ (uint64_t)T0)) >> 63;
325 xer_so |= xer_ov;
326 if (likely((uint64_t)T1 != UINT64_MAX))
327 xer_ca = 1;
328 else
329 xer_ca = 0;
330 }
331 #endif
332
333 void do_subfzeo (void)
334 {
335 T1 = T0;
336 T0 = ~T0 + xer_ca;
337 xer_ov = (((uint32_t)~T1 ^ UINT32_MAX) &
338 ((uint32_t)(~T1) ^ (uint32_t)T0)) >> 31;
339 xer_so |= xer_ov;
340 if (likely((uint32_t)T0 >= (uint32_t)~T1)) {
341 xer_ca = 0;
342 } else {
343 xer_ca = 1;
344 }
345 }
346
347 #if defined(TARGET_PPC64)
348 void do_subfzeo_64 (void)
349 {
350 T1 = T0;
351 T0 = ~T0 + xer_ca;
352 xer_ov = (((uint64_t)~T1 ^ UINT64_MAX) &
353 ((uint64_t)(~T1) ^ (uint64_t)T0)) >> 63;
354 xer_so |= xer_ov;
355 if (likely((uint64_t)T0 >= (uint64_t)~T1)) {
356 xer_ca = 0;
357 } else {
358 xer_ca = 1;
359 }
360 }
361 #endif
362
363 void do_cntlzw (void)
364 {
365 T0 = clz32(T0);
366 }
367
368 #if defined(TARGET_PPC64)
369 void do_cntlzd (void)
370 {
371 T0 = clz64(T0);
372 }
373 #endif
374
375 /* shift right arithmetic helper */
376 void do_sraw (void)
377 {
378 int32_t ret;
379
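    /* XER[CA] is set only when the source is negative and at least one
     * non-zero bit is shifted out, i.e. the algebraic shift loses precision. */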
380 if (likely(!(T1 & 0x20UL))) {
381 if (likely((uint32_t)T1 != 0)) {
382 ret = (int32_t)T0 >> (T1 & 0x1fUL);
383 if (likely(ret >= 0 || ((int32_t)T0 & ((1 << T1) - 1)) == 0)) {
384 xer_ca = 0;
385 } else {
386 xer_ca = 1;
387 }
388 } else {
389 ret = T0;
390 xer_ca = 0;
391 }
392 } else {
393 ret = UINT32_MAX * ((uint32_t)T0 >> 31);
394 if (likely(ret >= 0 || ((uint32_t)T0 & ~0x80000000UL) == 0)) {
395 xer_ca = 0;
396 } else {
397 xer_ca = 1;
398 }
399 }
400 T0 = ret;
401 }
402
403 #if defined(TARGET_PPC64)
404 void do_srad (void)
405 {
406 int64_t ret;
407
408 if (likely(!(T1 & 0x40UL))) {
409 if (likely((uint64_t)T1 != 0)) {
410 ret = (int64_t)T0 >> (T1 & 0x3FUL);
411 if (likely(ret >= 0 || ((int64_t)T0 & ((1 << T1) - 1)) == 0)) {
412 xer_ca = 0;
413 } else {
414 xer_ca = 1;
415 }
416 } else {
417 ret = T0;
418 xer_ca = 0;
419 }
420 } else {
421 ret = UINT64_MAX * ((uint64_t)T0 >> 63);
422 if (likely(ret >= 0 || ((uint64_t)T0 & ~0x8000000000000000ULL) == 0)) {
423 xer_ca = 0;
424 } else {
425 xer_ca = 1;
426 }
427 }
428 T0 = ret;
429 }
430 #endif
431
432 void do_popcntb (void)
433 {
434 uint32_t ret;
435 int i;
436
437 ret = 0;
438 for (i = 0; i < 32; i += 8)
439 ret |= ctpop8((T0 >> i) & 0xFF) << i;
440 T0 = ret;
441 }
442
443 #if defined(TARGET_PPC64)
444 void do_popcntb_64 (void)
445 {
446 uint64_t ret;
447 int i;
448
449 ret = 0;
450 for (i = 0; i < 64; i += 8)
451 ret |= ctpop8((T0 >> i) & 0xFF) << i;
452 T0 = ret;
453 }
454 #endif
455
456 /*****************************************************************************/
457 /* Floating point operations helpers */
458 static always_inline int fpisneg (float64 d)
459 {
460 CPU_DoubleU u;
461
462 u.d = d;
463
464 return u.ll >> 63 != 0;
465 }
466
467 static always_inline int isden (float64 d)
468 {
469 CPU_DoubleU u;
470
471 u.d = d;
472
473 return ((u.ll >> 52) & 0x7FF) == 0;
474 }
475
476 static always_inline int iszero (float64 d)
477 {
478 CPU_DoubleU u;
479
480 u.d = d;
481
482 return (u.ll & ~0x8000000000000000ULL) == 0;
483 }
484
485 static always_inline int isinfinity (float64 d)
486 {
487 CPU_DoubleU u;
488
489 u.d = d;
490
491 return ((u.ll >> 52) & 0x7FF) == 0x7FF &&
492 (u.ll & 0x000FFFFFFFFFFFFFULL) == 0;
493 }
494
495 #ifdef CONFIG_SOFTFLOAT
496 static always_inline int isfinite (float64 d)
497 {
498 CPU_DoubleU u;
499
500 u.d = d;
501
502 return (((u.ll >> 52) & 0x7FF) != 0x7FF);
503 }
504
505 static always_inline int isnormal (float64 d)
506 {
507 CPU_DoubleU u;
508
509 u.d = d;
510
511 uint32_t exp = (u.ll >> 52) & 0x7FF;
512 return ((0 < exp) && (exp < 0x7FF));
513 }
514 #endif
515
516 void do_compute_fprf (int set_fprf)
517 {
518 int isneg;
519
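    /* FPRF is a 5-bit class + FPCC code: 0x11 QNaN, 0x09/0x05 -/+ infinity,
     * 0x12/0x02 -/+ zero, 0x18/0x14 -/+ denormal, 0x08/0x04 -/+ normal. */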
520 isneg = fpisneg(FT0);
521 if (unlikely(float64_is_nan(FT0))) {
522 if (float64_is_signaling_nan(FT0)) {
523 /* Signaling NaN: flags are undefined */
524 T0 = 0x00;
525 } else {
526 /* Quiet NaN */
527 T0 = 0x11;
528 }
529 } else if (unlikely(isinfinity(FT0))) {
530 /* +/- infinity */
531 if (isneg)
532 T0 = 0x09;
533 else
534 T0 = 0x05;
535 } else {
536 if (iszero(FT0)) {
537 /* +/- zero */
538 if (isneg)
539 T0 = 0x12;
540 else
541 T0 = 0x02;
542 } else {
543 if (isden(FT0)) {
544 /* Denormalized numbers */
545 T0 = 0x10;
546 } else {
547 /* Normalized numbers */
548 T0 = 0x00;
549 }
550 if (isneg) {
551 T0 |= 0x08;
552 } else {
553 T0 |= 0x04;
554 }
555 }
556 }
557 if (set_fprf) {
558 /* We update FPSCR_FPRF */
559 env->fpscr &= ~(0x1F << FPSCR_FPRF);
560 env->fpscr |= T0 << FPSCR_FPRF;
561 }
562 /* We just need fpcc to update Rc1 */
563 T0 &= 0xF;
564 }
565
566 /* Floating-point invalid operations exception */
567 static always_inline void fload_invalid_op_excp (int op)
568 {
569 int ve;
570
571 ve = fpscr_ve;
572 if (op & POWERPC_EXCP_FP_VXSNAN) {
573 /* Operation on signaling NaN */
574 env->fpscr |= 1 << FPSCR_VXSNAN;
575 }
576 if (op & POWERPC_EXCP_FP_VXSOFT) {
577 /* Software-defined condition */
578 env->fpscr |= 1 << FPSCR_VXSOFT;
579 }
580 switch (op & ~(POWERPC_EXCP_FP_VXSOFT | POWERPC_EXCP_FP_VXSNAN)) {
581 case POWERPC_EXCP_FP_VXISI:
582 /* Magnitude subtraction of infinities */
583 env->fpscr |= 1 << FPSCR_VXISI;
584 goto update_arith;
585 case POWERPC_EXCP_FP_VXIDI:
586 /* Division of infinity by infinity */
587 env->fpscr |= 1 << FPSCR_VXIDI;
588 goto update_arith;
589 case POWERPC_EXCP_FP_VXZDZ:
590 /* Division of zero by zero */
591 env->fpscr |= 1 << FPSCR_VXZDZ;
592 goto update_arith;
593 case POWERPC_EXCP_FP_VXIMZ:
594 /* Multiplication of zero by infinity */
595 env->fpscr |= 1 << FPSCR_VXIMZ;
596 goto update_arith;
597 case POWERPC_EXCP_FP_VXVC:
598 /* Ordered comparison of NaN */
599 env->fpscr |= 1 << FPSCR_VXVC;
600 env->fpscr &= ~(0xF << FPSCR_FPCC);
601 env->fpscr |= 0x11 << FPSCR_FPCC;
602 /* We must update the target FPR before raising the exception */
603 if (ve != 0) {
604 env->exception_index = POWERPC_EXCP_PROGRAM;
605 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_VXVC;
606 /* Update the floating-point enabled exception summary */
607 env->fpscr |= 1 << FPSCR_FEX;
608             /* Exception is deferred */
609 ve = 0;
610 }
611 break;
612 case POWERPC_EXCP_FP_VXSQRT:
613 /* Square root of a negative number */
614 env->fpscr |= 1 << FPSCR_VXSQRT;
615 update_arith:
616 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
617 if (ve == 0) {
618 /* Set the result to quiet NaN */
619 FT0 = UINT64_MAX;
620 env->fpscr &= ~(0xF << FPSCR_FPCC);
621 env->fpscr |= 0x11 << FPSCR_FPCC;
622 }
623 break;
624 case POWERPC_EXCP_FP_VXCVI:
625 /* Invalid conversion */
626 env->fpscr |= 1 << FPSCR_VXCVI;
627 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
628 if (ve == 0) {
629 /* Set the result to quiet NaN */
630 FT0 = UINT64_MAX;
631 env->fpscr &= ~(0xF << FPSCR_FPCC);
632 env->fpscr |= 0x11 << FPSCR_FPCC;
633 }
634 break;
635 }
636 /* Update the floating-point invalid operation summary */
637 env->fpscr |= 1 << FPSCR_VX;
638 /* Update the floating-point exception summary */
639 env->fpscr |= 1 << FPSCR_FX;
640 if (ve != 0) {
641 /* Update the floating-point enabled exception summary */
642 env->fpscr |= 1 << FPSCR_FEX;
643 if (msr_fe0 != 0 || msr_fe1 != 0)
644 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_FP | op);
645 }
646 }
647
648 static always_inline void float_zero_divide_excp (void)
649 {
650 CPU_DoubleU u0, u1;
651
652 env->fpscr |= 1 << FPSCR_ZX;
653 env->fpscr &= ~((1 << FPSCR_FR) | (1 << FPSCR_FI));
654 /* Update the floating-point exception summary */
655 env->fpscr |= 1 << FPSCR_FX;
656 if (fpscr_ze != 0) {
657 /* Update the floating-point enabled exception summary */
658 env->fpscr |= 1 << FPSCR_FEX;
659 if (msr_fe0 != 0 || msr_fe1 != 0) {
660 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
661 POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX);
662 }
663 } else {
664 /* Set the result to infinity */
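        /* Sign is the XOR of the two operand signs; an all-ones exponent
         * with a zero fraction encodes an IEEE infinity. */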
665 u0.d = FT0;
666 u1.d = FT1;
667 u0.ll = ((u0.ll ^ u1.ll) & 0x8000000000000000ULL);
668 u0.ll |= 0x7FFULL << 52;
669 FT0 = u0.d;
670 }
671 }
672
673 static always_inline void float_overflow_excp (void)
674 {
675 env->fpscr |= 1 << FPSCR_OX;
676 /* Update the floating-point exception summary */
677 env->fpscr |= 1 << FPSCR_FX;
678 if (fpscr_oe != 0) {
679 /* XXX: should adjust the result */
680 /* Update the floating-point enabled exception summary */
681 env->fpscr |= 1 << FPSCR_FEX;
682 /* We must update the target FPR before raising the exception */
683 env->exception_index = POWERPC_EXCP_PROGRAM;
684 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
685 } else {
686 env->fpscr |= 1 << FPSCR_XX;
687 env->fpscr |= 1 << FPSCR_FI;
688 }
689 }
690
691 static always_inline void float_underflow_excp (void)
692 {
693 env->fpscr |= 1 << FPSCR_UX;
694 /* Update the floating-point exception summary */
695 env->fpscr |= 1 << FPSCR_FX;
696 if (fpscr_ue != 0) {
697 /* XXX: should adjust the result */
698 /* Update the floating-point enabled exception summary */
699 env->fpscr |= 1 << FPSCR_FEX;
700 /* We must update the target FPR before raising the exception */
701 env->exception_index = POWERPC_EXCP_PROGRAM;
702 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
703 }
704 }
705
706 static always_inline void float_inexact_excp (void)
707 {
708 env->fpscr |= 1 << FPSCR_XX;
709 /* Update the floating-point exception summary */
710 env->fpscr |= 1 << FPSCR_FX;
711 if (fpscr_xe != 0) {
712 /* Update the floating-point enabled exception summary */
713 env->fpscr |= 1 << FPSCR_FEX;
714 /* We must update the target FPR before raising the exception */
715 env->exception_index = POWERPC_EXCP_PROGRAM;
716 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
717 }
718 }
719
720 static always_inline void fpscr_set_rounding_mode (void)
721 {
722 int rnd_type;
723
724 /* Set rounding mode */
725 switch (fpscr_rn) {
726 case 0:
727 /* Best approximation (round to nearest) */
728 rnd_type = float_round_nearest_even;
729 break;
730 case 1:
731 /* Smaller magnitude (round toward zero) */
732 rnd_type = float_round_to_zero;
733 break;
734 case 2:
735         /* Round toward +infinity */
736 rnd_type = float_round_up;
737 break;
738 default:
739 case 3:
740         /* Round toward -infinity */
741 rnd_type = float_round_down;
742 break;
743 }
744 set_float_rounding_mode(rnd_type, &env->fp_status);
745 }
746
747 void do_fpscr_setbit (int bit)
748 {
749 int prev;
750
751 prev = (env->fpscr >> bit) & 1;
752 env->fpscr |= 1 << bit;
753 if (prev == 0) {
754 switch (bit) {
755 case FPSCR_VX:
756 env->fpscr |= 1 << FPSCR_FX;
757 if (fpscr_ve)
758             goto raise_ve;
        break;
759 case FPSCR_OX:
760 env->fpscr |= 1 << FPSCR_FX;
761 if (fpscr_oe)
762 goto raise_oe;
763 break;
764 case FPSCR_UX:
765 env->fpscr |= 1 << FPSCR_FX;
766 if (fpscr_ue)
767 goto raise_ue;
768 break;
769 case FPSCR_ZX:
770 env->fpscr |= 1 << FPSCR_FX;
771 if (fpscr_ze)
772 goto raise_ze;
773 break;
774 case FPSCR_XX:
775 env->fpscr |= 1 << FPSCR_FX;
776 if (fpscr_xe)
777 goto raise_xe;
778 break;
779 case FPSCR_VXSNAN:
780 case FPSCR_VXISI:
781 case FPSCR_VXIDI:
782 case FPSCR_VXZDZ:
783 case FPSCR_VXIMZ:
784 case FPSCR_VXVC:
785 case FPSCR_VXSOFT:
786 case FPSCR_VXSQRT:
787 case FPSCR_VXCVI:
788 env->fpscr |= 1 << FPSCR_VX;
789 env->fpscr |= 1 << FPSCR_FX;
790 if (fpscr_ve != 0)
791 goto raise_ve;
792 break;
793 case FPSCR_VE:
794 if (fpscr_vx != 0) {
795 raise_ve:
796 env->error_code = POWERPC_EXCP_FP;
797 if (fpscr_vxsnan)
798 env->error_code |= POWERPC_EXCP_FP_VXSNAN;
799 if (fpscr_vxisi)
800 env->error_code |= POWERPC_EXCP_FP_VXISI;
801 if (fpscr_vxidi)
802 env->error_code |= POWERPC_EXCP_FP_VXIDI;
803 if (fpscr_vxzdz)
804 env->error_code |= POWERPC_EXCP_FP_VXZDZ;
805 if (fpscr_vximz)
806 env->error_code |= POWERPC_EXCP_FP_VXIMZ;
807 if (fpscr_vxvc)
808 env->error_code |= POWERPC_EXCP_FP_VXVC;
809 if (fpscr_vxsoft)
810 env->error_code |= POWERPC_EXCP_FP_VXSOFT;
811 if (fpscr_vxsqrt)
812 env->error_code |= POWERPC_EXCP_FP_VXSQRT;
813 if (fpscr_vxcvi)
814 env->error_code |= POWERPC_EXCP_FP_VXCVI;
815 goto raise_excp;
816 }
817 break;
818 case FPSCR_OE:
819 if (fpscr_ox != 0) {
820 raise_oe:
821 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_OX;
822 goto raise_excp;
823 }
824 break;
825 case FPSCR_UE:
826 if (fpscr_ux != 0) {
827 raise_ue:
828 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_UX;
829 goto raise_excp;
830 }
831 break;
832 case FPSCR_ZE:
833 if (fpscr_zx != 0) {
834 raise_ze:
835 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_ZX;
836 goto raise_excp;
837 }
838 break;
839 case FPSCR_XE:
840 if (fpscr_xx != 0) {
841 raise_xe:
842 env->error_code = POWERPC_EXCP_FP | POWERPC_EXCP_FP_XX;
843 goto raise_excp;
844 }
845 break;
846 case FPSCR_RN1:
847 case FPSCR_RN:
848 fpscr_set_rounding_mode();
849 break;
850 default:
851 break;
852 raise_excp:
853 /* Update the floating-point enabled exception summary */
854 env->fpscr |= 1 << FPSCR_FEX;
855 /* We have to update Rc1 before raising the exception */
856 env->exception_index = POWERPC_EXCP_PROGRAM;
857 break;
858 }
859 }
860 }
861
862 #if defined(WORDS_BIGENDIAN)
863 #define WORD0 0
864 #define WORD1 1
865 #else
866 #define WORD0 1
867 #define WORD1 0
868 #endif
869 void do_store_fpscr (uint32_t mask)
870 {
871 /*
872 * We use only the 32 LSB of the incoming fpr
873 */
874 CPU_DoubleU u;
875 uint32_t prev, new;
876 int i;
877
878 u.d = FT0;
879 prev = env->fpscr;
880 new = u.l.lower;
881 new &= ~0x90000000;
882 new |= prev & 0x90000000;
883 for (i = 0; i < 7; i++) {
884 if (mask & (1 << i)) {
885 env->fpscr &= ~(0xF << (4 * i));
886 env->fpscr |= new & (0xF << (4 * i));
887 }
888 }
889 /* Update VX and FEX */
890 if (fpscr_ix != 0)
891 env->fpscr |= 1 << FPSCR_VX;
892 else
893 env->fpscr &= ~(1 << FPSCR_VX);
894 if ((fpscr_ex & fpscr_eex) != 0) {
895 env->fpscr |= 1 << FPSCR_FEX;
896 env->exception_index = POWERPC_EXCP_PROGRAM;
897 /* XXX: we should compute it properly */
898 env->error_code = POWERPC_EXCP_FP;
899 }
900 else
901 env->fpscr &= ~(1 << FPSCR_FEX);
902 fpscr_set_rounding_mode();
903 }
904 #undef WORD0
905 #undef WORD1
906
907 #ifdef CONFIG_SOFTFLOAT
908 void do_float_check_status (void)
909 {
910 if (env->exception_index == POWERPC_EXCP_PROGRAM &&
911 (env->error_code & POWERPC_EXCP_FP)) {
912         /* Deferred floating-point exception after target FPR update */
913 if (msr_fe0 != 0 || msr_fe1 != 0)
914 do_raise_exception_err(env->exception_index, env->error_code);
915 } else if (env->fp_status.float_exception_flags & float_flag_overflow) {
916 float_overflow_excp();
917 } else if (env->fp_status.float_exception_flags & float_flag_underflow) {
918 float_underflow_excp();
919 } else if (env->fp_status.float_exception_flags & float_flag_inexact) {
920 float_inexact_excp();
921 }
922 }
923 #endif
924
925 #if USE_PRECISE_EMULATION
926 void do_fadd (void)
927 {
928 if (unlikely(float64_is_signaling_nan(FT0) ||
929 float64_is_signaling_nan(FT1))) {
930 /* sNaN addition */
931 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
932 } else if (likely(isfinite(FT0) || isfinite(FT1) ||
933 fpisneg(FT0) == fpisneg(FT1))) {
934 FT0 = float64_add(FT0, FT1, &env->fp_status);
935 } else {
936 /* Magnitude subtraction of infinities */
937 fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
938 }
939 }
940
941 void do_fsub (void)
942 {
943 if (unlikely(float64_is_signaling_nan(FT0) ||
944 float64_is_signaling_nan(FT1))) {
945 /* sNaN subtraction */
946 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
947 } else if (likely(isfinite(FT0) || isfinite(FT1) ||
948 fpisneg(FT0) != fpisneg(FT1))) {
949 FT0 = float64_sub(FT0, FT1, &env->fp_status);
950 } else {
951 /* Magnitude subtraction of infinities */
952 fload_invalid_op_excp(POWERPC_EXCP_FP_VXISI);
953 }
954 }
955
956 void do_fmul (void)
957 {
958 if (unlikely(float64_is_signaling_nan(FT0) ||
959 float64_is_signaling_nan(FT1))) {
960 /* sNaN multiplication */
961 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
962 } else if (unlikely((isinfinity(FT0) && iszero(FT1)) ||
963 (iszero(FT0) && isinfinity(FT1)))) {
964 /* Multiplication of zero by infinity */
965 fload_invalid_op_excp(POWERPC_EXCP_FP_VXIMZ);
966 } else {
967 FT0 = float64_mul(FT0, FT1, &env->fp_status);
968 }
969 }
970
971 void do_fdiv (void)
972 {
973 if (unlikely(float64_is_signaling_nan(FT0) ||
974 float64_is_signaling_nan(FT1))) {
975 /* sNaN division */
976 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
977 } else if (unlikely(isinfinity(FT0) && isinfinity(FT1))) {
978 /* Division of infinity by infinity */
979 fload_invalid_op_excp(POWERPC_EXCP_FP_VXIDI);
980 } else if (unlikely(iszero(FT1))) {
981 if (iszero(FT0)) {
982 /* Division of zero by zero */
983 fload_invalid_op_excp(POWERPC_EXCP_FP_VXZDZ);
984 } else {
985 /* Division by zero */
986 float_zero_divide_excp();
987 }
988 } else {
989 FT0 = float64_div(FT0, FT1, &env->fp_status);
990 }
991 }
992 #endif /* USE_PRECISE_EMULATION */
993
994 void do_fctiw (void)
995 {
996 CPU_DoubleU p;
997
998 if (unlikely(float64_is_signaling_nan(FT0))) {
999 /* sNaN conversion */
1000 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1001 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1002         /* qNaN / infinity conversion */
1003 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1004 } else {
1005 p.ll = float64_to_int32(FT0, &env->fp_status);
1006 #if USE_PRECISE_EMULATION
1007 /* XXX: higher bits are not supposed to be significant.
1008 * to make tests easier, return the same as a real PowerPC 750
1009 */
1010 p.ll |= 0xFFF80000ULL << 32;
1011 #endif
1012 FT0 = p.d;
1013 }
1014 }
1015
1016 void do_fctiwz (void)
1017 {
1018 CPU_DoubleU p;
1019
1020 if (unlikely(float64_is_signaling_nan(FT0))) {
1021 /* sNaN conversion */
1022 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1023 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1024         /* qNaN / infinity conversion */
1025 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1026 } else {
1027 p.ll = float64_to_int32_round_to_zero(FT0, &env->fp_status);
1028 #if USE_PRECISE_EMULATION
1029 /* XXX: higher bits are not supposed to be significant.
1030 * to make tests easier, return the same as a real PowerPC 750
1031 */
1032 p.ll |= 0xFFF80000ULL << 32;
1033 #endif
1034 FT0 = p.d;
1035 }
1036 }
1037
1038 #if defined(TARGET_PPC64)
1039 void do_fcfid (void)
1040 {
1041 CPU_DoubleU p;
1042
1043 p.d = FT0;
1044 FT0 = int64_to_float64(p.ll, &env->fp_status);
1045 }
1046
1047 void do_fctid (void)
1048 {
1049 CPU_DoubleU p;
1050
1051 if (unlikely(float64_is_signaling_nan(FT0))) {
1052 /* sNaN conversion */
1053 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1054 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1055         /* qNaN / infinity conversion */
1056 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1057 } else {
1058 p.ll = float64_to_int64(FT0, &env->fp_status);
1059 FT0 = p.d;
1060 }
1061 }
1062
1063 void do_fctidz (void)
1064 {
1065 CPU_DoubleU p;
1066
1067 if (unlikely(float64_is_signaling_nan(FT0))) {
1068 /* sNaN conversion */
1069 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1070 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1071         /* qNaN / infinity conversion */
1072 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1073 } else {
1074 p.ll = float64_to_int64_round_to_zero(FT0, &env->fp_status);
1075 FT0 = p.d;
1076 }
1077 }
1078
1079 #endif
1080
1081 static always_inline void do_fri (int rounding_mode)
1082 {
1083 if (unlikely(float64_is_signaling_nan(FT0))) {
1084 /* sNaN round */
1085 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN | POWERPC_EXCP_FP_VXCVI);
1086 } else if (unlikely(float64_is_nan(FT0) || isinfinity(FT0))) {
1087         /* qNaN / infinity round */
1088 fload_invalid_op_excp(POWERPC_EXCP_FP_VXCVI);
1089 } else {
1090 set_float_rounding_mode(rounding_mode, &env->fp_status);
1091 FT0 = float64_round_to_int(FT0, &env->fp_status);
1092 /* Restore rounding mode from FPSCR */
1093 fpscr_set_rounding_mode();
1094 }
1095 }
1096
1097 void do_frin (void)
1098 {
1099 do_fri(float_round_nearest_even);
1100 }
1101
1102 void do_friz (void)
1103 {
1104 do_fri(float_round_to_zero);
1105 }
1106
1107 void do_frip (void)
1108 {
1109 do_fri(float_round_up);
1110 }
1111
1112 void do_frim (void)
1113 {
1114 do_fri(float_round_down);
1115 }
1116
1117 #if USE_PRECISE_EMULATION
1118 void do_fmadd (void)
1119 {
1120 if (unlikely(float64_is_signaling_nan(FT0) ||
1121 float64_is_signaling_nan(FT1) ||
1122 float64_is_signaling_nan(FT2))) {
1123 /* sNaN operation */
1124 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1125 } else {
1126 #ifdef FLOAT128
1127 /* This is the way the PowerPC specification defines it */
1128 float128 ft0_128, ft1_128;
1129
1130 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1131 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1132 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1133 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1134 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1135 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1136 #else
1137 /* This is OK on x86 hosts */
1138 FT0 = (FT0 * FT1) + FT2;
1139 #endif
1140 }
1141 }
1142
1143 void do_fmsub (void)
1144 {
1145 if (unlikely(float64_is_signaling_nan(FT0) ||
1146 float64_is_signaling_nan(FT1) ||
1147 float64_is_signaling_nan(FT2))) {
1148 /* sNaN operation */
1149 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1150 } else {
1151 #ifdef FLOAT128
1152 /* This is the way the PowerPC specification defines it */
1153 float128 ft0_128, ft1_128;
1154
1155 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1156 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1157 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1158 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1159 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1160 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1161 #else
1162 /* This is OK on x86 hosts */
1163 FT0 = (FT0 * FT1) - FT2;
1164 #endif
1165 }
1166 }
1167 #endif /* USE_PRECISE_EMULATION */
1168
1169 void do_fnmadd (void)
1170 {
1171 if (unlikely(float64_is_signaling_nan(FT0) ||
1172 float64_is_signaling_nan(FT1) ||
1173 float64_is_signaling_nan(FT2))) {
1174 /* sNaN operation */
1175 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1176 } else {
1177 #if USE_PRECISE_EMULATION
1178 #ifdef FLOAT128
1179 /* This is the way the PowerPC specification defines it */
1180 float128 ft0_128, ft1_128;
1181
1182 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1183 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1184 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1185 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1186 ft0_128 = float128_add(ft0_128, ft1_128, &env->fp_status);
1187 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1188 #else
1189 /* This is OK on x86 hosts */
1190 FT0 = (FT0 * FT1) + FT2;
1191 #endif
1192 #else
1193 FT0 = float64_mul(FT0, FT1, &env->fp_status);
1194 FT0 = float64_add(FT0, FT2, &env->fp_status);
1195 #endif
1196 if (likely(!isnan(FT0)))
1197 FT0 = float64_chs(FT0);
1198 }
1199 }
1200
1201 void do_fnmsub (void)
1202 {
1203 if (unlikely(float64_is_signaling_nan(FT0) ||
1204 float64_is_signaling_nan(FT1) ||
1205 float64_is_signaling_nan(FT2))) {
1206 /* sNaN operation */
1207 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1208 } else {
1209 #if USE_PRECISE_EMULATION
1210 #ifdef FLOAT128
1211 /* This is the way the PowerPC specification defines it */
1212 float128 ft0_128, ft1_128;
1213
1214 ft0_128 = float64_to_float128(FT0, &env->fp_status);
1215 ft1_128 = float64_to_float128(FT1, &env->fp_status);
1216 ft0_128 = float128_mul(ft0_128, ft1_128, &env->fp_status);
1217 ft1_128 = float64_to_float128(FT2, &env->fp_status);
1218 ft0_128 = float128_sub(ft0_128, ft1_128, &env->fp_status);
1219 FT0 = float128_to_float64(ft0_128, &env->fp_status);
1220 #else
1221 /* This is OK on x86 hosts */
1222 FT0 = (FT0 * FT1) - FT2;
1223 #endif
1224 #else
1225 FT0 = float64_mul(FT0, FT1, &env->fp_status);
1226 FT0 = float64_sub(FT0, FT2, &env->fp_status);
1227 #endif
1228 if (likely(!isnan(FT0)))
1229 FT0 = float64_chs(FT0);
1230 }
1231 }
1232
1233 #if USE_PRECISE_EMULATION
1234 void do_frsp (void)
1235 {
1236 if (unlikely(float64_is_signaling_nan(FT0))) {
1237         /* sNaN round to single precision */
1238 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1239 } else {
1240 FT0 = float64_to_float32(FT0, &env->fp_status);
1241 }
1242 }
1243 #endif /* USE_PRECISE_EMULATION */
1244
1245 void do_fsqrt (void)
1246 {
1247 if (unlikely(float64_is_signaling_nan(FT0))) {
1248 /* sNaN square root */
1249 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1250 } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
1251 /* Square root of a negative nonzero number */
1252 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1253 } else {
1254 FT0 = float64_sqrt(FT0, &env->fp_status);
1255 }
1256 }
1257
1258 void do_fre (void)
1259 {
1260 CPU_DoubleU p;
1261
1262 if (unlikely(float64_is_signaling_nan(FT0))) {
1263 /* sNaN reciprocal */
1264 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1265 } else if (unlikely(iszero(FT0))) {
1266 /* Zero reciprocal */
1267 float_zero_divide_excp();
1268 } else if (likely(isnormal(FT0))) {
1269 FT0 = float64_div(1.0, FT0, &env->fp_status);
1270 } else {
1271 p.d = FT0;
1272 if (p.ll == 0x8000000000000000ULL) {
1273 p.ll = 0xFFF0000000000000ULL;
1274 } else if (p.ll == 0x0000000000000000ULL) {
1275 p.ll = 0x7FF0000000000000ULL;
1276 } else if (isnan(FT0)) {
1277 p.ll = 0x7FF8000000000000ULL;
1278 } else if (fpisneg(FT0)) {
1279 p.ll = 0x8000000000000000ULL;
1280 } else {
1281 p.ll = 0x0000000000000000ULL;
1282 }
1283 FT0 = p.d;
1284 }
1285 }
1286
1287 void do_fres (void)
1288 {
1289 CPU_DoubleU p;
1290
1291 if (unlikely(float64_is_signaling_nan(FT0))) {
1292 /* sNaN reciprocal */
1293 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1294 } else if (unlikely(iszero(FT0))) {
1295 /* Zero reciprocal */
1296 float_zero_divide_excp();
1297 } else if (likely(isnormal(FT0))) {
1298 #if USE_PRECISE_EMULATION
1299 FT0 = float64_div(1.0, FT0, &env->fp_status);
1300 FT0 = float64_to_float32(FT0, &env->fp_status);
1301 #else
1302 FT0 = float32_div(1.0, FT0, &env->fp_status);
1303 #endif
1304 } else {
1305 p.d = FT0;
1306 if (p.ll == 0x8000000000000000ULL) {
1307 p.ll = 0xFFF0000000000000ULL;
1308 } else if (p.ll == 0x0000000000000000ULL) {
1309 p.ll = 0x7FF0000000000000ULL;
1310 } else if (isnan(FT0)) {
1311 p.ll = 0x7FF8000000000000ULL;
1312 } else if (fpisneg(FT0)) {
1313 p.ll = 0x8000000000000000ULL;
1314 } else {
1315 p.ll = 0x0000000000000000ULL;
1316 }
1317 FT0 = p.d;
1318 }
1319 }
1320
1321 void do_frsqrte (void)
1322 {
1323 CPU_DoubleU p;
1324
1325 if (unlikely(float64_is_signaling_nan(FT0))) {
1326 /* sNaN reciprocal square root */
1327 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1328 } else if (unlikely(fpisneg(FT0) && !iszero(FT0))) {
1329 /* Reciprocal square root of a negative nonzero number */
1330 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSQRT);
1331 } else if (likely(isnormal(FT0))) {
1332 FT0 = float64_sqrt(FT0, &env->fp_status);
1333 FT0 = float32_div(1.0, FT0, &env->fp_status);
1334 } else {
1335 p.d = FT0;
1336 if (p.ll == 0x8000000000000000ULL) {
1337 p.ll = 0xFFF0000000000000ULL;
1338 } else if (p.ll == 0x0000000000000000ULL) {
1339 p.ll = 0x7FF0000000000000ULL;
1340 } else if (isnan(FT0)) {
1341 p.ll |= 0x000FFFFFFFFFFFFFULL;
1342 } else if (fpisneg(FT0)) {
1343 p.ll = 0x7FF8000000000000ULL;
1344 } else {
1345 p.ll = 0x0000000000000000ULL;
1346 }
1347 FT0 = p.d;
1348 }
1349 }
1350
1351 void do_fsel (void)
1352 {
1353 if (!fpisneg(FT0) || iszero(FT0))
1354 FT0 = FT1;
1355 else
1356 FT0 = FT2;
1357 }
1358
1359 void do_fcmpu (void)
1360 {
1361 if (unlikely(float64_is_signaling_nan(FT0) ||
1362 float64_is_signaling_nan(FT1))) {
1363 /* sNaN comparison */
1364 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN);
1365 } else {
1366 if (float64_lt(FT0, FT1, &env->fp_status)) {
1367 T0 = 0x08UL;
1368 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
1369 T0 = 0x04UL;
1370 } else {
1371 T0 = 0x02UL;
1372 }
1373 }
1374 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1375 env->fpscr |= T0 << FPSCR_FPRF;
1376 }
1377
1378 void do_fcmpo (void)
1379 {
1380 if (unlikely(float64_is_nan(FT0) ||
1381 float64_is_nan(FT1))) {
1382 if (float64_is_signaling_nan(FT0) ||
1383 float64_is_signaling_nan(FT1)) {
1384 /* sNaN comparison */
1385 fload_invalid_op_excp(POWERPC_EXCP_FP_VXSNAN |
1386 POWERPC_EXCP_FP_VXVC);
1387 } else {
1388 /* qNaN comparison */
1389 fload_invalid_op_excp(POWERPC_EXCP_FP_VXVC);
1390 }
1391 } else {
1392 if (float64_lt(FT0, FT1, &env->fp_status)) {
1393 T0 = 0x08UL;
1394 } else if (!float64_le(FT0, FT1, &env->fp_status)) {
1395 T0 = 0x04UL;
1396 } else {
1397 T0 = 0x02UL;
1398 }
1399 }
1400 env->fpscr &= ~(0x0F << FPSCR_FPRF);
1401 env->fpscr |= T0 << FPSCR_FPRF;
1402 }
1403
1404 #if !defined (CONFIG_USER_ONLY)
1405 void cpu_dump_rfi (target_ulong RA, target_ulong msr);
1406
1407 void do_store_msr (void)
1408 {
1409 T0 = hreg_store_msr(env, T0, 0);
1410 if (T0 != 0) {
1411 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1412 do_raise_exception(T0);
1413 }
1414 }
1415
1416 static always_inline void __do_rfi (target_ulong nip, target_ulong msr,
1417 target_ulong msrm, int keep_msrh)
1418 {
1419 #if defined(TARGET_PPC64)
1420 if (msr & (1ULL << MSR_SF)) {
1421 nip = (uint64_t)nip;
1422 msr &= (uint64_t)msrm;
1423 } else {
1424 nip = (uint32_t)nip;
1425 msr = (uint32_t)(msr & msrm);
1426 if (keep_msrh)
1427 msr |= env->msr & ~((uint64_t)0xFFFFFFFF);
1428 }
1429 #else
1430 nip = (uint32_t)nip;
1431 msr &= (uint32_t)msrm;
1432 #endif
1433 /* XXX: beware: this is false if VLE is supported */
1434 env->nip = nip & ~((target_ulong)0x00000003);
1435 hreg_store_msr(env, msr, 1);
1436 #if defined (DEBUG_OP)
1437 cpu_dump_rfi(env->nip, env->msr);
1438 #endif
1439 /* No need to raise an exception here,
1440 * as rfi is always the last insn of a TB
1441 */
1442 env->interrupt_request |= CPU_INTERRUPT_EXITTB;
1443 }
1444
1445 void do_rfi (void)
1446 {
1447 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1448 ~((target_ulong)0xFFFF0000), 1);
1449 }
1450
1451 #if defined(TARGET_PPC64)
1452 void do_rfid (void)
1453 {
1454 __do_rfi(env->spr[SPR_SRR0], env->spr[SPR_SRR1],
1455 ~((target_ulong)0xFFFF0000), 0);
1456 }
1457
1458 void do_hrfid (void)
1459 {
1460 __do_rfi(env->spr[SPR_HSRR0], env->spr[SPR_HSRR1],
1461 ~((target_ulong)0xFFFF0000), 0);
1462 }
1463 #endif
1464 #endif
1465
1466 void do_tw (int flags)
1467 {
1468 if (!likely(!(((int32_t)T0 < (int32_t)T1 && (flags & 0x10)) ||
1469 ((int32_t)T0 > (int32_t)T1 && (flags & 0x08)) ||
1470 ((int32_t)T0 == (int32_t)T1 && (flags & 0x04)) ||
1471 ((uint32_t)T0 < (uint32_t)T1 && (flags & 0x02)) ||
1472 ((uint32_t)T0 > (uint32_t)T1 && (flags & 0x01))))) {
1473 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1474 }
1475 }
1476
1477 #if defined(TARGET_PPC64)
1478 void do_td (int flags)
1479 {
1480 if (!likely(!(((int64_t)T0 < (int64_t)T1 && (flags & 0x10)) ||
1481 ((int64_t)T0 > (int64_t)T1 && (flags & 0x08)) ||
1482 ((int64_t)T0 == (int64_t)T1 && (flags & 0x04)) ||
1483 ((uint64_t)T0 < (uint64_t)T1 && (flags & 0x02)) ||
1484 ((uint64_t)T0 > (uint64_t)T1 && (flags & 0x01)))))
1485 do_raise_exception_err(POWERPC_EXCP_PROGRAM, POWERPC_EXCP_TRAP);
1486 }
1487 #endif
1488
1489 /*****************************************************************************/
1490 /* PowerPC 601 specific instructions (POWER bridge) */
1491 void do_POWER_abso (void)
1492 {
1493 if ((int32_t)T0 == INT32_MIN) {
1494 T0 = INT32_MAX;
1495 xer_ov = 1;
1496 } else if ((int32_t)T0 < 0) {
1497 T0 = -T0;
1498 xer_ov = 0;
1499 } else {
1500 xer_ov = 0;
1501 }
1502 xer_so |= xer_ov;
1503 }
1504
1505 void do_POWER_clcs (void)
1506 {
1507 switch (T0) {
1508 case 0x0CUL:
1509 /* Instruction cache line size */
1510 T0 = env->icache_line_size;
1511 break;
1512 case 0x0DUL:
1513 /* Data cache line size */
1514 T0 = env->dcache_line_size;
1515 break;
1516 case 0x0EUL:
1517 /* Minimum cache line size */
1518 T0 = env->icache_line_size < env->dcache_line_size ?
1519 env->icache_line_size : env->dcache_line_size;
1520 break;
1521 case 0x0FUL:
1522 /* Maximum cache line size */
1523 T0 = env->icache_line_size > env->dcache_line_size ?
1524 env->icache_line_size : env->dcache_line_size;
1525 break;
1526 default:
1527 /* Undefined */
1528 break;
1529 }
1530 }
1531
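/* The POWER div instruction divides the 64-bit quantity (RA || MQ) by RB:
 * the quotient goes to the target register, the remainder to MQ. */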
1532 void do_POWER_div (void)
1533 {
1534 uint64_t tmp;
1535
1536 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1537 (int32_t)T1 == 0) {
1538 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1539 env->spr[SPR_MQ] = 0;
1540 } else {
1541 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1542 env->spr[SPR_MQ] = tmp % T1;
1543 T0 = tmp / (int32_t)T1;
1544 }
1545 }
1546
1547 void do_POWER_divo (void)
1548 {
1549 int64_t tmp;
1550
1551 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1552 (int32_t)T1 == 0) {
1553 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1554 env->spr[SPR_MQ] = 0;
1555 xer_ov = 1;
1556 } else {
1557 tmp = ((uint64_t)T0 << 32) | env->spr[SPR_MQ];
1558 env->spr[SPR_MQ] = tmp % T1;
1559 tmp /= (int32_t)T1;
1560 if (tmp > (int64_t)INT32_MAX || tmp < (int64_t)INT32_MIN) {
1561 xer_ov = 1;
1562 } else {
1563 xer_ov = 0;
1564 }
1565 T0 = tmp;
1566 }
1567 xer_so |= xer_ov;
1568 }
1569
1570 void do_POWER_divs (void)
1571 {
1572 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1573 (int32_t)T1 == 0) {
1574 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1575 env->spr[SPR_MQ] = 0;
1576 } else {
1577 env->spr[SPR_MQ] = T0 % T1;
1578 T0 = (int32_t)T0 / (int32_t)T1;
1579 }
1580 }
1581
1582 void do_POWER_divso (void)
1583 {
1584 if (((int32_t)T0 == INT32_MIN && (int32_t)T1 == (int32_t)-1) ||
1585 (int32_t)T1 == 0) {
1586 T0 = UINT32_MAX * ((uint32_t)T0 >> 31);
1587 env->spr[SPR_MQ] = 0;
1588 xer_ov = 1;
1589 } else {
1590 T0 = (int32_t)T0 / (int32_t)T1;
1591 env->spr[SPR_MQ] = (int32_t)T0 % (int32_t)T1;
1592 xer_ov = 0;
1593 }
1594 xer_so |= xer_ov;
1595 }
1596
1597 void do_POWER_dozo (void)
1598 {
1599 if ((int32_t)T1 > (int32_t)T0) {
1600 T2 = T0;
1601 T0 = T1 - T0;
1602 if (((uint32_t)(~T2) ^ (uint32_t)T1 ^ UINT32_MAX) &
1603 ((uint32_t)(~T2) ^ (uint32_t)T0) & (1UL << 31)) {
1604 xer_ov = 1;
1605 xer_so = 1;
1606 } else {
1607 xer_ov = 0;
1608 }
1609 } else {
1610 T0 = 0;
1611 xer_ov = 0;
1612 }
1613 }
1614
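/* maskg: generate a mask of ones from the start bit (T0) through the stop bit
 * (T1), IBM bit numbering; the mask wraps (is inverted) when start > stop. */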
1615 void do_POWER_maskg (void)
1616 {
1617 uint32_t ret;
1618
1619 if ((uint32_t)T0 == (uint32_t)(T1 + 1)) {
1620 ret = UINT32_MAX;
1621 } else {
1622 ret = (UINT32_MAX >> ((uint32_t)T0)) ^
1623 ((UINT32_MAX >> ((uint32_t)T1)) >> 1);
1624 if ((uint32_t)T0 > (uint32_t)T1)
1625 ret = ~ret;
1626 }
1627 T0 = ret;
1628 }
1629
1630 void do_POWER_mulo (void)
1631 {
1632 uint64_t tmp;
1633
1634 tmp = (uint64_t)T0 * (uint64_t)T1;
1635 env->spr[SPR_MQ] = tmp >> 32;
1636 T0 = tmp;
1637 if (tmp >> 32 != ((uint64_t)T0 >> 16) * ((uint64_t)T1 >> 16)) {
1638 xer_ov = 1;
1639 xer_so = 1;
1640 } else {
1641 xer_ov = 0;
1642 }
1643 }
1644
1645 #if !defined (CONFIG_USER_ONLY)
1646 void do_POWER_rac (void)
1647 {
1648 mmu_ctx_t ctx;
1649 int nb_BATs;
1650
1651 /* We don't have to generate many instances of this instruction,
1652 * as rac is supervisor only.
1653 */
1654 /* XXX: FIX THIS: Pretend we have no BAT */
1655 nb_BATs = env->nb_BATs;
1656 env->nb_BATs = 0;
1657 if (get_physical_address(env, &ctx, T0, 0, ACCESS_INT) == 0)
1658 T0 = ctx.raddr;
1659 env->nb_BATs = nb_BATs;
1660 }
1661
1662 void do_POWER_rfsvc (void)
1663 {
1664 __do_rfi(env->lr, env->ctr, 0x0000FFFF, 0);
1665 }
1666
1667 void do_store_hid0_601 (void)
1668 {
1669 uint32_t hid0;
1670
1671 hid0 = env->spr[SPR_HID0];
1672 if ((T0 ^ hid0) & 0x00000008) {
1673 /* Change current endianness */
1674 env->hflags &= ~(1 << MSR_LE);
1675 env->hflags_nmsr &= ~(1 << MSR_LE);
1676 env->hflags_nmsr |= (1 << MSR_LE) & (((T0 >> 3) & 1) << MSR_LE);
1677 env->hflags |= env->hflags_nmsr;
1678 if (loglevel != 0) {
1679 fprintf(logfile, "%s: set endianness to %c => " ADDRX "\n",
1680 __func__, T0 & 0x8 ? 'l' : 'b', env->hflags);
1681 }
1682 }
1683 env->spr[SPR_HID0] = T0;
1684 }
1685 #endif
1686
1687 /*****************************************************************************/
1688 /* 602 specific instructions */
1689 /* mfrom is the craziest instruction ever seen, imho! */
1690 /* Real implementation uses a ROM table. Do the same */
1691 #define USE_MFROM_ROM_TABLE
1692 void do_op_602_mfrom (void)
1693 {
1694 if (likely(T0 < 602)) {
1695 #if defined(USE_MFROM_ROM_TABLE)
1696 #include "mfrom_table.c"
1697 T0 = mfrom_ROM_table[T0];
1698 #else
1699 double d;
1700         /* Extremely decomposed:
1701          *
1702          *   T0 = 256 * log10(10 ^ (-T0 / 256) + 1.0) + 0.5
1703          */
1704 d = T0;
1705 d = float64_div(d, 256, &env->fp_status);
1706 d = float64_chs(d);
1707 d = exp10(d); // XXX: use float emulation function
1708 d = float64_add(d, 1.0, &env->fp_status);
1709 d = log10(d); // XXX: use float emulation function
1710 d = float64_mul(d, 256, &env->fp_status);
1711 d = float64_add(d, 0.5, &env->fp_status);
1712 T0 = float64_round_to_int(d, &env->fp_status);
1713 #endif
1714 } else {
1715 T0 = 0;
1716 }
1717 }
1718
1719 /*****************************************************************************/
1720 /* Embedded PowerPC specific helpers */
1721 void do_405_check_sat (void)
1722 {
1723 if (!likely((((uint32_t)T1 ^ (uint32_t)T2) >> 31) ||
1724 !(((uint32_t)T0 ^ (uint32_t)T2) >> 31))) {
1725 /* Saturate result */
1726 if (T2 >> 31) {
1727 T0 = INT32_MIN;
1728 } else {
1729 T0 = INT32_MAX;
1730 }
1731 }
1732 }
1733
1734 /* XXX: to be improved to check access rights when in user-mode */
1735 void do_load_dcr (void)
1736 {
1737 target_ulong val;
1738
1739 if (unlikely(env->dcr_env == NULL)) {
1740 if (loglevel != 0) {
1741 fprintf(logfile, "No DCR environment\n");
1742 }
1743 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1744 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1745 } else if (unlikely(ppc_dcr_read(env->dcr_env, T0, &val) != 0)) {
1746 if (loglevel != 0) {
1747 fprintf(logfile, "DCR read error %d %03x\n", (int)T0, (int)T0);
1748 }
1749 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1750 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1751 } else {
1752 T0 = val;
1753 }
1754 }
1755
1756 void do_store_dcr (void)
1757 {
1758 if (unlikely(env->dcr_env == NULL)) {
1759 if (loglevel != 0) {
1760 fprintf(logfile, "No DCR environment\n");
1761 }
1762 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1763 POWERPC_EXCP_INVAL | POWERPC_EXCP_INVAL_INVAL);
1764 } else if (unlikely(ppc_dcr_write(env->dcr_env, T0, T1) != 0)) {
1765 if (loglevel != 0) {
1766 fprintf(logfile, "DCR write error %d %03x\n", (int)T0, (int)T0);
1767 }
1768 do_raise_exception_err(POWERPC_EXCP_PROGRAM,
1769 POWERPC_EXCP_INVAL | POWERPC_EXCP_PRIV_REG);
1770 }
1771 }
1772
1773 #if !defined(CONFIG_USER_ONLY)
1774 void do_40x_rfci (void)
1775 {
1776 __do_rfi(env->spr[SPR_40x_SRR2], env->spr[SPR_40x_SRR3],
1777 ~((target_ulong)0xFFFF0000), 0);
1778 }
1779
1780 void do_rfci (void)
1781 {
1782     __do_rfi(env->spr[SPR_BOOKE_CSRR0], env->spr[SPR_BOOKE_CSRR1],
1783 ~((target_ulong)0x3FFF0000), 0);
1784 }
1785
1786 void do_rfdi (void)
1787 {
1788     __do_rfi(env->spr[SPR_BOOKE_DSRR0], env->spr[SPR_BOOKE_DSRR1],
1789 ~((target_ulong)0x3FFF0000), 0);
1790 }
1791
1792 void do_rfmci (void)
1793 {
1794     __do_rfi(env->spr[SPR_BOOKE_MCSRR0], env->spr[SPR_BOOKE_MCSRR1],
1795 ~((target_ulong)0x3FFF0000), 0);
1796 }
1797
1798 void do_load_403_pb (int num)
1799 {
1800 T0 = env->pb[num];
1801 }
1802
1803 void do_store_403_pb (int num)
1804 {
1805 if (likely(env->pb[num] != T0)) {
1806 env->pb[num] = T0;
1807 /* Should be optimized */
1808 tlb_flush(env, 1);
1809 }
1810 }
1811 #endif
1812
1813 /* 440 specific */
1814 void do_440_dlmzb (void)
1815 {
1816 target_ulong mask;
1817 int i;
1818
1819 i = 1;
1820 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1821 if ((T0 & mask) == 0)
1822 goto done;
1823 i++;
1824 }
1825 for (mask = 0xFF000000; mask != 0; mask = mask >> 8) {
1826 if ((T1 & mask) == 0)
1827 break;
1828 i++;
1829 }
1830 done:
1831 T0 = i;
1832 }
1833
1834 /* SPE extension helpers */
1835 /* Use a table to make this quicker */
1836 static uint8_t hbrev[16] = {
1837 0x0, 0x8, 0x4, 0xC, 0x2, 0xA, 0x6, 0xE,
1838 0x1, 0x9, 0x5, 0xD, 0x3, 0xB, 0x7, 0xF,
1839 };
1840
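/* Reverse the bits of a byte by looking up each nibble in hbrev and swapping
 * the two halves, e.g. byte_reverse(0x12) == 0x48. */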
1841 static always_inline uint8_t byte_reverse (uint8_t val)
1842 {
1843 return hbrev[val >> 4] | (hbrev[val & 0xF] << 4);
1844 }
1845
1846 static always_inline uint32_t word_reverse (uint32_t val)
1847 {
1848 return byte_reverse(val >> 24) | (byte_reverse(val >> 16) << 8) |
1849 (byte_reverse(val >> 8) << 16) | (byte_reverse(val) << 24);
1850 }
1851
1852 #define MASKBITS 16 // Random value - to be fixed (implementation dependent)
1853 void do_brinc (void)
1854 {
1855 uint32_t a, b, d, mask;
1856
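    /* brinc: the +1 is applied to the bit-reversed image of the selected bits,
     * so the carry runs from the most- toward the least-significant selected
     * bit; bits of T0 outside the MASKBITS window are preserved. */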
1857 mask = UINT32_MAX >> (32 - MASKBITS);
1858 a = T0 & mask;
1859 b = T1 & mask;
1860 d = word_reverse(1 + word_reverse(a | ~b));
1861 T0 = (T0 & ~mask) | (d & b);
1862 }
1863
1864 #define DO_SPE_OP2(name) \
1865 void do_ev##name (void) \
1866 { \
1867 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32, T1_64 >> 32) << 32) | \
1868 (uint64_t)_do_e##name(T0_64, T1_64); \
1869 }
1870
1871 #define DO_SPE_OP1(name) \
1872 void do_ev##name (void) \
1873 { \
1874 T0_64 = ((uint64_t)_do_e##name(T0_64 >> 32) << 32) | \
1875 (uint64_t)_do_e##name(T0_64); \
1876 }
1877
1878 /* Fixed-point vector arithmetic */
1879 static always_inline uint32_t _do_eabs (uint32_t val)
1880 {
1881 if ((val & 0x80000000) && val != 0x80000000)
1882         val = -val;
1883
1884 return val;
1885 }
1886
1887 static always_inline uint32_t _do_eaddw (uint32_t op1, uint32_t op2)
1888 {
1889 return op1 + op2;
1890 }
1891
1892 static always_inline int _do_ecntlsw (uint32_t val)
1893 {
1894 if (val & 0x80000000)
1895 return clz32(~val);
1896 else
1897 return clz32(val);
1898 }
1899
1900 static always_inline int _do_ecntlzw (uint32_t val)
1901 {
1902 return clz32(val);
1903 }
1904
1905 static always_inline uint32_t _do_eneg (uint32_t val)
1906 {
1907 if (val != 0x80000000)
1908         val = -val;
1909
1910 return val;
1911 }
1912
1913 static always_inline uint32_t _do_erlw (uint32_t op1, uint32_t op2)
1914 {
1915 return rotl32(op1, op2);
1916 }
1917
1918 static always_inline uint32_t _do_erndw (uint32_t val)
1919 {
1920     return (val + 0x00008000) & 0xFFFF0000;
1921 }
1922
1923 static always_inline uint32_t _do_eslw (uint32_t op1, uint32_t op2)
1924 {
1925 /* No error here: 6 bits are used */
1926 return op1 << (op2 & 0x3F);
1927 }
1928
1929 static always_inline int32_t _do_esrws (int32_t op1, uint32_t op2)
1930 {
1931 /* No error here: 6 bits are used */
1932 return op1 >> (op2 & 0x3F);
1933 }
1934
1935 static always_inline uint32_t _do_esrwu (uint32_t op1, uint32_t op2)
1936 {
1937 /* No error here: 6 bits are used */
1938 return op1 >> (op2 & 0x3F);
1939 }
1940
1941 static always_inline uint32_t _do_esubfw (uint32_t op1, uint32_t op2)
1942 {
1943 return op2 - op1;
1944 }
1945
1946 /* evabs */
1947 DO_SPE_OP1(abs);
1948 /* evaddw */
1949 DO_SPE_OP2(addw);
1950 /* evcntlsw */
1951 DO_SPE_OP1(cntlsw);
1952 /* evcntlzw */
1953 DO_SPE_OP1(cntlzw);
1954 /* evneg */
1955 DO_SPE_OP1(neg);
1956 /* evrlw */
1957 DO_SPE_OP2(rlw);
1958 /* evrnd */
1959 DO_SPE_OP1(rndw);
1960 /* evslw */
1961 DO_SPE_OP2(slw);
1962 /* evsrws */
1963 DO_SPE_OP2(srws);
1964 /* evsrwu */
1965 DO_SPE_OP2(srwu);
1966 /* evsubfw */
1967 DO_SPE_OP2(subfw);
1968
1969 /* evsel is a little bit more complicated... */
1970 static always_inline uint32_t _do_esel (uint32_t op1, uint32_t op2, int n)
1971 {
1972 if (n)
1973 return op1;
1974 else
1975 return op2;
1976 }
1977
1978 void do_evsel (void)
1979 {
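    /* T0 is expected to hold the 4-bit CR field: bit 3 selects the source of
     * the high word, bit 2 the source of the low word. */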
1980 T0_64 = ((uint64_t)_do_esel(T0_64 >> 32, T1_64 >> 32, T0 >> 3) << 32) |
1981 (uint64_t)_do_esel(T0_64, T1_64, (T0 >> 2) & 1);
1982 }
1983
1984 /* Fixed-point vector comparisons */
1985 #define DO_SPE_CMP(name) \
1986 void do_ev##name (void) \
1987 { \
1988 T0 = _do_evcmp_merge((uint64_t)_do_e##name(T0_64 >> 32, \
1989 T1_64 >> 32) << 32, \
1990 _do_e##name(T0_64, T1_64)); \
1991 }
1992
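/* Pack the per-word comparison results into a 4-bit CR field:
 * bit 3 = high word, bit 2 = low word, bit 1 = OR, bit 0 = AND of the two. */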
1993 static always_inline uint32_t _do_evcmp_merge (int t0, int t1)
1994 {
1995 return (t0 << 3) | (t1 << 2) | ((t0 | t1) << 1) | (t0 & t1);
1996 }
1997 static always_inline int _do_ecmpeq (uint32_t op1, uint32_t op2)
1998 {
1999 return op1 == op2 ? 1 : 0;
2000 }
2001
2002 static always_inline int _do_ecmpgts (int32_t op1, int32_t op2)
2003 {
2004 return op1 > op2 ? 1 : 0;
2005 }
2006
2007 static always_inline int _do_ecmpgtu (uint32_t op1, uint32_t op2)
2008 {
2009 return op1 > op2 ? 1 : 0;
2010 }
2011
2012 static always_inline int _do_ecmplts (int32_t op1, int32_t op2)
2013 {
2014 return op1 < op2 ? 1 : 0;
2015 }
2016
2017 static always_inline int _do_ecmpltu (uint32_t op1, uint32_t op2)
2018 {
2019 return op1 < op2 ? 1 : 0;
2020 }
2021
2022 /* evcmpeq */
2023 DO_SPE_CMP(cmpeq);
2024 /* evcmpgts */
2025 DO_SPE_CMP(cmpgts);
2026 /* evcmpgtu */
2027 DO_SPE_CMP(cmpgtu);
2028 /* evcmplts */
2029 DO_SPE_CMP(cmplts);
2030 /* evcmpltu */
2031 DO_SPE_CMP(cmpltu);
2032
2033 /* Single precision floating-point conversions from/to integer */
2034 static always_inline uint32_t _do_efscfsi (int32_t val)
2035 {
2036 CPU_FloatU u;
2037
2038 u.f = int32_to_float32(val, &env->spe_status);
2039
2040 return u.l;
2041 }
2042
2043 static always_inline uint32_t _do_efscfui (uint32_t val)
2044 {
2045 CPU_FloatU u;
2046
2047 u.f = uint32_to_float32(val, &env->spe_status);
2048
2049 return u.l;
2050 }
2051
2052 static always_inline int32_t _do_efsctsi (uint32_t val)
2053 {
2054 CPU_FloatU u;
2055
2056 u.l = val;
2057     /* NaNs are not handled the way IEEE 754 specifies */
2058 if (unlikely(isnan(u.f)))
2059 return 0;
2060
2061 return float32_to_int32(u.f, &env->spe_status);
2062 }
2063
2064 static always_inline uint32_t _do_efsctui (uint32_t val)
2065 {
2066 CPU_FloatU u;
2067
2068 u.l = val;
2069     /* NaNs are not handled the way IEEE 754 specifies */
2070 if (unlikely(isnan(u.f)))
2071 return 0;
2072
2073 return float32_to_uint32(u.f, &env->spe_status);
2074 }
2075
2076 static always_inline int32_t _do_efsctsiz (uint32_t val)
2077 {
2078 CPU_FloatU u;
2079
2080 u.l = val;
2081     /* NaNs are not handled the way IEEE 754 specifies */
2082 if (unlikely(isnan(u.f)))
2083 return 0;
2084
2085 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2086 }
2087
2088 static always_inline uint32_t _do_efsctuiz (uint32_t val)
2089 {
2090 CPU_FloatU u;
2091
2092 u.l = val;
2093     /* NaNs are not handled the way IEEE 754 specifies */
2094 if (unlikely(isnan(u.f)))
2095 return 0;
2096
2097 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2098 }
2099
2100 void do_efscfsi (void)
2101 {
2102 T0_64 = _do_efscfsi(T0_64);
2103 }
2104
2105 void do_efscfui (void)
2106 {
2107 T0_64 = _do_efscfui(T0_64);
2108 }
2109
2110 void do_efsctsi (void)
2111 {
2112 T0_64 = _do_efsctsi(T0_64);
2113 }
2114
2115 void do_efsctui (void)
2116 {
2117 T0_64 = _do_efsctui(T0_64);
2118 }
2119
2120 void do_efsctsiz (void)
2121 {
2122 T0_64 = _do_efsctsiz(T0_64);
2123 }
2124
2125 void do_efsctuiz (void)
2126 {
2127 T0_64 = _do_efsctuiz(T0_64);
2128 }
2129
2130 /* Single precision floating-point conversion to/from fractional */
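/* These helpers treat the fractional formats as integers scaled by 2^32:
 * converting from fractional divides the converted value by 2^32, and
 * converting to fractional multiplies by 2^32 before the final conversion. */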
2131 static always_inline uint32_t _do_efscfsf (uint32_t val)
2132 {
2133 CPU_FloatU u;
2134 float32 tmp;
2135
2136 u.f = int32_to_float32(val, &env->spe_status);
2137 tmp = int64_to_float32(1ULL << 32, &env->spe_status);
2138 u.f = float32_div(u.f, tmp, &env->spe_status);
2139
2140 return u.l;
2141 }
2142
2143 static always_inline uint32_t _do_efscfuf (uint32_t val)
2144 {
2145 CPU_FloatU u;
2146 float32 tmp;
2147
2148 u.f = uint32_to_float32(val, &env->spe_status);
2149 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2150 u.f = float32_div(u.f, tmp, &env->spe_status);
2151
2152 return u.l;
2153 }
2154
2155 static always_inline int32_t _do_efsctsf (uint32_t val)
2156 {
2157 CPU_FloatU u;
2158 float32 tmp;
2159
2160 u.l = val;
2161     /* NaNs are not handled the way IEEE 754 specifies */
2162 if (unlikely(isnan(u.f)))
2163 return 0;
2164 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2165 u.f = float32_mul(u.f, tmp, &env->spe_status);
2166
2167 return float32_to_int32(u.f, &env->spe_status);
2168 }
2169
2170 static always_inline uint32_t _do_efsctuf (uint32_t val)
2171 {
2172 CPU_FloatU u;
2173 float32 tmp;
2174
2175 u.l = val;
2176     /* NaNs are not handled the way IEEE 754 specifies */
2177 if (unlikely(isnan(u.f)))
2178 return 0;
2179 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2180 u.f = float32_mul(u.f, tmp, &env->spe_status);
2181
2182 return float32_to_uint32(u.f, &env->spe_status);
2183 }
2184
2185 static always_inline int32_t _do_efsctsfz (uint32_t val)
2186 {
2187 CPU_FloatU u;
2188 float32 tmp;
2189
2190 u.l = val;
2191     /* NaNs are not handled the way IEEE 754 specifies */
2192 if (unlikely(isnan(u.f)))
2193 return 0;
2194 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2195 u.f = float32_mul(u.f, tmp, &env->spe_status);
2196
2197 return float32_to_int32_round_to_zero(u.f, &env->spe_status);
2198 }
2199
2200 static always_inline uint32_t _do_efsctufz (uint32_t val)
2201 {
2202 CPU_FloatU u;
2203 float32 tmp;
2204
2205 u.l = val;
2206     /* NaNs are not handled the way IEEE 754 specifies */
2207 if (unlikely(isnan(u.f)))
2208 return 0;
2209 tmp = uint64_to_float32(1ULL << 32, &env->spe_status);
2210 u.f = float32_mul(u.f, tmp, &env->spe_status);
2211
2212 return float32_to_uint32_round_to_zero(u.f, &env->spe_status);
2213 }
2214
2215 void do_efscfsf (void)
2216 {
2217 T0_64 = _do_efscfsf(T0_64);
2218 }
2219
2220 void do_efscfuf (void)
2221 {
2222 T0_64 = _do_efscfuf(T0_64);
2223 }
2224
2225 void do_efsctsf (void)
2226 {
2227 T0_64 = _do_efsctsf(T0_64);
2228 }
2229
2230 void do_efsctuf (void)
2231 {
2232 T0_64 = _do_efsctuf(T0_64);
2233 }
2234
2235 void do_efsctsfz (void)
2236 {
2237 T0_64 = _do_efsctsfz(T0_64);
2238 }
2239
2240 void do_efsctufz (void)
2241 {
2242 T0_64 = _do_efsctufz(T0_64);
2243 }
2244
2245 /* Double-precision floating-point helpers */
2246 static always_inline int _do_efdcmplt (uint64_t op1, uint64_t op2)
2247 {
2248 /* XXX: TODO: test special values (NaN, infinities, ...) */
2249 return _do_efdtstlt(op1, op2);
2250 }
2251
2252 static always_inline int _do_efdcmpgt (uint64_t op1, uint64_t op2)
2253 {
2254 /* XXX: TODO: test special values (NaN, infinities, ...) */
2255 return _do_efdtstgt(op1, op2);
2256 }
2257
2258 static always_inline int _do_efdcmpeq (uint64_t op1, uint64_t op2)
2259 {
2260 /* XXX: TODO: test special values (NaN, infinities, ...) */
2261 return _do_efdtsteq(op1, op2);
2262 }
2263
2264 void do_efdcmplt (void)
2265 {
2266 T0 = _do_efdcmplt(T0_64, T1_64);
2267 }
2268
2269 void do_efdcmpgt (void)
2270 {
2271 T0 = _do_efdcmpgt(T0_64, T1_64);
2272 }
2273
2274 void do_efdcmpeq (void)
2275 {
2276 T0 = _do_efdcmpeq(T0_64, T1_64);
2277 }
2278
2279 /* Double-precision floating-point conversion to/from integer */
2280 static always_inline uint64_t _do_efdcfsi (int64_t val)
2281 {
2282 CPU_DoubleU u;
2283
2284 u.d = int64_to_float64(val, &env->spe_status);
2285
2286 return u.ll;
2287 }
2288
2289 static always_inline uint64_t _do_efdcfui (uint64_t val)
2290 {
2291 CPU_DoubleU u;
2292
2293 u.d = uint64_to_float64(val, &env->spe_status);
2294
2295 return u.ll;
2296 }
2297
2298 static always_inline int64_t _do_efdctsi (uint64_t val)
2299 {
2300 CPU_DoubleU u;
2301
2302 u.ll = val;
2303 /* NaN inputs are not handled the way IEEE 754 specifies */
2304 if (unlikely(isnan(u.d)))
2305 return 0;
2306
2307 return float64_to_int64(u.d, &env->spe_status);
2308 }
2309
2310 static always_inline uint64_t _do_efdctui (uint64_t val)
2311 {
2312 CPU_DoubleU u;
2313
2314 u.ll = val;
2315 /* NaN inputs are not handled the way IEEE 754 specifies */
2316 if (unlikely(isnan(u.d)))
2317 return 0;
2318
2319 return float64_to_uint64(u.d, &env->spe_status);
2320 }
2321
2322 static always_inline int64_t _do_efdctsiz (uint64_t val)
2323 {
2324 CPU_DoubleU u;
2325
2326 u.ll = val;
2327 /* NaN inputs are not handled the way IEEE 754 specifies */
2328 if (unlikely(isnan(u.d)))
2329 return 0;
2330
2331 return float64_to_int64_round_to_zero(u.d, &env->spe_status);
2332 }
2333
2334 static always_inline uint64_t _do_efdctuiz (uint64_t val)
2335 {
2336 CPU_DoubleU u;
2337
2338 u.ll = val;
2339 /* NaN inputs are not handled the way IEEE 754 specifies */
2340 if (unlikely(isnan(u.d)))
2341 return 0;
2342
2343 return float64_to_uint64_round_to_zero(u.d, &env->spe_status);
2344 }
2345
2346 void do_efdcfsi (void)
2347 {
2348 T0_64 = _do_efdcfsi(T0_64);
2349 }
2350
2351 void do_efdcfui (void)
2352 {
2353 T0_64 = _do_efdcfui(T0_64);
2354 }
2355
2356 void do_efdctsi (void)
2357 {
2358 T0_64 = _do_efdctsi(T0_64);
2359 }
2360
2361 void do_efdctui (void)
2362 {
2363 T0_64 = _do_efdctui(T0_64);
2364 }
2365
2366 void do_efdctsiz (void)
2367 {
2368 T0_64 = _do_efdctsiz(T0_64);
2369 }
2370
2371 void do_efdctuiz (void)
2372 {
2373 T0_64 = _do_efdctuiz(T0_64);
2374 }
2375
2376 /* Double-precision floating-point conversion to/from fractional */
2377 static always_inline uint64_t _do_efdcfsf (int64_t val)
2378 {
2379 CPU_DoubleU u;
2380 float64 tmp;
2381
2382 u.d = int32_to_float64(val, &env->spe_status);
2383 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2384 u.d = float64_div(u.d, tmp, &env->spe_status);
2385
2386 return u.ll;
2387 }
2388
2389 static always_inline uint64_t _do_efdcfuf (uint64_t val)
2390 {
2391 CPU_DoubleU u;
2392 float64 tmp;
2393
2394 u.d = uint32_to_float64(val, &env->spe_status);
2395 tmp = int64_to_float64(1ULL << 32, &env->spe_status);
2396 u.d = float64_div(u.d, tmp, &env->spe_status);
2397
2398 return u.ll;
2399 }
2400
2401 static always_inline int64_t _do_efdctsf (uint64_t val)
2402 {
2403 CPU_DoubleU u;
2404 float64 tmp;
2405
2406 u.ll = val;
2407 /* NaN inputs are not handled the way IEEE 754 specifies */
2408 if (unlikely(isnan(u.d)))
2409 return 0;
2410 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2411 u.d = float64_mul(u.d, tmp, &env->spe_status);
2412
2413 return float64_to_int32(u.d, &env->spe_status);
2414 }
2415
2416 static always_inline uint64_t _do_efdctuf (uint64_t val)
2417 {
2418 CPU_DoubleU u;
2419 float64 tmp;
2420
2421 u.ll = val;
2422 /* NaN inputs are not handled the way IEEE 754 specifies */
2423 if (unlikely(isnan(u.d)))
2424 return 0;
2425 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2426 u.d = float64_mul(u.d, tmp, &env->spe_status);
2427
2428 return float64_to_uint32(u.d, &env->spe_status);
2429 }
2430
2431 static always_inline int64_t _do_efdctsfz (uint64_t val)
2432 {
2433 CPU_DoubleU u;
2434 float64 tmp;
2435
2436 u.ll = val;
2437 /* NaN inputs are not handled the way IEEE 754 specifies */
2438 if (unlikely(isnan(u.d)))
2439 return 0;
2440 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2441 u.d = float64_mul(u.d, tmp, &env->spe_status);
2442
2443 return float64_to_int32_round_to_zero(u.d, &env->spe_status);
2444 }
2445
2446 static always_inline uint64_t _do_efdctufz (uint64_t val)
2447 {
2448 CPU_DoubleU u;
2449 float64 tmp;
2450
2451 u.ll = val;
2452 /* NaN inputs are not handled the way IEEE 754 specifies */
2453 if (unlikely(isnan(u.d)))
2454 return 0;
2455 tmp = uint64_to_float64(1ULL << 32, &env->spe_status);
2456 u.d = float64_mul(u.d, tmp, &env->spe_status);
2457
2458 return float64_to_uint32_round_to_zero(u.d, &env->spe_status);
2459 }
2460
2461 void do_efdcfsf (void)
2462 {
2463 T0_64 = _do_efdcfsf(T0_64);
2464 }
2465
2466 void do_efdcfuf (void)
2467 {
2468 T0_64 = _do_efdcfuf(T0_64);
2469 }
2470
2471 void do_efdctsf (void)
2472 {
2473 T0_64 = _do_efdctsf(T0_64);
2474 }
2475
2476 void do_efdctuf (void)
2477 {
2478 T0_64 = _do_efdctuf(T0_64);
2479 }
2480
2481 void do_efdctsfz (void)
2482 {
2483 T0_64 = _do_efdctsfz(T0_64);
2484 }
2485
2486 void do_efdctufz (void)
2487 {
2488 T0_64 = _do_efdctufz(T0_64);
2489 }
2490
2491 /* Floating point conversion between single and double precision */
2492 static always_inline uint32_t _do_efscfd (uint64_t val)
2493 {
2494 CPU_DoubleU u1;
2495 CPU_FloatU u2;
2496
2497 u1.ll = val;
2498 u2.f = float64_to_float32(u1.d, &env->spe_status);
2499
2500 return u2.l;
2501 }
2502
2503 static always_inline uint64_t _do_efdcfs (uint32_t val)
2504 {
2505 CPU_DoubleU u2;
2506 CPU_FloatU u1;
2507
2508 u1.l = val;
2509 u2.d = float32_to_float64(u1.f, &env->spe_status);
2510
2511 return u2.ll;
2512 }
2513
2514 void do_efscfd (void)
2515 {
2516 T0_64 = _do_efscfd(T0_64);
2517 }
2518
2519 void do_efdcfs (void)
2520 {
2521 T0_64 = _do_efdcfs(T0_64);
2522 }
2523
2524 /* Single-precision floating-point vector arithmetic */
2525 /* evfsabs */
2526 DO_SPE_OP1(fsabs);
2527 /* evfsnabs */
2528 DO_SPE_OP1(fsnabs);
2529 /* evfsneg */
2530 DO_SPE_OP1(fsneg);
2531 /* evfsadd */
2532 DO_SPE_OP2(fsadd);
2533 /* evfssub */
2534 DO_SPE_OP2(fssub);
2535 /* evfsmul */
2536 DO_SPE_OP2(fsmul);
2537 /* evfsdiv */
2538 DO_SPE_OP2(fsdiv);
2539
2540 /* Single-precision floating-point comparisons */
2541 static always_inline int _do_efscmplt (uint32_t op1, uint32_t op2)
2542 {
2543 /* XXX: TODO: test special values (NaN, infinities, ...) */
2544 return _do_efststlt(op1, op2);
2545 }
2546
2547 static always_inline int _do_efscmpgt (uint32_t op1, uint32_t op2)
2548 {
2549 /* XXX: TODO: test special values (NaN, infinities, ...) */
2550 return _do_efststgt(op1, op2);
2551 }
2552
2553 static always_inline int _do_efscmpeq (uint32_t op1, uint32_t op2)
2554 {
2555 /* XXX: TODO: test special values (NaN, infinities, ...) */
2556 return _do_efststeq(op1, op2);
2557 }
2558
2559 void do_efscmplt (void)
2560 {
2561 T0 = _do_efscmplt(T0_64, T1_64);
2562 }
2563
2564 void do_efscmpgt (void)
2565 {
2566 T0 = _do_efscmpgt(T0_64, T1_64);
2567 }
2568
2569 void do_efscmpeq (void)
2570 {
2571 T0 = _do_efscmpeq(T0_64, T1_64);
2572 }
2573
2574 /* Single-precision floating-point vector comparisons */
2575 /* evfscmplt */
2576 DO_SPE_CMP(fscmplt);
2577 /* evfscmpgt */
2578 DO_SPE_CMP(fscmpgt);
2579 /* evfscmpeq */
2580 DO_SPE_CMP(fscmpeq);
2581 /* evfststlt */
2582 DO_SPE_CMP(fststlt);
2583 /* evfststgt */
2584 DO_SPE_CMP(fststgt);
2585 /* evfststeq */
2586 DO_SPE_CMP(fststeq);
2587
2588 /* Single-precision floating-point vector conversions */
2589 /* evfscfsi */
2590 DO_SPE_OP1(fscfsi);
2591 /* evfscfui */
2592 DO_SPE_OP1(fscfui);
2593 /* evfscfuf */
2594 DO_SPE_OP1(fscfuf);
2595 /* evfscfsf */
2596 DO_SPE_OP1(fscfsf);
2597 /* evfsctsi */
2598 DO_SPE_OP1(fsctsi);
2599 /* evfsctui */
2600 DO_SPE_OP1(fsctui);
2601 /* evfsctsiz */
2602 DO_SPE_OP1(fsctsiz);
2603 /* evfsctuiz */
2604 DO_SPE_OP1(fsctuiz);
2605 /* evfsctsf */
2606 DO_SPE_OP1(fsctsf);
2607 /* evfsctuf */
2608 DO_SPE_OP1(fsctuf);
2609
2610 /*****************************************************************************/
2611 /* Softmmu support */
2612 #if !defined (CONFIG_USER_ONLY)
2613
2614 #define MMUSUFFIX _mmu
2615
2616 #define SHIFT 0
2617 #include "softmmu_template.h"
2618
2619 #define SHIFT 1
2620 #include "softmmu_template.h"
2621
2622 #define SHIFT 2
2623 #include "softmmu_template.h"
2624
2625 #define SHIFT 3
2626 #include "softmmu_template.h"
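
Each of the four inclusions above expands softmmu_template.h for one access size (1 << SHIFT bytes, i.e. 1, 2, 4 and 8), generating the slow-path load/store helpers that call tlb_fill() below when the software TLB misses. A stripped-down illustration of this include-as-template technique, using a hypothetical header and function names (not QEMU's actual template):

/* access_template.h (hypothetical): included once per access width. */
#define NAME_(bits)  load##bits
#define NAME(bits)   NAME_(bits)

static unsigned NAME(DATA_BITS)(void)
{
    return DATA_BITS / 8;        /* stand-in for a DATA_BITS-wide guest access */
}

#undef NAME
#undef NAME_
#undef DATA_BITS

/* caller.c (hypothetical): each inclusion instantiates one helper. */
#define DATA_BITS 8
#include "access_template.h"     /* defines load8()  */
#define DATA_BITS 32
#include "access_template.h"     /* defines load32() */
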
2627
2628 /* Try to fill the TLB and raise an exception on error. If retaddr is
2629 NULL, it means that the function was called from C code (i.e. not
2630 from generated code or from helper.c) */
2631 /* XXX: fix it to restore all registers */
2632 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
2633 {
2634 TranslationBlock *tb;
2635 CPUState *saved_env;
2636 unsigned long pc;
2637 int ret;
2638
2639 /* XXX: hack to restore env in all cases, even if not called from
2640 generated code */
2641 saved_env = env;
2642 env = cpu_single_env;
2643 ret = cpu_ppc_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
2644 if (unlikely(ret != 0)) {
2645 if (likely(retaddr)) {
2646 /* now we have a real cpu fault */
2647 pc = (unsigned long)retaddr;
2648 tb = tb_find_pc(pc);
2649 if (likely(tb)) {
2650 /* the PC is inside the translated code. It means that we have
2651 a virtual CPU fault */
2652 cpu_restore_state(tb, env, pc, NULL);
2653 }
2654 }
2655 do_raise_exception_err(env->exception_index, env->error_code);
2656 }
2657 env = saved_env;
2658 }
2659
2660 /* Software-driven TLB management */
2661 /* PowerPC 602/603 software TLB load instruction helpers */
2662 void do_load_6xx_tlb (int is_code)
2663 {
2664 target_ulong RPN, CMP, EPN;
2665 int way;
2666
2667 RPN = env->spr[SPR_RPA];
2668 if (is_code) {
2669 CMP = env->spr[SPR_ICMP];
2670 EPN = env->spr[SPR_IMISS];
2671 } else {
2672 CMP = env->spr[SPR_DCMP];
2673 EPN = env->spr[SPR_DMISS];
2674 }
2675 way = (env->spr[SPR_SRR1] >> 17) & 1;
2676 #if defined (DEBUG_SOFTWARE_TLB)
2677 if (loglevel != 0) {
2678 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2679 " PTE1 " ADDRX " way %d\n",
2680 __func__, T0, EPN, CMP, RPN, way);
2681 }
2682 #endif
2683 /* Store this TLB */
2684 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2685 way, is_code, CMP, RPN);
2686 }
2687
2688 void do_load_74xx_tlb (int is_code)
2689 {
2690 target_ulong RPN, CMP, EPN;
2691 int way;
2692
2693 RPN = env->spr[SPR_PTELO];
2694 CMP = env->spr[SPR_PTEHI];
2695 EPN = env->spr[SPR_TLBMISS] & ~0x3;
2696 way = env->spr[SPR_TLBMISS] & 0x3;
2697 #if defined (DEBUG_SOFTWARE_TLB)
2698 if (loglevel != 0) {
2699 fprintf(logfile, "%s: EPN " TDX " " ADDRX " PTE0 " ADDRX
2700 " PTE1 " ADDRX " way %d\n",
2701 __func__, T0, EPN, CMP, RPN, way);
2702 }
2703 #endif
2704 /* Store this TLB */
2705 ppc6xx_tlb_store(env, (uint32_t)(T0 & TARGET_PAGE_MASK),
2706 way, is_code, CMP, RPN);
2707 }
2708
2709 static always_inline target_ulong booke_tlb_to_page_size (int size)
2710 {
2711 return 1024 << (2 * size);
2712 }
2713
2714 static always_inline int booke_page_size_to_tlb (target_ulong page_size)
2715 {
2716 int size;
2717
2718 switch (page_size) {
2719 case 0x00000400UL:
2720 size = 0x0;
2721 break;
2722 case 0x00001000UL:
2723 size = 0x1;
2724 break;
2725 case 0x00004000UL:
2726 size = 0x2;
2727 break;
2728 case 0x00010000UL:
2729 size = 0x3;
2730 break;
2731 case 0x00040000UL:
2732 size = 0x4;
2733 break;
2734 case 0x00100000UL:
2735 size = 0x5;
2736 break;
2737 case 0x00400000UL:
2738 size = 0x6;
2739 break;
2740 case 0x01000000UL:
2741 size = 0x7;
2742 break;
2743 case 0x04000000UL:
2744 size = 0x8;
2745 break;
2746 case 0x10000000UL:
2747 size = 0x9;
2748 break;
2749 case 0x40000000UL:
2750 size = 0xA;
2751 break;
2752 #if defined (TARGET_PPC64)
2753 case 0x000100000000ULL:
2754 size = 0xB;
2755 break;
2756 case 0x000400000000ULL:
2757 size = 0xC;
2758 break;
2759 case 0x001000000000ULL:
2760 size = 0xD;
2761 break;
2762 case 0x004000000000ULL:
2763 size = 0xE;
2764 break;
2765 case 0x010000000000ULL:
2766 size = 0xF;
2767 break;
2768 #endif
2769 default:
2770 size = -1;
2771 break;
2772 }
2773
2774 return size;
2775 }
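
The two helpers above implement the BookE TLB page-size encoding: field value n corresponds to a page of 1 KiB * 4^n (0 is 1 KiB, 1 is 4 KiB, up to 1 GiB for 10 and, on 64-bit targets, 1 TiB for 15), and booke_page_size_to_tlb() returns -1 for a size that has no encoding. A quick standalone check of that relationship (illustrative only):

#include <assert.h>
#include <stdint.h>

static uint64_t tlb_to_page_size(int n)
{
    return 1024ULL << (2 * n);      /* 1 KiB * 4^n, as in the helper above */
}

int main(void)
{
    assert(tlb_to_page_size(0)  == 0x00000400ULL);   /* 1 KiB */
    assert(tlb_to_page_size(1)  == 0x00001000ULL);   /* 4 KiB */
    assert(tlb_to_page_size(6)  == 0x00400000ULL);   /* 4 MiB */
    assert(tlb_to_page_size(10) == 0x40000000ULL);   /* 1 GiB */
    return 0;
}
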
2776
2777 /* Helpers for 4xx TLB management */
2778 void do_4xx_tlbre_lo (void)
2779 {
2780 ppcemb_tlb_t *tlb;
2781 int size;
2782
2783 T0 &= 0x3F;
2784 tlb = &env->tlb[T0].tlbe;
2785 T0 = tlb->EPN;
2786 if (tlb->prot & PAGE_VALID)
2787 T0 |= 0x400;
2788 size = booke_page_size_to_tlb(tlb->size);
2789 if (size < 0 || size > 0x7)
2790 size = 1;
2791 T0 |= size << 7;
2792 env->spr[SPR_40x_PID] = tlb->PID;
2793 }
2794
2795 void do_4xx_tlbre_hi (void)
2796 {
2797 ppcemb_tlb_t *tlb;
2798
2799 T0 &= 0x3F;
2800 tlb = &env->tlb[T0].tlbe;
2801 T0 = tlb->RPN;
2802 if (tlb->prot & PAGE_EXEC)
2803 T0 |= 0x200;
2804 if (tlb->prot & PAGE_WRITE)
2805 T0 |= 0x100;
2806 }
2807
2808 void do_4xx_tlbwe_hi (void)
2809 {
2810 ppcemb_tlb_t *tlb;
2811 target_ulong page, end;
2812
2813 #if defined (DEBUG_SOFTWARE_TLB)
2814 if (loglevel != 0) {
2815 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2816 }
2817 #endif
2818 T0 &= 0x3F;
2819 tlb = &env->tlb[T0].tlbe;
2820 /* Invalidate previous TLB (if it's valid) */
2821 if (tlb->prot & PAGE_VALID) {
2822 end = tlb->EPN + tlb->size;
2823 #if defined (DEBUG_SOFTWARE_TLB)
2824 if (loglevel != 0) {
2825 fprintf(logfile, "%s: invalidate old TLB %d start " ADDRX
2826 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2827 }
2828 #endif
2829 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2830 tlb_flush_page(env, page);
2831 }
2832 tlb->size = booke_tlb_to_page_size((T1 >> 7) & 0x7);
2833 /* We cannot handle TLB size < TARGET_PAGE_SIZE.
2834 * If this ever occurs, one should use the ppcemb target instead
2835 * of the ppc or ppc64 one
2836 */
2837 if ((T1 & 0x40) && tlb->size < TARGET_PAGE_SIZE) {
2838 cpu_abort(env, "TLB size " TARGET_FMT_lu " < %u "
2839 "are not supported (%d)\n",
2840 tlb->size, TARGET_PAGE_SIZE, (int)((T1 >> 7) & 0x7));
2841 }
2842 tlb->EPN = T1 & ~(tlb->size - 1);
2843 if (T1 & 0x40)
2844 tlb->prot |= PAGE_VALID;
2845 else
2846 tlb->prot &= ~PAGE_VALID;
2847 if (T1 & 0x20) {
2848 /* XXX: TO BE FIXED */
2849 cpu_abort(env, "Little-endian TLB entries are not supported for now\n");
2850 }
2851 tlb->PID = env->spr[SPR_40x_PID]; /* PID */
2852 tlb->attr = T1 & 0xFF;
2853 #if defined (DEBUG_SOFTWARE_TLB)
2854 if (loglevel != 0) {
2855 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2856 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2857 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2858 tlb->prot & PAGE_READ ? 'r' : '-',
2859 tlb->prot & PAGE_WRITE ? 'w' : '-',
2860 tlb->prot & PAGE_EXEC ? 'x' : '-',
2861 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2862 }
2863 #endif
2864 /* Invalidate new TLB (if valid) */
2865 if (tlb->prot & PAGE_VALID) {
2866 end = tlb->EPN + tlb->size;
2867 #if defined (DEBUG_SOFTWARE_TLB)
2868 if (loglevel != 0) {
2869 fprintf(logfile, "%s: invalidate TLB %d start " ADDRX
2870 " end " ADDRX "\n", __func__, (int)T0, tlb->EPN, end);
2871 }
2872 #endif
2873 for (page = tlb->EPN; page < end; page += TARGET_PAGE_SIZE)
2874 tlb_flush_page(env, page);
2875 }
2876 }
2877
2878 void do_4xx_tlbwe_lo (void)
2879 {
2880 ppcemb_tlb_t *tlb;
2881
2882 #if defined (DEBUG_SOFTWARE_TLB)
2883 if (loglevel != 0) {
2884 fprintf(logfile, "%s T0 " TDX " T1 " TDX "\n", __func__, T0, T1);
2885 }
2886 #endif
2887 T0 &= 0x3F;
2888 tlb = &env->tlb[T0].tlbe;
2889 tlb->RPN = T1 & 0xFFFFFC00;
2890 tlb->prot = PAGE_READ;
2891 if (T1 & 0x200)
2892 tlb->prot |= PAGE_EXEC;
2893 if (T1 & 0x100)
2894 tlb->prot |= PAGE_WRITE;
2895 #if defined (DEBUG_SOFTWARE_TLB)
2896 if (loglevel != 0) {
2897 fprintf(logfile, "%s: set up TLB %d RPN " PADDRX " EPN " ADDRX
2898 " size " ADDRX " prot %c%c%c%c PID %d\n", __func__,
2899 (int)T0, tlb->RPN, tlb->EPN, tlb->size,
2900 tlb->prot & PAGE_READ ? 'r' : '-',
2901 tlb->prot & PAGE_WRITE ? 'w' : '-',
2902 tlb->prot & PAGE_EXEC ? 'x' : '-',
2903 tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
2904 }
2905 #endif
2906 }
2907
2908 /* PowerPC 440 TLB management */
2909 void do_440_tlbwe (int word)
2910 {
2911 ppcemb_tlb_t *tlb;
2912 target_ulong EPN, RPN, size;
2913 int do_flush_tlbs;
2914
2915 #if defined (DEBUG_SOFTWARE_TLB)
2916 if (loglevel != 0) {
2917 fprintf(logfile, "%s word %d T0 " TDX " T1 " TDX "\n",
2918 __func__, word, T0, T1);
2919 }
2920 #endif
2921 do_flush_tlbs = 0;
2922 T0 &= 0x3F;
2923 tlb = &env->tlb[T0].tlbe;
2924 switch (word) {
2925 default:
2926 /* Just here to please gcc */
2927 case 0:
2928 EPN = T1 & 0xFFFFFC00;
2929 if ((tlb->prot & PAGE_VALID) && EPN != tlb->EPN)
2930 do_flush_tlbs = 1;
2931 tlb->EPN = EPN;
2932 size = booke_tlb_to_page_size((T1 >> 4) & 0xF);
2933 if ((tlb->prot & PAGE_VALID) && tlb->size < size)
2934 do_flush_tlbs = 1;
2935 tlb->size = size;
2936 tlb->attr &= ~0x1;
2937 tlb->attr |= (T1 >> 8) & 1;
2938 if (T1 & 0x200) {
2939 tlb->prot |= PAGE_VALID;
2940 } else {
2941 if (tlb->prot & PAGE_VALID) {
2942 tlb->prot &= ~PAGE_VALID;
2943 do_flush_tlbs = 1;
2944 }
2945 }
2946 tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
2947 if (do_flush_tlbs)
2948 tlb_flush(env, 1);
2949 break;
2950 case 1:
2951 RPN = T1 & 0xFFFFFC0F;
2952 if ((tlb->prot & PAGE_VALID) && tlb->RPN != RPN)
2953 tlb_flush(env, 1);
2954 tlb->RPN = RPN;
2955 break;
2956 case 2:
2957 tlb->attr = (tlb->attr & 0x1) | (T1 & 0x0000FF00);
2958 tlb->prot = tlb->prot & PAGE_VALID;
2959 if (T1 & 0x1)
2960 tlb->prot |= PAGE_READ << 4;
2961 if (T1 & 0x2)
2962 tlb->prot |= PAGE_WRITE << 4;
2963 if (T1 & 0x4)
2964 tlb->prot |= PAGE_EXEC << 4;
2965 if (T1 & 0x8)
2966 tlb->prot |= PAGE_READ;
2967 if (T1 & 0x10)
2968 tlb->prot |= PAGE_WRITE;
2969 if (T1 & 0x20)
2970 tlb->prot |= PAGE_EXEC;
2971 break;
2972 }
2973 }
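
Each case of the switch above handles one of the three TLB words the guest writes: word 0 carries the EPN, the encoded page size, one storage-attribute bit and the valid bit; word 1 carries the RPN; word 2 carries the remaining storage attributes and the six permission bits. A small sketch that decodes word 0 the same way do_440_tlbwe() does (the struct and field names are descriptive, not QEMU's):

#include <stdbool.h>
#include <stdint.h>

/* Fields extracted from 440 TLB word 0, mirroring "case 0" above. */
struct tlb440_word0 {
    uint32_t epn;         /* effective page number, 1 KiB aligned            */
    unsigned size_field;  /* encoded size, decoded by booke_tlb_to_page_size */
    bool     valid;
    unsigned attr_bit;    /* low storage-attribute bit                       */
};

static struct tlb440_word0 decode_440_word0(uint32_t t1)
{
    struct tlb440_word0 w;

    w.epn        = t1 & 0xFFFFFC00;
    w.size_field = (t1 >> 4) & 0xF;
    w.valid      = (t1 & 0x200) != 0;
    w.attr_bit   = (t1 >> 8) & 1;
    return w;
}
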
2974
2975 void do_440_tlbre (int word)
2976 {
2977 ppcemb_tlb_t *tlb;
2978 int size;
2979
2980 T0 &= 0x3F;
2981 tlb = &env->tlb[T0].tlbe;
2982 switch (word) {
2983 default:
2984 /* Just here to please gcc */
2985 case 0:
2986 T0 = tlb->EPN;
2987 size = booke_page_size_to_tlb(tlb->size);
2988 if (size < 0 || size > 0xF)
2989 size = 1;
2990 T0 |= size << 4;
2991 if (tlb->attr & 0x1)
2992 T0 |= 0x100;
2993 if (tlb->prot & PAGE_VALID)
2994 T0 |= 0x200;
2995 env->spr[SPR_440_MMUCR] &= ~0x000000FF;
2996 env->spr[SPR_440_MMUCR] |= tlb->PID;
2997 break;
2998 case 1:
2999 T0 = tlb->RPN;
3000 break;
3001 case 2:
3002 T0 = tlb->attr & ~0x1;
3003 if (tlb->prot & (PAGE_READ << 4))
3004 T0 |= 0x1;
3005 if (tlb->prot & (PAGE_WRITE << 4))
3006 T0 |= 0x2;
3007 if (tlb->prot & (PAGE_EXEC << 4))
3008 T0 |= 0x4;
3009 if (tlb->prot & PAGE_READ)
3010 T0 |= 0x8;
3011 if (tlb->prot & PAGE_WRITE)
3012 T0 |= 0x10;
3013 if (tlb->prot & PAGE_EXEC)
3014 T0 |= 0x20;
3015 break;
3016 }
3017 }
3018 #endif /* !CONFIG_USER_ONLY */