/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "host-utils.h"
26 #include <linux/kvm.h>
27 #include "qemu-timer.h"
29 /*****************************************************************************/
31 #if !defined (CONFIG_USER_ONLY)
33 #define MMUSUFFIX _mmu
36 #include "softmmu_template.h"
39 #include "softmmu_template.h"
42 #include "softmmu_template.h"
45 #include "softmmu_template.h"
47 /* try to fill the TLB and return an exception if error. If retaddr is
48 NULL, it means that the function was called in C code (i.e. not
49 from generated code or from helper.c) */
50 /* XXX: fix it to restore all registers */
51 void tlb_fill (target_ulong addr
, int is_write
, int mmu_idx
, void *retaddr
)
58 /* XXX: hack to restore env in all cases, even if not called from
62 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
, 1);
63 if (unlikely(ret
!= 0)) {
64 if (likely(retaddr
)) {
65 /* now we have a real cpu fault */
66 pc
= (unsigned long)retaddr
;
69 /* the PC is inside the translated code. It means that we have
70 a virtual CPU fault */
71 cpu_restore_state(tb
, env
, pc
);
81 /* #define DEBUG_HELPER */
83 #define HELPER_LOG(x...) qemu_log(x)
85 #define HELPER_LOG(x...)
88 /* raise an exception */
89 void HELPER(exception
)(uint32_t excp
)
91 HELPER_LOG("%s: exception %d\n", __FUNCTION__
, excp
);
92 env
->exception_index
= excp
;
96 #ifndef CONFIG_USER_ONLY
97 static void mvc_fast_memset(CPUState
*env
, uint32_t l
, uint64_t dest
,
100 target_phys_addr_t dest_phys
;
101 target_phys_addr_t len
= l
;
103 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
106 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
108 cpu_abort(env
, "should never reach here");
110 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
112 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
114 memset(dest_p
, byte
, len
);
116 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
119 static void mvc_fast_memmove(CPUState
*env
, uint32_t l
, uint64_t dest
,
122 target_phys_addr_t dest_phys
;
123 target_phys_addr_t src_phys
;
124 target_phys_addr_t len
= l
;
127 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
130 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
132 cpu_abort(env
, "should never reach here");
134 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
136 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
138 cpu_abort(env
, "should never reach here");
140 src_phys
|= src
& ~TARGET_PAGE_MASK
;
142 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
143 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
145 memmove(dest_p
, src_p
, len
);
147 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
148 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
153 uint32_t HELPER(nc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
159 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
160 __FUNCTION__
, l
, dest
, src
);
161 for (i
= 0; i
<= l
; i
++) {
162 x
= ldub(dest
+ i
) & ldub(src
+ i
);
172 uint32_t HELPER(xc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
178 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
179 __FUNCTION__
, l
, dest
, src
);
181 #ifndef CONFIG_USER_ONLY
182 /* xor with itself is the same as memset(0) */
183 if ((l
> 32) && (src
== dest
) &&
184 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
185 mvc_fast_memset(env
, l
+ 1, dest
, 0);
190 memset(g2h(dest
), 0, l
+ 1);
195 for (i
= 0; i
<= l
; i
++) {
196 x
= ldub(dest
+ i
) ^ ldub(src
+ i
);
206 uint32_t HELPER(oc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
212 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
213 __FUNCTION__
, l
, dest
, src
);
214 for (i
= 0; i
<= l
; i
++) {
215 x
= ldub(dest
+ i
) | ldub(src
+ i
);
225 void HELPER(mvc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
229 uint32_t l_64
= (l
+ 1) / 8;
231 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
232 __FUNCTION__
, l
, dest
, src
);
234 #ifndef CONFIG_USER_ONLY
236 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
237 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
238 if (dest
== (src
+ 1)) {
239 mvc_fast_memset(env
, l
+ 1, dest
, ldub(src
));
241 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
242 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
247 if (dest
== (src
+ 1)) {
248 memset(g2h(dest
), ldub(src
), l
+ 1);
251 memmove(g2h(dest
), g2h(src
), l
+ 1);
256 /* handle the parts that fit into 8-byte loads/stores */
257 if (dest
!= (src
+ 1)) {
258 for (i
= 0; i
< l_64
; i
++) {
259 stq(dest
+ x
, ldq(src
+ x
));
264 /* slow version crossing pages with byte accesses */
265 for (i
= x
; i
<= l
; i
++) {
266 stb(dest
+ i
, ldub(src
+ i
));
270 /* compare unsigned byte arrays */
271 uint32_t HELPER(clc
)(uint32_t l
, uint64_t s1
, uint64_t s2
)
276 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
277 __FUNCTION__
, l
, s1
, s2
);
278 for (i
= 0; i
<= l
; i
++) {
281 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
296 /* compare logical under mask */
297 uint32_t HELPER(clm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
301 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __FUNCTION__
, r1
,
307 r
= (r1
& 0xff000000UL
) >> 24;
308 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
319 mask
= (mask
<< 1) & 0xf;
326 /* store character under mask */
327 void HELPER(stcm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
330 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__
, r1
, mask
,
334 r
= (r1
& 0xff000000UL
) >> 24;
336 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask
, r
, addr
);
339 mask
= (mask
<< 1) & 0xf;
345 /* 64/64 -> 128 unsigned multiplication */
346 void HELPER(mlg
)(uint32_t r1
, uint64_t v2
)
348 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
349 /* assuming 64-bit hosts have __uint128_t */
350 __uint128_t res
= (__uint128_t
)env
->regs
[r1
+ 1];
351 res
*= (__uint128_t
)v2
;
352 env
->regs
[r1
] = (uint64_t)(res
>> 64);
353 env
->regs
[r1
+ 1] = (uint64_t)res
;
355 mulu64(&env
->regs
[r1
+ 1], &env
->regs
[r1
], env
->regs
[r1
+ 1], v2
);
359 /* 128 -> 64/64 unsigned division */
360 void HELPER(dlg
)(uint32_t r1
, uint64_t v2
)
362 uint64_t divisor
= v2
;
364 if (!env
->regs
[r1
]) {
365 /* 64 -> 64/64 case */
366 env
->regs
[r1
] = env
->regs
[r1
+1] % divisor
;
367 env
->regs
[r1
+1] = env
->regs
[r1
+1] / divisor
;
371 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
372 /* assuming 64-bit hosts have __uint128_t */
373 __uint128_t dividend
= (((__uint128_t
)env
->regs
[r1
]) << 64) |
375 __uint128_t quotient
= dividend
/ divisor
;
376 env
->regs
[r1
+1] = quotient
;
377 __uint128_t remainder
= dividend
% divisor
;
378 env
->regs
[r1
] = remainder
;
380 /* 32-bit hosts would need special wrapper functionality - just abort if
381 we encounter such a case; it's very unlikely anyways. */
382 cpu_abort(env
, "128 -> 64/64 division not implemented\n");
387 static inline uint64_t get_address(int x2
, int b2
, int d2
)
400 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
407 static inline uint64_t get_address_31fix(int reg
)
409 uint64_t r
= env
->regs
[reg
];
412 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
419 /* search string (c is byte to search, r2 is string, r1 end of string) */
420 uint32_t HELPER(srst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
424 uint64_t str
= get_address_31fix(r2
);
425 uint64_t end
= get_address_31fix(r1
);
427 HELPER_LOG("%s: c %d *r1 0x%" PRIx64
" *r2 0x%" PRIx64
"\n", __FUNCTION__
,
428 c
, env
->regs
[r1
], env
->regs
[r2
]);
430 for (i
= str
; i
!= end
; i
++) {
441 /* unsigned string compare (c is string terminator) */
442 uint32_t HELPER(clst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
444 uint64_t s1
= get_address_31fix(r1
);
445 uint64_t s2
= get_address_31fix(r2
);
449 #ifdef CONFIG_USER_ONLY
451 HELPER_LOG("%s: comparing '%s' and '%s'\n",
452 __FUNCTION__
, (char*)g2h(s1
), (char*)g2h(s2
));
458 if ((v1
== c
|| v2
== c
) || (v1
!= v2
)) {
468 cc
= (v1
< v2
) ? 1 : 2;
469 /* FIXME: 31-bit mode! */
477 void HELPER(mvpg
)(uint64_t r0
, uint64_t r1
, uint64_t r2
)
479 /* XXX missing r0 handling */
480 #ifdef CONFIG_USER_ONLY
483 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
++) {
484 stb(r1
+ i
, ldub(r2
+ i
));
487 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
491 /* string copy (c is string terminator) */
492 void HELPER(mvst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
494 uint64_t dest
= get_address_31fix(r1
);
495 uint64_t src
= get_address_31fix(r2
);
498 #ifdef CONFIG_USER_ONLY
500 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__
, (char*)g2h(src
),
513 env
->regs
[r1
] = dest
; /* FIXME: 31-bit mode! */
516 /* compare and swap 64-bit */
517 uint32_t HELPER(csg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
519 /* FIXME: locking? */
521 uint64_t v2
= ldq(a2
);
522 if (env
->regs
[r1
] == v2
) {
524 stq(a2
, env
->regs
[r3
]);
532 /* compare double and swap 64-bit */
533 uint32_t HELPER(cdsg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
535 /* FIXME: locking? */
537 uint64_t v2_hi
= ldq(a2
);
538 uint64_t v2_lo
= ldq(a2
+ 8);
539 uint64_t v1_hi
= env
->regs
[r1
];
540 uint64_t v1_lo
= env
->regs
[r1
+ 1];
542 if ((v1_hi
== v2_hi
) && (v1_lo
== v2_lo
)) {
544 stq(a2
, env
->regs
[r3
]);
545 stq(a2
+ 8, env
->regs
[r3
+ 1]);
548 env
->regs
[r1
] = v2_hi
;
549 env
->regs
[r1
+ 1] = v2_lo
;
555 /* compare and swap 32-bit */
556 uint32_t HELPER(cs
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
558 /* FIXME: locking? */
560 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__
, r1
, a2
, r3
);
561 uint32_t v2
= ldl(a2
);
562 if (((uint32_t)env
->regs
[r1
]) == v2
) {
564 stl(a2
, (uint32_t)env
->regs
[r3
]);
567 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | v2
;
572 static uint32_t helper_icm(uint32_t r1
, uint64_t address
, uint32_t mask
)
574 int pos
= 24; /* top of the lower half of r1 */
575 uint64_t rmask
= 0xff000000ULL
;
582 env
->regs
[r1
] &= ~rmask
;
584 if ((val
& 0x80) && !ccd
) {
588 if (val
&& cc
== 0) {
591 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
594 mask
= (mask
<< 1) & 0xf;
602 /* execute instruction
603 this instruction executes an insn modified with the contents of r1
604 it does not change the executed instruction in memory
605 it does not change the program counter
606 in other words: tricky...
607 currently implemented by interpreting the cases it is most commonly used in
609 uint32_t HELPER(ex
)(uint32_t cc
, uint64_t v1
, uint64_t addr
, uint64_t ret
)
611 uint16_t insn
= lduw_code(addr
);
612 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__
, v1
, addr
,
614 if ((insn
& 0xf0ff) == 0xd000) {
615 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
617 insn2
= ldl_code(addr
+ 2);
618 b1
= (insn2
>> 28) & 0xf;
619 b2
= (insn2
>> 12) & 0xf;
620 d1
= (insn2
>> 16) & 0xfff;
622 switch (insn
& 0xf00) {
624 helper_mvc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
627 cc
= helper_clc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
630 cc
= helper_xc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
636 } else if ((insn
& 0xff00) == 0x0a00) {
637 /* supervisor call */
638 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__
, (insn
|v1
) & 0xff);
639 env
->psw
.addr
= ret
- 4;
640 env
->int_svc_code
= (insn
|v1
) & 0xff;
641 env
->int_svc_ilc
= 4;
642 helper_exception(EXCP_SVC
);
643 } else if ((insn
& 0xff00) == 0xbf00) {
644 uint32_t insn2
, r1
, r3
, b2
, d2
;
645 insn2
= ldl_code(addr
+ 2);
646 r1
= (insn2
>> 20) & 0xf;
647 r3
= (insn2
>> 16) & 0xf;
648 b2
= (insn2
>> 12) & 0xf;
650 cc
= helper_icm(r1
, get_address(0, b2
, d2
), r3
);
653 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
659 /* absolute value 32-bit */
660 uint32_t HELPER(abs_i32
)(int32_t val
)
669 /* negative absolute value 32-bit */
670 int32_t HELPER(nabs_i32
)(int32_t val
)
679 /* absolute value 64-bit */
680 uint64_t HELPER(abs_i64
)(int64_t val
)
682 HELPER_LOG("%s: val 0x%" PRIx64
"\n", __FUNCTION__
, val
);
691 /* negative absolute value 64-bit */
692 int64_t HELPER(nabs_i64
)(int64_t val
)
701 /* add with carry 32-bit unsigned */
702 uint32_t HELPER(addc_u32
)(uint32_t cc
, uint32_t v1
, uint32_t v2
)
714 /* store character under mask high operates on the upper half of r1 */
715 void HELPER(stcmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
717 int pos
= 56; /* top of the upper half of r1 */
721 stb(address
, (env
->regs
[r1
] >> pos
) & 0xff);
724 mask
= (mask
<< 1) & 0xf;
729 /* insert character under mask high; same as icm, but operates on the
731 uint32_t HELPER(icmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
733 int pos
= 56; /* top of the upper half of r1 */
734 uint64_t rmask
= 0xff00000000000000ULL
;
741 env
->regs
[r1
] &= ~rmask
;
743 if ((val
& 0x80) && !ccd
) {
747 if (val
&& cc
== 0) {
750 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
753 mask
= (mask
<< 1) & 0xf;
761 /* insert psw mask and condition code into r1 */
762 void HELPER(ipm
)(uint32_t cc
, uint32_t r1
)
764 uint64_t r
= env
->regs
[r1
];
766 r
&= 0xffffffff00ffffffULL
;
767 r
|= (cc
<< 28) | ( (env
->psw
.mask
>> 40) & 0xf );
769 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__
,
770 cc
, env
->psw
.mask
, r
);
773 /* load access registers r1 to r3 from memory at a2 */
774 void HELPER(lam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
778 for (i
= r1
;; i
= (i
+ 1) % 16) {
779 env
->aregs
[i
] = ldl(a2
);
788 /* store access registers r1 to r3 in memory at a2 */
789 void HELPER(stam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
793 for (i
= r1
;; i
= (i
+ 1) % 16) {
794 stl(a2
, env
->aregs
[i
]);
804 uint32_t HELPER(mvcl
)(uint32_t r1
, uint32_t r2
)
806 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
807 uint64_t dest
= get_address_31fix(r1
);
808 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
809 uint64_t src
= get_address_31fix(r2
);
810 uint8_t pad
= src
>> 24;
814 if (destlen
== srclen
) {
816 } else if (destlen
< srclen
) {
822 if (srclen
> destlen
) {
826 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
831 for (; destlen
; dest
++, destlen
--) {
835 env
->regs
[r1
+ 1] = destlen
;
836 /* can't use srclen here, we trunc'ed it */
837 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
838 env
->regs
[r1
] = dest
;
844 /* move long extended another memcopy insn with more bells and whistles */
845 uint32_t HELPER(mvcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
847 uint64_t destlen
= env
->regs
[r1
+ 1];
848 uint64_t dest
= env
->regs
[r1
];
849 uint64_t srclen
= env
->regs
[r3
+ 1];
850 uint64_t src
= env
->regs
[r3
];
851 uint8_t pad
= a2
& 0xff;
855 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
856 destlen
= (uint32_t)destlen
;
857 srclen
= (uint32_t)srclen
;
862 if (destlen
== srclen
) {
864 } else if (destlen
< srclen
) {
870 if (srclen
> destlen
) {
874 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
879 for (; destlen
; dest
++, destlen
--) {
883 env
->regs
[r1
+ 1] = destlen
;
884 /* can't use srclen here, we trunc'ed it */
885 /* FIXME: 31-bit mode! */
886 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
887 env
->regs
[r1
] = dest
;
893 /* compare logical long extended memcompare insn with padding */
894 uint32_t HELPER(clcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
896 uint64_t destlen
= env
->regs
[r1
+ 1];
897 uint64_t dest
= get_address_31fix(r1
);
898 uint64_t srclen
= env
->regs
[r3
+ 1];
899 uint64_t src
= get_address_31fix(r3
);
900 uint8_t pad
= a2
& 0xff;
901 uint8_t v1
= 0,v2
= 0;
904 if (!(destlen
|| srclen
)) {
908 if (srclen
> destlen
) {
912 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
913 v1
= srclen
? ldub(src
) : pad
;
914 v2
= destlen
? ldub(dest
) : pad
;
916 cc
= (v1
< v2
) ? 1 : 2;
921 env
->regs
[r1
+ 1] = destlen
;
922 /* can't use srclen here, we trunc'ed it */
923 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
924 env
->regs
[r1
] = dest
;
930 /* subtract unsigned v2 from v1 with borrow */
931 uint32_t HELPER(slb
)(uint32_t cc
, uint32_t r1
, uint32_t v2
)
933 uint32_t v1
= env
->regs
[r1
];
934 uint32_t res
= v1
+ (~v2
) + (cc
>> 1);
936 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | res
;
945 /* subtract unsigned v2 from v1 with borrow */
946 uint32_t HELPER(slbg
)(uint32_t cc
, uint32_t r1
, uint64_t v1
, uint64_t v2
)
948 uint64_t res
= v1
+ (~v2
) + (cc
>> 1);
959 static inline int float_comp_to_cc(int float_compare
)
961 switch (float_compare
) {
962 case float_relation_equal
:
964 case float_relation_less
:
966 case float_relation_greater
:
968 case float_relation_unordered
:
971 cpu_abort(env
, "unknown return value for float compare\n");
975 /* condition codes for binary FP ops */
976 static uint32_t set_cc_f32(float32 v1
, float32 v2
)
978 return float_comp_to_cc(float32_compare_quiet(v1
, v2
, &env
->fpu_status
));
981 static uint32_t set_cc_f64(float64 v1
, float64 v2
)
983 return float_comp_to_cc(float64_compare_quiet(v1
, v2
, &env
->fpu_status
));
986 /* condition codes for unary FP ops */
987 static uint32_t set_cc_nz_f32(float32 v
)
989 if (float32_is_any_nan(v
)) {
991 } else if (float32_is_zero(v
)) {
993 } else if (float32_is_neg(v
)) {
1000 static uint32_t set_cc_nz_f64(float64 v
)
1002 if (float64_is_any_nan(v
)) {
1004 } else if (float64_is_zero(v
)) {
1006 } else if (float64_is_neg(v
)) {
1013 static uint32_t set_cc_nz_f128(float128 v
)
1015 if (float128_is_any_nan(v
)) {
1017 } else if (float128_is_zero(v
)) {
1019 } else if (float128_is_neg(v
)) {
1026 /* convert 32-bit int to 64-bit float */
1027 void HELPER(cdfbr
)(uint32_t f1
, int32_t v2
)
1029 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__
, v2
, f1
);
1030 env
->fregs
[f1
].d
= int32_to_float64(v2
, &env
->fpu_status
);
1033 /* convert 32-bit int to 128-bit float */
1034 void HELPER(cxfbr
)(uint32_t f1
, int32_t v2
)
1037 v1
.q
= int32_to_float128(v2
, &env
->fpu_status
);
1038 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1039 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1042 /* convert 64-bit int to 32-bit float */
1043 void HELPER(cegbr
)(uint32_t f1
, int64_t v2
)
1045 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1046 env
->fregs
[f1
].l
.upper
= int64_to_float32(v2
, &env
->fpu_status
);
1049 /* convert 64-bit int to 64-bit float */
1050 void HELPER(cdgbr
)(uint32_t f1
, int64_t v2
)
1052 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1053 env
->fregs
[f1
].d
= int64_to_float64(v2
, &env
->fpu_status
);
1056 /* convert 64-bit int to 128-bit float */
1057 void HELPER(cxgbr
)(uint32_t f1
, int64_t v2
)
1060 x1
.q
= int64_to_float128(v2
, &env
->fpu_status
);
1061 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__
, v2
,
1062 x1
.ll
.upper
, x1
.ll
.lower
);
1063 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1064 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1067 /* convert 32-bit int to 32-bit float */
1068 void HELPER(cefbr
)(uint32_t f1
, int32_t v2
)
1070 env
->fregs
[f1
].l
.upper
= int32_to_float32(v2
, &env
->fpu_status
);
1071 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__
, v2
,
1072 env
->fregs
[f1
].l
.upper
, f1
);
1075 /* 32-bit FP addition RR */
1076 uint32_t HELPER(aebr
)(uint32_t f1
, uint32_t f2
)
1078 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1079 env
->fregs
[f2
].l
.upper
,
1081 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1082 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1084 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1087 /* 64-bit FP addition RR */
1088 uint32_t HELPER(adbr
)(uint32_t f1
, uint32_t f2
)
1090 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1092 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__
,
1093 env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1095 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1098 /* 32-bit FP subtraction RR */
1099 uint32_t HELPER(sebr
)(uint32_t f1
, uint32_t f2
)
1101 env
->fregs
[f1
].l
.upper
= float32_sub(env
->fregs
[f1
].l
.upper
,
1102 env
->fregs
[f2
].l
.upper
,
1104 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1105 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1107 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1110 /* 64-bit FP subtraction RR */
1111 uint32_t HELPER(sdbr
)(uint32_t f1
, uint32_t f2
)
1113 env
->fregs
[f1
].d
= float64_sub(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1115 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1116 __FUNCTION__
, env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1118 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1121 /* 32-bit FP division RR */
1122 void HELPER(debr
)(uint32_t f1
, uint32_t f2
)
1124 env
->fregs
[f1
].l
.upper
= float32_div(env
->fregs
[f1
].l
.upper
,
1125 env
->fregs
[f2
].l
.upper
,
1129 /* 128-bit FP division RR */
1130 void HELPER(dxbr
)(uint32_t f1
, uint32_t f2
)
1133 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1134 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1136 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1137 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1139 res
.q
= float128_div(v1
.q
, v2
.q
, &env
->fpu_status
);
1140 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1141 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1144 /* 64-bit FP multiplication RR */
1145 void HELPER(mdbr
)(uint32_t f1
, uint32_t f2
)
1147 env
->fregs
[f1
].d
= float64_mul(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1151 /* 128-bit FP multiplication RR */
1152 void HELPER(mxbr
)(uint32_t f1
, uint32_t f2
)
1155 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1156 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1158 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1159 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1161 res
.q
= float128_mul(v1
.q
, v2
.q
, &env
->fpu_status
);
1162 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1163 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1166 /* convert 32-bit float to 64-bit float */
1167 void HELPER(ldebr
)(uint32_t r1
, uint32_t r2
)
1169 env
->fregs
[r1
].d
= float32_to_float64(env
->fregs
[r2
].l
.upper
,
1173 /* convert 128-bit float to 64-bit float */
1174 void HELPER(ldxbr
)(uint32_t f1
, uint32_t f2
)
1177 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1178 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1179 env
->fregs
[f1
].d
= float128_to_float64(x2
.q
, &env
->fpu_status
);
1180 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__
, env
->fregs
[f1
].d
);
1183 /* convert 64-bit float to 128-bit float */
1184 void HELPER(lxdbr
)(uint32_t f1
, uint32_t f2
)
1187 res
.q
= float64_to_float128(env
->fregs
[f2
].d
, &env
->fpu_status
);
1188 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1189 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1192 /* convert 64-bit float to 32-bit float */
1193 void HELPER(ledbr
)(uint32_t f1
, uint32_t f2
)
1195 float64 d2
= env
->fregs
[f2
].d
;
1196 env
->fregs
[f1
].l
.upper
= float64_to_float32(d2
, &env
->fpu_status
);
1199 /* convert 128-bit float to 32-bit float */
1200 void HELPER(lexbr
)(uint32_t f1
, uint32_t f2
)
1203 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1204 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1205 env
->fregs
[f1
].l
.upper
= float128_to_float32(x2
.q
, &env
->fpu_status
);
1206 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__
, env
->fregs
[f1
].l
.upper
);
1209 /* absolute value of 32-bit float */
1210 uint32_t HELPER(lpebr
)(uint32_t f1
, uint32_t f2
)
1213 float32 v2
= env
->fregs
[f2
].d
;
1214 v1
= float32_abs(v2
);
1215 env
->fregs
[f1
].d
= v1
;
1216 return set_cc_nz_f32(v1
);
1219 /* absolute value of 64-bit float */
1220 uint32_t HELPER(lpdbr
)(uint32_t f1
, uint32_t f2
)
1223 float64 v2
= env
->fregs
[f2
].d
;
1224 v1
= float64_abs(v2
);
1225 env
->fregs
[f1
].d
= v1
;
1226 return set_cc_nz_f64(v1
);
1229 /* absolute value of 128-bit float */
1230 uint32_t HELPER(lpxbr
)(uint32_t f1
, uint32_t f2
)
1234 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1235 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1236 v1
.q
= float128_abs(v2
.q
);
1237 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1238 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1239 return set_cc_nz_f128(v1
.q
);
1242 /* load and test 64-bit float */
1243 uint32_t HELPER(ltdbr
)(uint32_t f1
, uint32_t f2
)
1245 env
->fregs
[f1
].d
= env
->fregs
[f2
].d
;
1246 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1249 /* load and test 32-bit float */
1250 uint32_t HELPER(ltebr
)(uint32_t f1
, uint32_t f2
)
1252 env
->fregs
[f1
].l
.upper
= env
->fregs
[f2
].l
.upper
;
1253 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1256 /* load and test 128-bit float */
1257 uint32_t HELPER(ltxbr
)(uint32_t f1
, uint32_t f2
)
1260 x
.ll
.upper
= env
->fregs
[f2
].ll
;
1261 x
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1262 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1263 env
->fregs
[f1
+ 2].ll
= x
.ll
.lower
;
1264 return set_cc_nz_f128(x
.q
);
1267 /* load complement of 32-bit float */
1268 uint32_t HELPER(lcebr
)(uint32_t f1
, uint32_t f2
)
1270 env
->fregs
[f1
].l
.upper
= float32_chs(env
->fregs
[f2
].l
.upper
);
1272 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1275 /* load complement of 64-bit float */
1276 uint32_t HELPER(lcdbr
)(uint32_t f1
, uint32_t f2
)
1278 env
->fregs
[f1
].d
= float64_chs(env
->fregs
[f2
].d
);
1280 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1283 /* load complement of 128-bit float */
1284 uint32_t HELPER(lcxbr
)(uint32_t f1
, uint32_t f2
)
1287 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1288 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1289 x1
.q
= float128_chs(x2
.q
);
1290 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1291 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1292 return set_cc_nz_f128(x1
.q
);
1295 /* 32-bit FP addition RM */
1296 void HELPER(aeb
)(uint32_t f1
, uint32_t val
)
1298 float32 v1
= env
->fregs
[f1
].l
.upper
;
1301 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1303 env
->fregs
[f1
].l
.upper
= float32_add(v1
, v2
.f
, &env
->fpu_status
);
1306 /* 32-bit FP division RM */
1307 void HELPER(deb
)(uint32_t f1
, uint32_t val
)
1309 float32 v1
= env
->fregs
[f1
].l
.upper
;
1312 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__
,
1314 env
->fregs
[f1
].l
.upper
= float32_div(v1
, v2
.f
, &env
->fpu_status
);
1317 /* 32-bit FP multiplication RM */
1318 void HELPER(meeb
)(uint32_t f1
, uint32_t val
)
1320 float32 v1
= env
->fregs
[f1
].l
.upper
;
1323 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1325 env
->fregs
[f1
].l
.upper
= float32_mul(v1
, v2
.f
, &env
->fpu_status
);
1328 /* 32-bit FP compare RR */
1329 uint32_t HELPER(cebr
)(uint32_t f1
, uint32_t f2
)
1331 float32 v1
= env
->fregs
[f1
].l
.upper
;
1332 float32 v2
= env
->fregs
[f2
].l
.upper
;;
1333 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1335 return set_cc_f32(v1
, v2
);
1338 /* 64-bit FP compare RR */
1339 uint32_t HELPER(cdbr
)(uint32_t f1
, uint32_t f2
)
1341 float64 v1
= env
->fregs
[f1
].d
;
1342 float64 v2
= env
->fregs
[f2
].d
;;
1343 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__
,
1345 return set_cc_f64(v1
, v2
);
1348 /* 128-bit FP compare RR */
1349 uint32_t HELPER(cxbr
)(uint32_t f1
, uint32_t f2
)
1352 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1353 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1355 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1356 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1358 return float_comp_to_cc(float128_compare_quiet(v1
.q
, v2
.q
,
1362 /* 64-bit FP compare RM */
1363 uint32_t HELPER(cdb
)(uint32_t f1
, uint64_t a2
)
1365 float64 v1
= env
->fregs
[f1
].d
;
1368 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__
, v1
,
1370 return set_cc_f64(v1
, v2
.d
);
1373 /* 64-bit FP addition RM */
1374 uint32_t HELPER(adb
)(uint32_t f1
, uint64_t a2
)
1376 float64 v1
= env
->fregs
[f1
].d
;
1379 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__
,
1381 env
->fregs
[f1
].d
= v1
= float64_add(v1
, v2
.d
, &env
->fpu_status
);
1382 return set_cc_nz_f64(v1
);
1385 /* 32-bit FP subtraction RM */
1386 void HELPER(seb
)(uint32_t f1
, uint32_t val
)
1388 float32 v1
= env
->fregs
[f1
].l
.upper
;
1391 env
->fregs
[f1
].l
.upper
= float32_sub(v1
, v2
.f
, &env
->fpu_status
);
1394 /* 64-bit FP subtraction RM */
1395 uint32_t HELPER(sdb
)(uint32_t f1
, uint64_t a2
)
1397 float64 v1
= env
->fregs
[f1
].d
;
1400 env
->fregs
[f1
].d
= v1
= float64_sub(v1
, v2
.d
, &env
->fpu_status
);
1401 return set_cc_nz_f64(v1
);
1404 /* 64-bit FP multiplication RM */
1405 void HELPER(mdb
)(uint32_t f1
, uint64_t a2
)
1407 float64 v1
= env
->fregs
[f1
].d
;
1410 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__
,
1412 env
->fregs
[f1
].d
= float64_mul(v1
, v2
.d
, &env
->fpu_status
);
1415 /* 64-bit FP division RM */
1416 void HELPER(ddb
)(uint32_t f1
, uint64_t a2
)
1418 float64 v1
= env
->fregs
[f1
].d
;
1421 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__
,
1423 env
->fregs
[f1
].d
= float64_div(v1
, v2
.d
, &env
->fpu_status
);
1426 static void set_round_mode(int m3
)
1433 /* biased round no nearest */
1435 /* round to nearest */
1436 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu_status
);
1440 set_float_rounding_mode(float_round_to_zero
, &env
->fpu_status
);
1444 set_float_rounding_mode(float_round_up
, &env
->fpu_status
);
1448 set_float_rounding_mode(float_round_down
, &env
->fpu_status
);
1453 /* convert 32-bit float to 64-bit int */
1454 uint32_t HELPER(cgebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1456 float32 v2
= env
->fregs
[f2
].l
.upper
;
1458 env
->regs
[r1
] = float32_to_int64(v2
, &env
->fpu_status
);
1459 return set_cc_nz_f32(v2
);
1462 /* convert 64-bit float to 64-bit int */
1463 uint32_t HELPER(cgdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1465 float64 v2
= env
->fregs
[f2
].d
;
1467 env
->regs
[r1
] = float64_to_int64(v2
, &env
->fpu_status
);
1468 return set_cc_nz_f64(v2
);
1471 /* convert 128-bit float to 64-bit int */
1472 uint32_t HELPER(cgxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1475 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1476 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1478 env
->regs
[r1
] = float128_to_int64(v2
.q
, &env
->fpu_status
);
1479 if (float128_is_any_nan(v2
.q
)) {
1481 } else if (float128_is_zero(v2
.q
)) {
1483 } else if (float128_is_neg(v2
.q
)) {
1490 /* convert 32-bit float to 32-bit int */
1491 uint32_t HELPER(cfebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1493 float32 v2
= env
->fregs
[f2
].l
.upper
;
1495 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1496 float32_to_int32(v2
, &env
->fpu_status
);
1497 return set_cc_nz_f32(v2
);
1500 /* convert 64-bit float to 32-bit int */
1501 uint32_t HELPER(cfdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1503 float64 v2
= env
->fregs
[f2
].d
;
1505 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1506 float64_to_int32(v2
, &env
->fpu_status
);
1507 return set_cc_nz_f64(v2
);
1510 /* convert 128-bit float to 32-bit int */
1511 uint32_t HELPER(cfxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1514 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1515 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1516 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1517 float128_to_int32(v2
.q
, &env
->fpu_status
);
1518 return set_cc_nz_f128(v2
.q
);
1521 /* load 32-bit FP zero */
1522 void HELPER(lzer
)(uint32_t f1
)
1524 env
->fregs
[f1
].l
.upper
= float32_zero
;
1527 /* load 64-bit FP zero */
1528 void HELPER(lzdr
)(uint32_t f1
)
1530 env
->fregs
[f1
].d
= float64_zero
;
1533 /* load 128-bit FP zero */
1534 void HELPER(lzxr
)(uint32_t f1
)
1537 x
.q
= float64_to_float128(float64_zero
, &env
->fpu_status
);
1538 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1539 env
->fregs
[f1
+ 1].ll
= x
.ll
.lower
;
1542 /* 128-bit FP subtraction RR */
1543 uint32_t HELPER(sxbr
)(uint32_t f1
, uint32_t f2
)
1546 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1547 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1549 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1550 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1552 res
.q
= float128_sub(v1
.q
, v2
.q
, &env
->fpu_status
);
1553 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1554 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1555 return set_cc_nz_f128(res
.q
);
1558 /* 128-bit FP addition RR */
1559 uint32_t HELPER(axbr
)(uint32_t f1
, uint32_t f2
)
1562 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1563 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1565 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1566 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1568 res
.q
= float128_add(v1
.q
, v2
.q
, &env
->fpu_status
);
1569 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1570 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1571 return set_cc_nz_f128(res
.q
);
1574 /* 32-bit FP multiplication RR */
1575 void HELPER(meebr
)(uint32_t f1
, uint32_t f2
)
1577 env
->fregs
[f1
].l
.upper
= float32_mul(env
->fregs
[f1
].l
.upper
,
1578 env
->fregs
[f2
].l
.upper
,
1582 /* 64-bit FP division RR */
1583 void HELPER(ddbr
)(uint32_t f1
, uint32_t f2
)
1585 env
->fregs
[f1
].d
= float64_div(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1589 /* 64-bit FP multiply and add RM */
1590 void HELPER(madb
)(uint32_t f1
, uint64_t a2
, uint32_t f3
)
1592 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__
, f1
, a2
, f3
);
1595 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
,
1596 float64_mul(v2
.d
, env
->fregs
[f3
].d
,
1601 /* 64-bit FP multiply and add RR */
1602 void HELPER(madbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1604 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1605 env
->fregs
[f1
].d
= float64_add(float64_mul(env
->fregs
[f2
].d
,
1608 env
->fregs
[f1
].d
, &env
->fpu_status
);
1611 /* 64-bit FP multiply and subtract RR */
1612 void HELPER(msdbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1614 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1615 env
->fregs
[f1
].d
= float64_sub(float64_mul(env
->fregs
[f2
].d
,
1618 env
->fregs
[f1
].d
, &env
->fpu_status
);
1621 /* 32-bit FP multiply and add RR */
1622 void HELPER(maebr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1624 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1625 float32_mul(env
->fregs
[f2
].l
.upper
,
1626 env
->fregs
[f3
].l
.upper
,
1631 /* convert 64-bit float to 128-bit float */
1632 void HELPER(lxdb
)(uint32_t f1
, uint64_t a2
)
1637 v1
.q
= float64_to_float128(v2
.d
, &env
->fpu_status
);
1638 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1639 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1642 /* test data class 32-bit */
1643 uint32_t HELPER(tceb
)(uint32_t f1
, uint64_t m2
)
1645 float32 v1
= env
->fregs
[f1
].l
.upper
;
1646 int neg
= float32_is_neg(v1
);
1649 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, (long)v1
, m2
, neg
);
1650 if ((float32_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1651 (float32_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1652 (float32_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1653 (float32_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1655 } else if (m2
& (1 << (9-neg
))) {
1656 /* assume normalized number */
1660 /* FIXME: denormalized? */
1664 /* test data class 64-bit */
1665 uint32_t HELPER(tcdb
)(uint32_t f1
, uint64_t m2
)
1667 float64 v1
= env
->fregs
[f1
].d
;
1668 int neg
= float64_is_neg(v1
);
1671 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, v1
, m2
, neg
);
1672 if ((float64_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1673 (float64_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1674 (float64_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1675 (float64_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1677 } else if (m2
& (1 << (9-neg
))) {
1678 /* assume normalized number */
1681 /* FIXME: denormalized? */
1685 /* test data class 128-bit */
1686 uint32_t HELPER(tcxb
)(uint32_t f1
, uint64_t m2
)
1690 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1691 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1693 int neg
= float128_is_neg(v1
.q
);
1694 if ((float128_is_zero(v1
.q
) && (m2
& (1 << (11-neg
)))) ||
1695 (float128_is_infinity(v1
.q
) && (m2
& (1 << (5-neg
)))) ||
1696 (float128_is_any_nan(v1
.q
) && (m2
& (1 << (3-neg
)))) ||
1697 (float128_is_signaling_nan(v1
.q
) && (m2
& (1 << (1-neg
))))) {
1699 } else if (m2
& (1 << (9-neg
))) {
1700 /* assume normalized number */
1703 /* FIXME: denormalized? */
1707 /* find leftmost one */
1708 uint32_t HELPER(flogr
)(uint32_t r1
, uint64_t v2
)
1713 while (!(v2
& 0x8000000000000000ULL
) && v2
) {
1720 env
->regs
[r1
+ 1] = 0;
1723 env
->regs
[r1
] = res
;
1724 env
->regs
[r1
+ 1] = ov2
& ~(0x8000000000000000ULL
>> res
);
1729 /* square root 64-bit RR */
1730 void HELPER(sqdbr
)(uint32_t f1
, uint32_t f2
)
1732 env
->fregs
[f1
].d
= float64_sqrt(env
->fregs
[f2
].d
, &env
->fpu_status
);
1736 void HELPER(cksm
)(uint32_t r1
, uint32_t r2
)
1738 uint64_t src
= get_address_31fix(r2
);
1739 uint64_t src_len
= env
->regs
[(r2
+ 1) & 15];
1740 uint64_t cksm
= (uint32_t)env
->regs
[r1
];
1742 while (src_len
>= 4) {
1745 /* move to next word */
1754 cksm
+= ldub(src
) << 24;
1757 cksm
+= lduw(src
) << 16;
1760 cksm
+= lduw(src
) << 16;
1761 cksm
+= ldub(src
+ 2) << 8;
1765 /* indicate we've processed everything */
1766 env
->regs
[r2
] = src
+ src_len
;
1767 env
->regs
[(r2
+ 1) & 15] = 0;
1770 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1771 ((uint32_t)cksm
+ (cksm
>> 32));
1774 static inline uint32_t cc_calc_ltgt_32(CPUState
*env
, int32_t src
,
1779 } else if (src
< dst
) {
1786 static inline uint32_t cc_calc_ltgt0_32(CPUState
*env
, int32_t dst
)
1788 return cc_calc_ltgt_32(env
, dst
, 0);
1791 static inline uint32_t cc_calc_ltgt_64(CPUState
*env
, int64_t src
,
1796 } else if (src
< dst
) {
1803 static inline uint32_t cc_calc_ltgt0_64(CPUState
*env
, int64_t dst
)
1805 return cc_calc_ltgt_64(env
, dst
, 0);
1808 static inline uint32_t cc_calc_ltugtu_32(CPUState
*env
, uint32_t src
,
1813 } else if (src
< dst
) {
1820 static inline uint32_t cc_calc_ltugtu_64(CPUState
*env
, uint64_t src
,
1825 } else if (src
< dst
) {
1832 static inline uint32_t cc_calc_tm_32(CPUState
*env
, uint32_t val
, uint32_t mask
)
1834 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__
, val
, mask
);
1835 uint16_t r
= val
& mask
;
1836 if (r
== 0 || mask
== 0) {
1838 } else if (r
== mask
) {
1845 /* set condition code for test under mask */
1846 static inline uint32_t cc_calc_tm_64(CPUState
*env
, uint64_t val
, uint32_t mask
)
1848 uint16_t r
= val
& mask
;
1849 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__
, val
, mask
, r
);
1850 if (r
== 0 || mask
== 0) {
1852 } else if (r
== mask
) {
1855 while (!(mask
& 0x8000)) {
1867 static inline uint32_t cc_calc_nz(CPUState
*env
, uint64_t dst
)
1872 static inline uint32_t cc_calc_add_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1875 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1876 return 3; /* overflow */
1880 } else if (ar
> 0) {
1888 static inline uint32_t cc_calc_addu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1898 if (ar
< a1
|| ar
< a2
) {
1906 static inline uint32_t cc_calc_sub_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1909 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
1910 return 3; /* overflow */
1914 } else if (ar
> 0) {
1922 static inline uint32_t cc_calc_subu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1936 static inline uint32_t cc_calc_abs_64(CPUState
*env
, int64_t dst
)
1938 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1947 static inline uint32_t cc_calc_nabs_64(CPUState
*env
, int64_t dst
)
1952 static inline uint32_t cc_calc_comp_64(CPUState
*env
, int64_t dst
)
1954 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1956 } else if (dst
< 0) {
1958 } else if (dst
> 0) {
1966 static inline uint32_t cc_calc_add_32(CPUState
*env
, int32_t a1
, int32_t a2
,
1969 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1970 return 3; /* overflow */
1974 } else if (ar
> 0) {
1982 static inline uint32_t cc_calc_addu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
1992 if (ar
< a1
|| ar
< a2
) {
2000 static inline uint32_t cc_calc_sub_32(CPUState
*env
, int32_t a1
, int32_t a2
,
2003 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
2004 return 3; /* overflow */
2008 } else if (ar
> 0) {
2016 static inline uint32_t cc_calc_subu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2030 static inline uint32_t cc_calc_abs_32(CPUState
*env
, int32_t dst
)
2032 if ((uint32_t)dst
== 0x80000000UL
) {
2041 static inline uint32_t cc_calc_nabs_32(CPUState
*env
, int32_t dst
)
2046 static inline uint32_t cc_calc_comp_32(CPUState
*env
, int32_t dst
)
2048 if ((uint32_t)dst
== 0x80000000UL
) {
2050 } else if (dst
< 0) {
2052 } else if (dst
> 0) {
2059 /* calculate condition code for insert character under mask insn */
2060 static inline uint32_t cc_calc_icm_32(CPUState
*env
, uint32_t mask
, uint32_t val
)
2062 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__
, mask
, val
);
2068 } else if (val
& 0x80000000) {
2075 if (!val
|| !mask
) {
2091 static inline uint32_t cc_calc_slag(CPUState
*env
, uint64_t src
, uint64_t shift
)
2093 uint64_t mask
= ((1ULL << shift
) - 1ULL) << (64 - shift
);
2096 /* check if the sign bit stays the same */
2097 if (src
& (1ULL << 63)) {
2103 if ((src
& mask
) != match
) {
2108 r
= ((src
<< shift
) & ((1ULL << 63) - 1)) | (src
& (1ULL << 63));
2110 if ((int64_t)r
== 0) {
2112 } else if ((int64_t)r
< 0) {
2120 static inline uint32_t do_calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
,
2121 uint64_t dst
, uint64_t vr
)
2130 /* cc_op value _is_ cc */
2133 case CC_OP_LTGT0_32
:
2134 r
= cc_calc_ltgt0_32(env
, dst
);
2136 case CC_OP_LTGT0_64
:
2137 r
= cc_calc_ltgt0_64(env
, dst
);
2140 r
= cc_calc_ltgt_32(env
, src
, dst
);
2143 r
= cc_calc_ltgt_64(env
, src
, dst
);
2145 case CC_OP_LTUGTU_32
:
2146 r
= cc_calc_ltugtu_32(env
, src
, dst
);
2148 case CC_OP_LTUGTU_64
:
2149 r
= cc_calc_ltugtu_64(env
, src
, dst
);
2152 r
= cc_calc_tm_32(env
, src
, dst
);
2155 r
= cc_calc_tm_64(env
, src
, dst
);
2158 r
= cc_calc_nz(env
, dst
);
2161 r
= cc_calc_add_64(env
, src
, dst
, vr
);
2164 r
= cc_calc_addu_64(env
, src
, dst
, vr
);
2167 r
= cc_calc_sub_64(env
, src
, dst
, vr
);
2170 r
= cc_calc_subu_64(env
, src
, dst
, vr
);
2173 r
= cc_calc_abs_64(env
, dst
);
2176 r
= cc_calc_nabs_64(env
, dst
);
2179 r
= cc_calc_comp_64(env
, dst
);
2183 r
= cc_calc_add_32(env
, src
, dst
, vr
);
2186 r
= cc_calc_addu_32(env
, src
, dst
, vr
);
2189 r
= cc_calc_sub_32(env
, src
, dst
, vr
);
2192 r
= cc_calc_subu_32(env
, src
, dst
, vr
);
2195 r
= cc_calc_abs_64(env
, dst
);
2198 r
= cc_calc_nabs_64(env
, dst
);
2201 r
= cc_calc_comp_32(env
, dst
);
2205 r
= cc_calc_icm_32(env
, src
, dst
);
2208 r
= cc_calc_slag(env
, src
, dst
);
2211 case CC_OP_LTGT_F32
:
2212 r
= set_cc_f32(src
, dst
);
2214 case CC_OP_LTGT_F64
:
2215 r
= set_cc_f64(src
, dst
);
2218 r
= set_cc_nz_f32(dst
);
2221 r
= set_cc_nz_f64(dst
);
2225 cpu_abort(env
, "Unknown CC operation: %s\n", cc_name(cc_op
));
2228 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__
,
2229 cc_name(cc_op
), src
, dst
, vr
, r
);
2233 uint32_t calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2236 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2239 uint32_t HELPER(calc_cc
)(uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2242 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2245 uint64_t HELPER(cvd
)(int32_t bin
)
2248 uint64_t dec
= 0x0c;
2256 for (shift
= 4; (shift
< 64) && bin
; shift
+= 4) {
2257 int current_number
= bin
% 10;
2259 dec
|= (current_number
) << shift
;
2266 void HELPER(unpk
)(uint32_t len
, uint64_t dest
, uint64_t src
)
2268 int len_dest
= len
>> 4;
2269 int len_src
= len
& 0xf;
2271 int second_nibble
= 0;
2276 /* last byte is special, it only flips the nibbles */
2278 stb(dest
, (b
<< 4) | (b
>> 4));
2282 /* now pad every nibble with 0xf0 */
2284 while (len_dest
> 0) {
2285 uint8_t cur_byte
= 0;
2288 cur_byte
= ldub(src
);
2294 /* only advance one nibble at a time */
2295 if (second_nibble
) {
2300 second_nibble
= !second_nibble
;
2303 cur_byte
= (cur_byte
& 0xf);
2307 stb(dest
, cur_byte
);
2311 void HELPER(tr
)(uint32_t len
, uint64_t array
, uint64_t trans
)
2315 for (i
= 0; i
<= len
; i
++) {
2316 uint8_t byte
= ldub(array
+ i
);
2317 uint8_t new_byte
= ldub(trans
+ byte
);
2318 stb(array
+ i
, new_byte
);
2322 #ifndef CONFIG_USER_ONLY
2324 void HELPER(load_psw
)(uint64_t mask
, uint64_t addr
)
2326 load_psw(env
, mask
, addr
);
2330 static void program_interrupt(CPUState
*env
, uint32_t code
, int ilc
)
2332 qemu_log("program interrupt at %#" PRIx64
"\n", env
->psw
.addr
);
2334 if (kvm_enabled()) {
2335 kvm_s390_interrupt(env
, KVM_S390_PROGRAM_INT
, code
);
2337 env
->int_pgm_code
= code
;
2338 env
->int_pgm_ilc
= ilc
;
2339 env
->exception_index
= EXCP_PGM
;
2344 static void ext_interrupt(CPUState
*env
, int type
, uint32_t param
,
2347 cpu_inject_ext(env
, type
, param
, param64
);
2350 int sclp_service_call(CPUState
*env
, uint32_t sccb
, uint64_t code
)
2356 printf("sclp(0x%x, 0x%" PRIx64
")\n", sccb
, code
);
2359 if (sccb
& ~0x7ffffff8ul
) {
2360 fprintf(stderr
, "KVM: invalid sccb address 0x%x\n", sccb
);
2366 case SCLP_CMDW_READ_SCP_INFO
:
2367 case SCLP_CMDW_READ_SCP_INFO_FORCED
:
2368 while ((ram_size
>> (20 + shift
)) > 65535) {
2371 stw_phys(sccb
+ SCP_MEM_CODE
, ram_size
>> (20 + shift
));
2372 stb_phys(sccb
+ SCP_INCREMENT
, 1 << shift
);
2373 stw_phys(sccb
+ SCP_RESPONSE_CODE
, 0x10);
2375 if (kvm_enabled()) {
2377 kvm_s390_interrupt_internal(env
, KVM_S390_INT_SERVICE
,
2382 ext_interrupt(env
, EXT_SERVICE
, sccb
& ~3, 0);
2387 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64
"x\n", sccb
, code
);
2397 /* SCLP service call */
2398 uint32_t HELPER(servc
)(uint32_t r1
, uint64_t r2
)
2400 if (sclp_service_call(env
, r1
, r2
)) {
2408 uint64_t HELPER(diag
)(uint32_t num
, uint64_t mem
, uint64_t code
)
2415 r
= s390_virtio_hypercall(env
, mem
, code
);
2431 program_interrupt(env
, PGM_OPERATION
, ILC_LATER_INC
);
2438 void HELPER(stidp
)(uint64_t a1
)
2440 stq(a1
, env
->cpu_num
);
2444 void HELPER(spx
)(uint64_t a1
)
2449 env
->psa
= prefix
& 0xfffff000;
2450 qemu_log("prefix: %#x\n", prefix
);
2451 tlb_flush_page(env
, 0);
2452 tlb_flush_page(env
, TARGET_PAGE_SIZE
);
2456 uint32_t HELPER(sck
)(uint64_t a1
)
2458 /* XXX not implemented - is it necessary? */
2463 static inline uint64_t clock_value(CPUState
*env
)
2467 time
= env
->tod_offset
+
2468 time2tod(qemu_get_clock_ns(vm_clock
) - env
->tod_basetime
);
2474 uint32_t HELPER(stck
)(uint64_t a1
)
2476 stq(a1
, clock_value(env
));
2481 /* Store Clock Extended */
2482 uint32_t HELPER(stcke
)(uint64_t a1
)
2485 /* basically the same value as stck */
2486 stq(a1
+ 1, clock_value(env
) | env
->cpu_num
);
2487 /* more fine grained than stck */
2489 /* XXX programmable fields */
2496 /* Set Clock Comparator */
2497 void HELPER(sckc
)(uint64_t a1
)
2499 uint64_t time
= ldq(a1
);
2501 if (time
== -1ULL) {
2505 /* difference between now and then */
2506 time
-= clock_value(env
);
2508 time
= (time
* 125) >> 9;
2510 qemu_mod_timer(env
->tod_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2513 /* Store Clock Comparator */
2514 void HELPER(stckc
)(uint64_t a1
)
2521 void HELPER(spt
)(uint64_t a1
)
2523 uint64_t time
= ldq(a1
);
2525 if (time
== -1ULL) {
2530 time
= (time
* 125) >> 9;
2532 qemu_mod_timer(env
->cpu_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2535 /* Store CPU Timer */
2536 void HELPER(stpt
)(uint64_t a1
)
2542 /* Store System Information */
2543 uint32_t HELPER(stsi
)(uint64_t a0
, uint32_t r0
, uint32_t r1
)
2548 if ((r0
& STSI_LEVEL_MASK
) <= STSI_LEVEL_3
&&
2549 ((r0
& STSI_R0_RESERVED_MASK
) || (r1
& STSI_R1_RESERVED_MASK
))) {
2550 /* valid function code, invalid reserved bits */
2551 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2554 sel1
= r0
& STSI_R0_SEL1_MASK
;
2555 sel2
= r1
& STSI_R1_SEL2_MASK
;
2557 /* XXX: spec exception if sysib is not 4k-aligned */
2559 switch (r0
& STSI_LEVEL_MASK
) {
2561 if ((sel1
== 1) && (sel2
== 1)) {
2562 /* Basic Machine Configuration */
2563 struct sysib_111 sysib
;
2565 memset(&sysib
, 0, sizeof(sysib
));
2566 ebcdic_put(sysib
.manuf
, "QEMU ", 16);
2567 /* same as machine type number in STORE CPU ID */
2568 ebcdic_put(sysib
.type
, "QEMU", 4);
2569 /* same as model number in STORE CPU ID */
2570 ebcdic_put(sysib
.model
, "QEMU ", 16);
2571 ebcdic_put(sysib
.sequence
, "QEMU ", 16);
2572 ebcdic_put(sysib
.plant
, "QEMU", 4);
2573 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2574 } else if ((sel1
== 2) && (sel2
== 1)) {
2575 /* Basic Machine CPU */
2576 struct sysib_121 sysib
;
2578 memset(&sysib
, 0, sizeof(sysib
));
2579 /* XXX make different for different CPUs? */
2580 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2581 ebcdic_put(sysib
.plant
, "QEMU", 4);
2582 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2583 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2584 } else if ((sel1
== 2) && (sel2
== 2)) {
2585 /* Basic Machine CPUs */
2586 struct sysib_122 sysib
;
2588 memset(&sysib
, 0, sizeof(sysib
));
2589 stl_p(&sysib
.capability
, 0x443afc29);
2590 /* XXX change when SMP comes */
2591 stw_p(&sysib
.total_cpus
, 1);
2592 stw_p(&sysib
.active_cpus
, 1);
2593 stw_p(&sysib
.standby_cpus
, 0);
2594 stw_p(&sysib
.reserved_cpus
, 0);
2595 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2602 if ((sel1
== 2) && (sel2
== 1)) {
2604 struct sysib_221 sysib
;
2606 memset(&sysib
, 0, sizeof(sysib
));
2607 /* XXX make different for different CPUs? */
2608 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2609 ebcdic_put(sysib
.plant
, "QEMU", 4);
2610 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2611 stw_p(&sysib
.cpu_id
, 0);
2612 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2613 } else if ((sel1
== 2) && (sel2
== 2)) {
2615 struct sysib_222 sysib
;
2617 memset(&sysib
, 0, sizeof(sysib
));
2618 stw_p(&sysib
.lpar_num
, 0);
2620 /* XXX change when SMP comes */
2621 stw_p(&sysib
.total_cpus
, 1);
2622 stw_p(&sysib
.conf_cpus
, 1);
2623 stw_p(&sysib
.standby_cpus
, 0);
2624 stw_p(&sysib
.reserved_cpus
, 0);
2625 ebcdic_put(sysib
.name
, "QEMU ", 8);
2626 stl_p(&sysib
.caf
, 1000);
2627 stw_p(&sysib
.dedicated_cpus
, 0);
2628 stw_p(&sysib
.shared_cpus
, 0);
2629 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2637 if ((sel1
== 2) && (sel2
== 2)) {
2639 struct sysib_322 sysib
;
2641 memset(&sysib
, 0, sizeof(sysib
));
2643 /* XXX change when SMP comes */
2644 stw_p(&sysib
.vm
[0].total_cpus
, 1);
2645 stw_p(&sysib
.vm
[0].conf_cpus
, 1);
2646 stw_p(&sysib
.vm
[0].standby_cpus
, 0);
2647 stw_p(&sysib
.vm
[0].reserved_cpus
, 0);
2648 ebcdic_put(sysib
.vm
[0].name
, "KVMguest", 8);
2649 stl_p(&sysib
.vm
[0].caf
, 1000);
2650 ebcdic_put(sysib
.vm
[0].cpi
, "KVM/Linux ", 16);
2651 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2657 case STSI_LEVEL_CURRENT
:
2658 env
->regs
[0] = STSI_LEVEL_3
;
2668 void HELPER(lctlg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2673 for (i
= r1
;; i
= (i
+ 1) % 16) {
2674 env
->cregs
[i
] = ldq(src
);
2675 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
2676 i
, src
, env
->cregs
[i
]);
2677 src
+= sizeof(uint64_t);
2687 void HELPER(lctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2692 for (i
= r1
;; i
= (i
+ 1) % 16) {
2693 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | ldl(src
);
2694 src
+= sizeof(uint32_t);
2704 void HELPER(stctg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2709 for (i
= r1
;; i
= (i
+ 1) % 16) {
2710 stq(dest
, env
->cregs
[i
]);
2711 dest
+= sizeof(uint64_t);
2719 void HELPER(stctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2724 for (i
= r1
;; i
= (i
+ 1) % 16) {
2725 stl(dest
, env
->cregs
[i
]);
2726 dest
+= sizeof(uint32_t);
2734 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
2741 /* insert storage key extended */
2742 uint64_t HELPER(iske
)(uint64_t r2
)
2744 uint64_t addr
= get_address(0, 0, r2
);
2746 if (addr
> ram_size
) {
2750 /* XXX maybe use qemu's internal keys? */
2751 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
2754 /* set storage key extended */
2755 void HELPER(sske
)(uint32_t r1
, uint64_t r2
)
2757 uint64_t addr
= get_address(0, 0, r2
);
2759 if (addr
> ram_size
) {
2763 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
2766 /* reset reference bit extended */
2767 uint32_t HELPER(rrbe
)(uint32_t r1
, uint64_t r2
)
2769 if (r2
> ram_size
) {
2775 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] &= ~SK_REFERENCED
;
2781 * 0 Reference bit zero; change bit zero
2782 * 1 Reference bit zero; change bit one
2783 * 2 Reference bit one; change bit zero
2784 * 3 Reference bit one; change bit one
2789 /* compare and swap and purge */
2790 uint32_t HELPER(csp
)(uint32_t r1
, uint32_t r2
)
2793 uint32_t o1
= env
->regs
[r1
];
2794 uint64_t a2
= get_address_31fix(r2
) & ~3ULL;
2795 uint32_t o2
= ldl(a2
);
2798 stl(a2
, env
->regs
[(r1
+ 1) & 15]);
2799 if (env
->regs
[r2
] & 0x3) {
2800 /* flush TLB / ALB */
2805 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
2812 static uint32_t mvc_asc(int64_t l
, uint64_t a1
, uint64_t mode1
, uint64_t a2
,
2815 target_ulong src
, dest
;
2816 int flags
, cc
= 0, i
;
2820 } else if (l
> 256) {
2826 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
2829 dest
|= a1
& ~TARGET_PAGE_MASK
;
2831 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
2834 src
|= a2
& ~TARGET_PAGE_MASK
;
2836 /* XXX replace w/ memcpy */
2837 for (i
= 0; i
< l
; i
++) {
2838 /* XXX be more clever */
2839 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
2840 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
2841 mvc_asc(l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
2844 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
2850 uint32_t HELPER(mvcs
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2852 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2853 __FUNCTION__
, l
, a1
, a2
);
2855 return mvc_asc(l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
2858 uint32_t HELPER(mvcp
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2860 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2861 __FUNCTION__
, l
, a1
, a2
);
2863 return mvc_asc(l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
2866 uint32_t HELPER(sigp
)(uint64_t order_code
, uint32_t r1
, uint64_t cpu_addr
)
2870 HELPER_LOG("%s: %016" PRIx64
" %08x %016" PRIx64
"\n",
2871 __FUNCTION__
, order_code
, r1
, cpu_addr
);
2873 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2874 as parameter (input). Status (output) is always R1. */
2876 switch (order_code
) {
2881 /* enumerate CPU status */
2883 /* XXX implement when SMP comes */
2886 env
->regs
[r1
] &= 0xffffffff00000000ULL
;
2891 fprintf(stderr
, "XXX unknown sigp: 0x%" PRIx64
"\n", order_code
);
2898 void HELPER(sacf
)(uint64_t a1
)
2900 HELPER_LOG("%s: %16" PRIx64
"\n", __FUNCTION__
, a1
);
2902 switch (a1
& 0xf00) {
2904 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2905 env
->psw
.mask
|= PSW_ASC_PRIMARY
;
2908 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2909 env
->psw
.mask
|= PSW_ASC_SECONDARY
;
2912 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2913 env
->psw
.mask
|= PSW_ASC_HOME
;
2916 qemu_log("unknown sacf mode: %" PRIx64
"\n", a1
);
2917 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2922 /* invalidate pte */
2923 void HELPER(ipte
)(uint64_t pte_addr
, uint64_t vaddr
)
2925 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
2928 /* XXX broadcast to other CPUs */
2930 /* XXX Linux is nice enough to give us the exact pte address.
2931 According to spec we'd have to find it out ourselves */
2932 /* XXX Linux is fine with overwriting the pte, the spec requires
2933 us to only set the invalid bit */
2934 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
2936 /* XXX we exploit the fact that Linux passes the exact virtual
2937 address here - it's not obliged to! */
2938 tlb_flush_page(env
, page
);
2941 /* flush local tlb */
2942 void HELPER(ptlb
)(void)
2947 /* store using real address */
2948 void HELPER(stura
)(uint64_t addr
, uint32_t v1
)
2950 stw_phys(get_address(0, 0, addr
), v1
);
2953 /* load real address */
2954 uint32_t HELPER(lra
)(uint64_t addr
, uint32_t r1
)
2957 int old_exc
= env
->exception_index
;
2958 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
2962 /* XXX incomplete - has more corner cases */
2963 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2964 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
2967 env
->exception_index
= old_exc
;
2968 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
2971 if (env
->exception_index
== EXCP_PGM
) {
2972 ret
= env
->int_pgm_code
| 0x80000000;
2974 ret
|= addr
& ~TARGET_PAGE_MASK
;
2976 env
->exception_index
= old_exc
;
2978 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
2979 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | (ret
& 0xffffffffULL
);
2981 env
->regs
[r1
] = ret
;