/*
 *  S/390 helper routines
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2009 Alexander Graf
 *
 *  This library is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU Lesser General Public
 *  License as published by the Free Software Foundation; either
 *  version 2 of the License, or (at your option) any later version.
 *
 *  This library is distributed in the hope that it will be useful,
 *  but WITHOUT ANY WARRANTY; without even the implied warranty of
 *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 *  Lesser General Public License for more details.
 *
 *  You should have received a copy of the GNU Lesser General Public
 *  License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
27 #include "qemu-timer.h"
29 #include <linux/kvm.h>
32 /*****************************************************************************/
34 #if !defined (CONFIG_USER_ONLY)
35 #include "softmmu_exec.h"
37 #define MMUSUFFIX _mmu
40 #include "softmmu_template.h"
43 #include "softmmu_template.h"
46 #include "softmmu_template.h"
49 #include "softmmu_template.h"
51 /* try to fill the TLB and return an exception if error. If retaddr is
52 NULL, it means that the function was called in C code (i.e. not
53 from generated code or from helper.c) */
54 /* XXX: fix it to restore all registers */
55 void tlb_fill(CPUState
*env1
, target_ulong addr
, int is_write
, int mmu_idx
,
65 ret
= cpu_s390x_handle_mmu_fault(env
, addr
, is_write
, mmu_idx
);
66 if (unlikely(ret
!= 0)) {
67 if (likely(retaddr
)) {
68 /* now we have a real cpu fault */
69 pc
= (unsigned long)retaddr
;
72 /* the PC is inside the translated code. It means that we have
73 a virtual CPU fault */
74 cpu_restore_state(tb
, env
, pc
);
/* #define DEBUG_HELPER */
#ifdef DEBUG_HELPER
/* Route helper tracing through the QEMU log when debugging is enabled. */
#define HELPER_LOG(x...) qemu_log(x)
#else
/* Compiled out entirely in normal builds. */
#define HELPER_LOG(x...)
#endif
91 /* raise an exception */
92 void HELPER(exception
)(uint32_t excp
)
94 HELPER_LOG("%s: exception %d\n", __FUNCTION__
, excp
);
95 env
->exception_index
= excp
;
99 #ifndef CONFIG_USER_ONLY
100 static void mvc_fast_memset(CPUState
*env
, uint32_t l
, uint64_t dest
,
103 target_phys_addr_t dest_phys
;
104 target_phys_addr_t len
= l
;
106 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
109 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
111 cpu_abort(env
, "should never reach here");
113 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
115 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
117 memset(dest_p
, byte
, len
);
119 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
122 static void mvc_fast_memmove(CPUState
*env
, uint32_t l
, uint64_t dest
,
125 target_phys_addr_t dest_phys
;
126 target_phys_addr_t src_phys
;
127 target_phys_addr_t len
= l
;
130 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
133 if (mmu_translate(env
, dest
, 1, asc
, &dest_phys
, &flags
)) {
135 cpu_abort(env
, "should never reach here");
137 dest_phys
|= dest
& ~TARGET_PAGE_MASK
;
139 if (mmu_translate(env
, src
, 0, asc
, &src_phys
, &flags
)) {
141 cpu_abort(env
, "should never reach here");
143 src_phys
|= src
& ~TARGET_PAGE_MASK
;
145 dest_p
= cpu_physical_memory_map(dest_phys
, &len
, 1);
146 src_p
= cpu_physical_memory_map(src_phys
, &len
, 0);
148 memmove(dest_p
, src_p
, len
);
150 cpu_physical_memory_unmap(dest_p
, 1, len
, len
);
151 cpu_physical_memory_unmap(src_p
, 0, len
, len
);
156 uint32_t HELPER(nc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
162 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
163 __FUNCTION__
, l
, dest
, src
);
164 for (i
= 0; i
<= l
; i
++) {
165 x
= ldub(dest
+ i
) & ldub(src
+ i
);
175 uint32_t HELPER(xc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
181 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
182 __FUNCTION__
, l
, dest
, src
);
184 #ifndef CONFIG_USER_ONLY
185 /* xor with itself is the same as memset(0) */
186 if ((l
> 32) && (src
== dest
) &&
187 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
)) {
188 mvc_fast_memset(env
, l
+ 1, dest
, 0);
193 memset(g2h(dest
), 0, l
+ 1);
198 for (i
= 0; i
<= l
; i
++) {
199 x
= ldub(dest
+ i
) ^ ldub(src
+ i
);
209 uint32_t HELPER(oc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
215 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
216 __FUNCTION__
, l
, dest
, src
);
217 for (i
= 0; i
<= l
; i
++) {
218 x
= ldub(dest
+ i
) | ldub(src
+ i
);
228 void HELPER(mvc
)(uint32_t l
, uint64_t dest
, uint64_t src
)
232 uint32_t l_64
= (l
+ 1) / 8;
234 HELPER_LOG("%s l %d dest %" PRIx64
" src %" PRIx64
"\n",
235 __FUNCTION__
, l
, dest
, src
);
237 #ifndef CONFIG_USER_ONLY
239 (src
& TARGET_PAGE_MASK
) == ((src
+ l
) & TARGET_PAGE_MASK
) &&
240 (dest
& TARGET_PAGE_MASK
) == ((dest
+ l
) & TARGET_PAGE_MASK
)) {
241 if (dest
== (src
+ 1)) {
242 mvc_fast_memset(env
, l
+ 1, dest
, ldub(src
));
244 } else if ((src
& TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) {
245 mvc_fast_memmove(env
, l
+ 1, dest
, src
);
250 if (dest
== (src
+ 1)) {
251 memset(g2h(dest
), ldub(src
), l
+ 1);
254 memmove(g2h(dest
), g2h(src
), l
+ 1);
259 /* handle the parts that fit into 8-byte loads/stores */
260 if (dest
!= (src
+ 1)) {
261 for (i
= 0; i
< l_64
; i
++) {
262 stq(dest
+ x
, ldq(src
+ x
));
267 /* slow version crossing pages with byte accesses */
268 for (i
= x
; i
<= l
; i
++) {
269 stb(dest
+ i
, ldub(src
+ i
));
273 /* compare unsigned byte arrays */
274 uint32_t HELPER(clc
)(uint32_t l
, uint64_t s1
, uint64_t s2
)
279 HELPER_LOG("%s l %d s1 %" PRIx64
" s2 %" PRIx64
"\n",
280 __FUNCTION__
, l
, s1
, s2
);
281 for (i
= 0; i
<= l
; i
++) {
284 HELPER_LOG("%02x (%c)/%02x (%c) ", x
, x
, y
, y
);
299 /* compare logical under mask */
300 uint32_t HELPER(clm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
304 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64
"\n", __FUNCTION__
, r1
,
310 r
= (r1
& 0xff000000UL
) >> 24;
311 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64
") ", mask
, r
, d
,
322 mask
= (mask
<< 1) & 0xf;
329 /* store character under mask */
330 void HELPER(stcm
)(uint32_t r1
, uint32_t mask
, uint64_t addr
)
333 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__
, r1
, mask
,
337 r
= (r1
& 0xff000000UL
) >> 24;
339 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask
, r
, addr
);
342 mask
= (mask
<< 1) & 0xf;
348 /* 64/64 -> 128 unsigned multiplication */
349 void HELPER(mlg
)(uint32_t r1
, uint64_t v2
)
351 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
352 /* assuming 64-bit hosts have __uint128_t */
353 __uint128_t res
= (__uint128_t
)env
->regs
[r1
+ 1];
354 res
*= (__uint128_t
)v2
;
355 env
->regs
[r1
] = (uint64_t)(res
>> 64);
356 env
->regs
[r1
+ 1] = (uint64_t)res
;
358 mulu64(&env
->regs
[r1
+ 1], &env
->regs
[r1
], env
->regs
[r1
+ 1], v2
);
362 /* 128 -> 64/64 unsigned division */
363 void HELPER(dlg
)(uint32_t r1
, uint64_t v2
)
365 uint64_t divisor
= v2
;
367 if (!env
->regs
[r1
]) {
368 /* 64 -> 64/64 case */
369 env
->regs
[r1
] = env
->regs
[r1
+1] % divisor
;
370 env
->regs
[r1
+1] = env
->regs
[r1
+1] / divisor
;
374 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
375 /* assuming 64-bit hosts have __uint128_t */
376 __uint128_t dividend
= (((__uint128_t
)env
->regs
[r1
]) << 64) |
378 __uint128_t quotient
= dividend
/ divisor
;
379 env
->regs
[r1
+1] = quotient
;
380 __uint128_t remainder
= dividend
% divisor
;
381 env
->regs
[r1
] = remainder
;
383 /* 32-bit hosts would need special wrapper functionality - just abort if
384 we encounter such a case; it's very unlikely anyways. */
385 cpu_abort(env
, "128 -> 64/64 division not implemented\n");
390 static inline uint64_t get_address(int x2
, int b2
, int d2
)
403 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
410 static inline uint64_t get_address_31fix(int reg
)
412 uint64_t r
= env
->regs
[reg
];
415 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
422 /* search string (c is byte to search, r2 is string, r1 end of string) */
423 uint32_t HELPER(srst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
427 uint64_t str
= get_address_31fix(r2
);
428 uint64_t end
= get_address_31fix(r1
);
430 HELPER_LOG("%s: c %d *r1 0x%" PRIx64
" *r2 0x%" PRIx64
"\n", __FUNCTION__
,
431 c
, env
->regs
[r1
], env
->regs
[r2
]);
433 for (i
= str
; i
!= end
; i
++) {
444 /* unsigned string compare (c is string terminator) */
445 uint32_t HELPER(clst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
447 uint64_t s1
= get_address_31fix(r1
);
448 uint64_t s2
= get_address_31fix(r2
);
452 #ifdef CONFIG_USER_ONLY
454 HELPER_LOG("%s: comparing '%s' and '%s'\n",
455 __FUNCTION__
, (char*)g2h(s1
), (char*)g2h(s2
));
461 if ((v1
== c
|| v2
== c
) || (v1
!= v2
)) {
471 cc
= (v1
< v2
) ? 1 : 2;
472 /* FIXME: 31-bit mode! */
480 void HELPER(mvpg
)(uint64_t r0
, uint64_t r1
, uint64_t r2
)
482 /* XXX missing r0 handling */
483 #ifdef CONFIG_USER_ONLY
486 for (i
= 0; i
< TARGET_PAGE_SIZE
; i
++) {
487 stb(r1
+ i
, ldub(r2
+ i
));
490 mvc_fast_memmove(env
, TARGET_PAGE_SIZE
, r1
, r2
);
494 /* string copy (c is string terminator) */
495 void HELPER(mvst
)(uint32_t c
, uint32_t r1
, uint32_t r2
)
497 uint64_t dest
= get_address_31fix(r1
);
498 uint64_t src
= get_address_31fix(r2
);
501 #ifdef CONFIG_USER_ONLY
503 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__
, (char*)g2h(src
),
516 env
->regs
[r1
] = dest
; /* FIXME: 31-bit mode! */
519 /* compare and swap 64-bit */
520 uint32_t HELPER(csg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
522 /* FIXME: locking? */
524 uint64_t v2
= ldq(a2
);
525 if (env
->regs
[r1
] == v2
) {
527 stq(a2
, env
->regs
[r3
]);
535 /* compare double and swap 64-bit */
536 uint32_t HELPER(cdsg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
538 /* FIXME: locking? */
540 uint64_t v2_hi
= ldq(a2
);
541 uint64_t v2_lo
= ldq(a2
+ 8);
542 uint64_t v1_hi
= env
->regs
[r1
];
543 uint64_t v1_lo
= env
->regs
[r1
+ 1];
545 if ((v1_hi
== v2_hi
) && (v1_lo
== v2_lo
)) {
547 stq(a2
, env
->regs
[r3
]);
548 stq(a2
+ 8, env
->regs
[r3
+ 1]);
551 env
->regs
[r1
] = v2_hi
;
552 env
->regs
[r1
+ 1] = v2_lo
;
558 /* compare and swap 32-bit */
559 uint32_t HELPER(cs
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
561 /* FIXME: locking? */
563 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__
, r1
, a2
, r3
);
564 uint32_t v2
= ldl(a2
);
565 if (((uint32_t)env
->regs
[r1
]) == v2
) {
567 stl(a2
, (uint32_t)env
->regs
[r3
]);
570 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | v2
;
575 static uint32_t helper_icm(uint32_t r1
, uint64_t address
, uint32_t mask
)
577 int pos
= 24; /* top of the lower half of r1 */
578 uint64_t rmask
= 0xff000000ULL
;
585 env
->regs
[r1
] &= ~rmask
;
587 if ((val
& 0x80) && !ccd
) {
591 if (val
&& cc
== 0) {
594 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
597 mask
= (mask
<< 1) & 0xf;
605 /* execute instruction
606 this instruction executes an insn modified with the contents of r1
607 it does not change the executed instruction in memory
608 it does not change the program counter
609 in other words: tricky...
610 currently implemented by interpreting the cases it is most commonly used in
612 uint32_t HELPER(ex
)(uint32_t cc
, uint64_t v1
, uint64_t addr
, uint64_t ret
)
614 uint16_t insn
= lduw_code(addr
);
615 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__
, v1
, addr
,
617 if ((insn
& 0xf0ff) == 0xd000) {
618 uint32_t l
, insn2
, b1
, b2
, d1
, d2
;
620 insn2
= ldl_code(addr
+ 2);
621 b1
= (insn2
>> 28) & 0xf;
622 b2
= (insn2
>> 12) & 0xf;
623 d1
= (insn2
>> 16) & 0xfff;
625 switch (insn
& 0xf00) {
627 helper_mvc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
630 cc
= helper_clc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
633 cc
= helper_xc(l
, get_address(0, b1
, d1
), get_address(0, b2
, d2
));
639 } else if ((insn
& 0xff00) == 0x0a00) {
640 /* supervisor call */
641 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__
, (insn
|v1
) & 0xff);
642 env
->psw
.addr
= ret
- 4;
643 env
->int_svc_code
= (insn
|v1
) & 0xff;
644 env
->int_svc_ilc
= 4;
645 helper_exception(EXCP_SVC
);
646 } else if ((insn
& 0xff00) == 0xbf00) {
647 uint32_t insn2
, r1
, r3
, b2
, d2
;
648 insn2
= ldl_code(addr
+ 2);
649 r1
= (insn2
>> 20) & 0xf;
650 r3
= (insn2
>> 16) & 0xf;
651 b2
= (insn2
>> 12) & 0xf;
653 cc
= helper_icm(r1
, get_address(0, b2
, d2
), r3
);
656 cpu_abort(env
, "EXECUTE on instruction prefix 0x%x not implemented\n",
662 /* absolute value 32-bit */
663 uint32_t HELPER(abs_i32
)(int32_t val
)
672 /* negative absolute value 32-bit */
673 int32_t HELPER(nabs_i32
)(int32_t val
)
682 /* absolute value 64-bit */
683 uint64_t HELPER(abs_i64
)(int64_t val
)
685 HELPER_LOG("%s: val 0x%" PRIx64
"\n", __FUNCTION__
, val
);
694 /* negative absolute value 64-bit */
695 int64_t HELPER(nabs_i64
)(int64_t val
)
704 /* add with carry 32-bit unsigned */
705 uint32_t HELPER(addc_u32
)(uint32_t cc
, uint32_t v1
, uint32_t v2
)
717 /* store character under mask high operates on the upper half of r1 */
718 void HELPER(stcmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
720 int pos
= 56; /* top of the upper half of r1 */
724 stb(address
, (env
->regs
[r1
] >> pos
) & 0xff);
727 mask
= (mask
<< 1) & 0xf;
732 /* insert character under mask high; same as icm, but operates on the
734 uint32_t HELPER(icmh
)(uint32_t r1
, uint64_t address
, uint32_t mask
)
736 int pos
= 56; /* top of the upper half of r1 */
737 uint64_t rmask
= 0xff00000000000000ULL
;
744 env
->regs
[r1
] &= ~rmask
;
746 if ((val
& 0x80) && !ccd
) {
750 if (val
&& cc
== 0) {
753 env
->regs
[r1
] |= (uint64_t)val
<< pos
;
756 mask
= (mask
<< 1) & 0xf;
764 /* insert psw mask and condition code into r1 */
765 void HELPER(ipm
)(uint32_t cc
, uint32_t r1
)
767 uint64_t r
= env
->regs
[r1
];
769 r
&= 0xffffffff00ffffffULL
;
770 r
|= (cc
<< 28) | ( (env
->psw
.mask
>> 40) & 0xf );
772 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__
,
773 cc
, env
->psw
.mask
, r
);
776 /* load access registers r1 to r3 from memory at a2 */
777 void HELPER(lam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
781 for (i
= r1
;; i
= (i
+ 1) % 16) {
782 env
->aregs
[i
] = ldl(a2
);
791 /* store access registers r1 to r3 in memory at a2 */
792 void HELPER(stam
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
796 for (i
= r1
;; i
= (i
+ 1) % 16) {
797 stl(a2
, env
->aregs
[i
]);
807 uint32_t HELPER(mvcl
)(uint32_t r1
, uint32_t r2
)
809 uint64_t destlen
= env
->regs
[r1
+ 1] & 0xffffff;
810 uint64_t dest
= get_address_31fix(r1
);
811 uint64_t srclen
= env
->regs
[r2
+ 1] & 0xffffff;
812 uint64_t src
= get_address_31fix(r2
);
813 uint8_t pad
= src
>> 24;
817 if (destlen
== srclen
) {
819 } else if (destlen
< srclen
) {
825 if (srclen
> destlen
) {
829 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
834 for (; destlen
; dest
++, destlen
--) {
838 env
->regs
[r1
+ 1] = destlen
;
839 /* can't use srclen here, we trunc'ed it */
840 env
->regs
[r2
+ 1] -= src
- env
->regs
[r2
];
841 env
->regs
[r1
] = dest
;
847 /* move long extended another memcopy insn with more bells and whistles */
848 uint32_t HELPER(mvcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
850 uint64_t destlen
= env
->regs
[r1
+ 1];
851 uint64_t dest
= env
->regs
[r1
];
852 uint64_t srclen
= env
->regs
[r3
+ 1];
853 uint64_t src
= env
->regs
[r3
];
854 uint8_t pad
= a2
& 0xff;
858 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
859 destlen
= (uint32_t)destlen
;
860 srclen
= (uint32_t)srclen
;
865 if (destlen
== srclen
) {
867 } else if (destlen
< srclen
) {
873 if (srclen
> destlen
) {
877 for (; destlen
&& srclen
; src
++, dest
++, destlen
--, srclen
--) {
882 for (; destlen
; dest
++, destlen
--) {
886 env
->regs
[r1
+ 1] = destlen
;
887 /* can't use srclen here, we trunc'ed it */
888 /* FIXME: 31-bit mode! */
889 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
890 env
->regs
[r1
] = dest
;
896 /* compare logical long extended memcompare insn with padding */
897 uint32_t HELPER(clcle
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
899 uint64_t destlen
= env
->regs
[r1
+ 1];
900 uint64_t dest
= get_address_31fix(r1
);
901 uint64_t srclen
= env
->regs
[r3
+ 1];
902 uint64_t src
= get_address_31fix(r3
);
903 uint8_t pad
= a2
& 0xff;
904 uint8_t v1
= 0,v2
= 0;
907 if (!(destlen
|| srclen
)) {
911 if (srclen
> destlen
) {
915 for (; destlen
|| srclen
; src
++, dest
++, destlen
--, srclen
--) {
916 v1
= srclen
? ldub(src
) : pad
;
917 v2
= destlen
? ldub(dest
) : pad
;
919 cc
= (v1
< v2
) ? 1 : 2;
924 env
->regs
[r1
+ 1] = destlen
;
925 /* can't use srclen here, we trunc'ed it */
926 env
->regs
[r3
+ 1] -= src
- env
->regs
[r3
];
927 env
->regs
[r1
] = dest
;
933 /* subtract unsigned v2 from v1 with borrow */
934 uint32_t HELPER(slb
)(uint32_t cc
, uint32_t r1
, uint32_t v2
)
936 uint32_t v1
= env
->regs
[r1
];
937 uint32_t res
= v1
+ (~v2
) + (cc
>> 1);
939 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | res
;
948 /* subtract unsigned v2 from v1 with borrow */
949 uint32_t HELPER(slbg
)(uint32_t cc
, uint32_t r1
, uint64_t v1
, uint64_t v2
)
951 uint64_t res
= v1
+ (~v2
) + (cc
>> 1);
962 static inline int float_comp_to_cc(int float_compare
)
964 switch (float_compare
) {
965 case float_relation_equal
:
967 case float_relation_less
:
969 case float_relation_greater
:
971 case float_relation_unordered
:
974 cpu_abort(env
, "unknown return value for float compare\n");
978 /* condition codes for binary FP ops */
979 static uint32_t set_cc_f32(float32 v1
, float32 v2
)
981 return float_comp_to_cc(float32_compare_quiet(v1
, v2
, &env
->fpu_status
));
984 static uint32_t set_cc_f64(float64 v1
, float64 v2
)
986 return float_comp_to_cc(float64_compare_quiet(v1
, v2
, &env
->fpu_status
));
989 /* condition codes for unary FP ops */
990 static uint32_t set_cc_nz_f32(float32 v
)
992 if (float32_is_any_nan(v
)) {
994 } else if (float32_is_zero(v
)) {
996 } else if (float32_is_neg(v
)) {
1003 static uint32_t set_cc_nz_f64(float64 v
)
1005 if (float64_is_any_nan(v
)) {
1007 } else if (float64_is_zero(v
)) {
1009 } else if (float64_is_neg(v
)) {
1016 static uint32_t set_cc_nz_f128(float128 v
)
1018 if (float128_is_any_nan(v
)) {
1020 } else if (float128_is_zero(v
)) {
1022 } else if (float128_is_neg(v
)) {
1029 /* convert 32-bit int to 64-bit float */
1030 void HELPER(cdfbr
)(uint32_t f1
, int32_t v2
)
1032 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__
, v2
, f1
);
1033 env
->fregs
[f1
].d
= int32_to_float64(v2
, &env
->fpu_status
);
1036 /* convert 32-bit int to 128-bit float */
1037 void HELPER(cxfbr
)(uint32_t f1
, int32_t v2
)
1040 v1
.q
= int32_to_float128(v2
, &env
->fpu_status
);
1041 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1042 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1045 /* convert 64-bit int to 32-bit float */
1046 void HELPER(cegbr
)(uint32_t f1
, int64_t v2
)
1048 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1049 env
->fregs
[f1
].l
.upper
= int64_to_float32(v2
, &env
->fpu_status
);
1052 /* convert 64-bit int to 64-bit float */
1053 void HELPER(cdgbr
)(uint32_t f1
, int64_t v2
)
1055 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__
, v2
, f1
);
1056 env
->fregs
[f1
].d
= int64_to_float64(v2
, &env
->fpu_status
);
1059 /* convert 64-bit int to 128-bit float */
1060 void HELPER(cxgbr
)(uint32_t f1
, int64_t v2
)
1063 x1
.q
= int64_to_float128(v2
, &env
->fpu_status
);
1064 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__
, v2
,
1065 x1
.ll
.upper
, x1
.ll
.lower
);
1066 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1067 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1070 /* convert 32-bit int to 32-bit float */
1071 void HELPER(cefbr
)(uint32_t f1
, int32_t v2
)
1073 env
->fregs
[f1
].l
.upper
= int32_to_float32(v2
, &env
->fpu_status
);
1074 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__
, v2
,
1075 env
->fregs
[f1
].l
.upper
, f1
);
1078 /* 32-bit FP addition RR */
1079 uint32_t HELPER(aebr
)(uint32_t f1
, uint32_t f2
)
1081 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1082 env
->fregs
[f2
].l
.upper
,
1084 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1085 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1087 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1090 /* 64-bit FP addition RR */
1091 uint32_t HELPER(adbr
)(uint32_t f1
, uint32_t f2
)
1093 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1095 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__
,
1096 env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1098 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1101 /* 32-bit FP subtraction RR */
1102 uint32_t HELPER(sebr
)(uint32_t f1
, uint32_t f2
)
1104 env
->fregs
[f1
].l
.upper
= float32_sub(env
->fregs
[f1
].l
.upper
,
1105 env
->fregs
[f2
].l
.upper
,
1107 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__
,
1108 env
->fregs
[f2
].l
.upper
, env
->fregs
[f1
].l
.upper
, f1
);
1110 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1113 /* 64-bit FP subtraction RR */
1114 uint32_t HELPER(sdbr
)(uint32_t f1
, uint32_t f2
)
1116 env
->fregs
[f1
].d
= float64_sub(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1118 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1119 __FUNCTION__
, env
->fregs
[f2
].d
, env
->fregs
[f1
].d
, f1
);
1121 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1124 /* 32-bit FP division RR */
1125 void HELPER(debr
)(uint32_t f1
, uint32_t f2
)
1127 env
->fregs
[f1
].l
.upper
= float32_div(env
->fregs
[f1
].l
.upper
,
1128 env
->fregs
[f2
].l
.upper
,
1132 /* 128-bit FP division RR */
1133 void HELPER(dxbr
)(uint32_t f1
, uint32_t f2
)
1136 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1137 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1139 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1140 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1142 res
.q
= float128_div(v1
.q
, v2
.q
, &env
->fpu_status
);
1143 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1144 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1147 /* 64-bit FP multiplication RR */
1148 void HELPER(mdbr
)(uint32_t f1
, uint32_t f2
)
1150 env
->fregs
[f1
].d
= float64_mul(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1154 /* 128-bit FP multiplication RR */
1155 void HELPER(mxbr
)(uint32_t f1
, uint32_t f2
)
1158 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1159 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1161 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1162 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1164 res
.q
= float128_mul(v1
.q
, v2
.q
, &env
->fpu_status
);
1165 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1166 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1169 /* convert 32-bit float to 64-bit float */
1170 void HELPER(ldebr
)(uint32_t r1
, uint32_t r2
)
1172 env
->fregs
[r1
].d
= float32_to_float64(env
->fregs
[r2
].l
.upper
,
1176 /* convert 128-bit float to 64-bit float */
1177 void HELPER(ldxbr
)(uint32_t f1
, uint32_t f2
)
1180 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1181 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1182 env
->fregs
[f1
].d
= float128_to_float64(x2
.q
, &env
->fpu_status
);
1183 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__
, env
->fregs
[f1
].d
);
1186 /* convert 64-bit float to 128-bit float */
1187 void HELPER(lxdbr
)(uint32_t f1
, uint32_t f2
)
1190 res
.q
= float64_to_float128(env
->fregs
[f2
].d
, &env
->fpu_status
);
1191 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1192 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1195 /* convert 64-bit float to 32-bit float */
1196 void HELPER(ledbr
)(uint32_t f1
, uint32_t f2
)
1198 float64 d2
= env
->fregs
[f2
].d
;
1199 env
->fregs
[f1
].l
.upper
= float64_to_float32(d2
, &env
->fpu_status
);
1202 /* convert 128-bit float to 32-bit float */
1203 void HELPER(lexbr
)(uint32_t f1
, uint32_t f2
)
1206 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1207 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1208 env
->fregs
[f1
].l
.upper
= float128_to_float32(x2
.q
, &env
->fpu_status
);
1209 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__
, env
->fregs
[f1
].l
.upper
);
1212 /* absolute value of 32-bit float */
1213 uint32_t HELPER(lpebr
)(uint32_t f1
, uint32_t f2
)
1216 float32 v2
= env
->fregs
[f2
].d
;
1217 v1
= float32_abs(v2
);
1218 env
->fregs
[f1
].d
= v1
;
1219 return set_cc_nz_f32(v1
);
1222 /* absolute value of 64-bit float */
1223 uint32_t HELPER(lpdbr
)(uint32_t f1
, uint32_t f2
)
1226 float64 v2
= env
->fregs
[f2
].d
;
1227 v1
= float64_abs(v2
);
1228 env
->fregs
[f1
].d
= v1
;
1229 return set_cc_nz_f64(v1
);
1232 /* absolute value of 128-bit float */
1233 uint32_t HELPER(lpxbr
)(uint32_t f1
, uint32_t f2
)
1237 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1238 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1239 v1
.q
= float128_abs(v2
.q
);
1240 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1241 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1242 return set_cc_nz_f128(v1
.q
);
1245 /* load and test 64-bit float */
1246 uint32_t HELPER(ltdbr
)(uint32_t f1
, uint32_t f2
)
1248 env
->fregs
[f1
].d
= env
->fregs
[f2
].d
;
1249 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1252 /* load and test 32-bit float */
1253 uint32_t HELPER(ltebr
)(uint32_t f1
, uint32_t f2
)
1255 env
->fregs
[f1
].l
.upper
= env
->fregs
[f2
].l
.upper
;
1256 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1259 /* load and test 128-bit float */
1260 uint32_t HELPER(ltxbr
)(uint32_t f1
, uint32_t f2
)
1263 x
.ll
.upper
= env
->fregs
[f2
].ll
;
1264 x
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1265 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1266 env
->fregs
[f1
+ 2].ll
= x
.ll
.lower
;
1267 return set_cc_nz_f128(x
.q
);
1270 /* load complement of 32-bit float */
1271 uint32_t HELPER(lcebr
)(uint32_t f1
, uint32_t f2
)
1273 env
->fregs
[f1
].l
.upper
= float32_chs(env
->fregs
[f2
].l
.upper
);
1275 return set_cc_nz_f32(env
->fregs
[f1
].l
.upper
);
1278 /* load complement of 64-bit float */
1279 uint32_t HELPER(lcdbr
)(uint32_t f1
, uint32_t f2
)
1281 env
->fregs
[f1
].d
= float64_chs(env
->fregs
[f2
].d
);
1283 return set_cc_nz_f64(env
->fregs
[f1
].d
);
1286 /* load complement of 128-bit float */
1287 uint32_t HELPER(lcxbr
)(uint32_t f1
, uint32_t f2
)
1290 x2
.ll
.upper
= env
->fregs
[f2
].ll
;
1291 x2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1292 x1
.q
= float128_chs(x2
.q
);
1293 env
->fregs
[f1
].ll
= x1
.ll
.upper
;
1294 env
->fregs
[f1
+ 2].ll
= x1
.ll
.lower
;
1295 return set_cc_nz_f128(x1
.q
);
1298 /* 32-bit FP addition RM */
1299 void HELPER(aeb
)(uint32_t f1
, uint32_t val
)
1301 float32 v1
= env
->fregs
[f1
].l
.upper
;
1304 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1306 env
->fregs
[f1
].l
.upper
= float32_add(v1
, v2
.f
, &env
->fpu_status
);
1309 /* 32-bit FP division RM */
1310 void HELPER(deb
)(uint32_t f1
, uint32_t val
)
1312 float32 v1
= env
->fregs
[f1
].l
.upper
;
1315 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__
,
1317 env
->fregs
[f1
].l
.upper
= float32_div(v1
, v2
.f
, &env
->fpu_status
);
1320 /* 32-bit FP multiplication RM */
1321 void HELPER(meeb
)(uint32_t f1
, uint32_t val
)
1323 float32 v1
= env
->fregs
[f1
].l
.upper
;
1326 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1328 env
->fregs
[f1
].l
.upper
= float32_mul(v1
, v2
.f
, &env
->fpu_status
);
1331 /* 32-bit FP compare RR */
1332 uint32_t HELPER(cebr
)(uint32_t f1
, uint32_t f2
)
1334 float32 v1
= env
->fregs
[f1
].l
.upper
;
1335 float32 v2
= env
->fregs
[f2
].l
.upper
;;
1336 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__
,
1338 return set_cc_f32(v1
, v2
);
1341 /* 64-bit FP compare RR */
1342 uint32_t HELPER(cdbr
)(uint32_t f1
, uint32_t f2
)
1344 float64 v1
= env
->fregs
[f1
].d
;
1345 float64 v2
= env
->fregs
[f2
].d
;;
1346 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__
,
1348 return set_cc_f64(v1
, v2
);
1351 /* 128-bit FP compare RR */
1352 uint32_t HELPER(cxbr
)(uint32_t f1
, uint32_t f2
)
1355 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1356 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1358 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1359 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1361 return float_comp_to_cc(float128_compare_quiet(v1
.q
, v2
.q
,
1365 /* 64-bit FP compare RM */
1366 uint32_t HELPER(cdb
)(uint32_t f1
, uint64_t a2
)
1368 float64 v1
= env
->fregs
[f1
].d
;
1371 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__
, v1
,
1373 return set_cc_f64(v1
, v2
.d
);
1376 /* 64-bit FP addition RM */
1377 uint32_t HELPER(adb
)(uint32_t f1
, uint64_t a2
)
1379 float64 v1
= env
->fregs
[f1
].d
;
1382 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__
,
1384 env
->fregs
[f1
].d
= v1
= float64_add(v1
, v2
.d
, &env
->fpu_status
);
1385 return set_cc_nz_f64(v1
);
1388 /* 32-bit FP subtraction RM */
1389 void HELPER(seb
)(uint32_t f1
, uint32_t val
)
1391 float32 v1
= env
->fregs
[f1
].l
.upper
;
1394 env
->fregs
[f1
].l
.upper
= float32_sub(v1
, v2
.f
, &env
->fpu_status
);
1397 /* 64-bit FP subtraction RM */
1398 uint32_t HELPER(sdb
)(uint32_t f1
, uint64_t a2
)
1400 float64 v1
= env
->fregs
[f1
].d
;
1403 env
->fregs
[f1
].d
= v1
= float64_sub(v1
, v2
.d
, &env
->fpu_status
);
1404 return set_cc_nz_f64(v1
);
1407 /* 64-bit FP multiplication RM */
1408 void HELPER(mdb
)(uint32_t f1
, uint64_t a2
)
1410 float64 v1
= env
->fregs
[f1
].d
;
1413 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__
,
1415 env
->fregs
[f1
].d
= float64_mul(v1
, v2
.d
, &env
->fpu_status
);
1418 /* 64-bit FP division RM */
1419 void HELPER(ddb
)(uint32_t f1
, uint64_t a2
)
1421 float64 v1
= env
->fregs
[f1
].d
;
1424 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__
,
1426 env
->fregs
[f1
].d
= float64_div(v1
, v2
.d
, &env
->fpu_status
);
1429 static void set_round_mode(int m3
)
1436 /* biased round no nearest */
1438 /* round to nearest */
1439 set_float_rounding_mode(float_round_nearest_even
, &env
->fpu_status
);
1443 set_float_rounding_mode(float_round_to_zero
, &env
->fpu_status
);
1447 set_float_rounding_mode(float_round_up
, &env
->fpu_status
);
1451 set_float_rounding_mode(float_round_down
, &env
->fpu_status
);
1456 /* convert 32-bit float to 64-bit int */
1457 uint32_t HELPER(cgebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1459 float32 v2
= env
->fregs
[f2
].l
.upper
;
1461 env
->regs
[r1
] = float32_to_int64(v2
, &env
->fpu_status
);
1462 return set_cc_nz_f32(v2
);
1465 /* convert 64-bit float to 64-bit int */
1466 uint32_t HELPER(cgdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1468 float64 v2
= env
->fregs
[f2
].d
;
1470 env
->regs
[r1
] = float64_to_int64(v2
, &env
->fpu_status
);
1471 return set_cc_nz_f64(v2
);
1474 /* convert 128-bit float to 64-bit int */
1475 uint32_t HELPER(cgxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1478 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1479 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1481 env
->regs
[r1
] = float128_to_int64(v2
.q
, &env
->fpu_status
);
1482 if (float128_is_any_nan(v2
.q
)) {
1484 } else if (float128_is_zero(v2
.q
)) {
1486 } else if (float128_is_neg(v2
.q
)) {
1493 /* convert 32-bit float to 32-bit int */
1494 uint32_t HELPER(cfebr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1496 float32 v2
= env
->fregs
[f2
].l
.upper
;
1498 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1499 float32_to_int32(v2
, &env
->fpu_status
);
1500 return set_cc_nz_f32(v2
);
1503 /* convert 64-bit float to 32-bit int */
1504 uint32_t HELPER(cfdbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1506 float64 v2
= env
->fregs
[f2
].d
;
1508 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1509 float64_to_int32(v2
, &env
->fpu_status
);
1510 return set_cc_nz_f64(v2
);
1513 /* convert 128-bit float to 32-bit int */
1514 uint32_t HELPER(cfxbr
)(uint32_t r1
, uint32_t f2
, uint32_t m3
)
1517 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1518 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1519 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1520 float128_to_int32(v2
.q
, &env
->fpu_status
);
1521 return set_cc_nz_f128(v2
.q
);
1524 /* load 32-bit FP zero */
1525 void HELPER(lzer
)(uint32_t f1
)
1527 env
->fregs
[f1
].l
.upper
= float32_zero
;
1530 /* load 64-bit FP zero */
1531 void HELPER(lzdr
)(uint32_t f1
)
1533 env
->fregs
[f1
].d
= float64_zero
;
1536 /* load 128-bit FP zero */
1537 void HELPER(lzxr
)(uint32_t f1
)
1540 x
.q
= float64_to_float128(float64_zero
, &env
->fpu_status
);
1541 env
->fregs
[f1
].ll
= x
.ll
.upper
;
1542 env
->fregs
[f1
+ 1].ll
= x
.ll
.lower
;
1545 /* 128-bit FP subtraction RR */
1546 uint32_t HELPER(sxbr
)(uint32_t f1
, uint32_t f2
)
1549 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1550 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1552 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1553 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1555 res
.q
= float128_sub(v1
.q
, v2
.q
, &env
->fpu_status
);
1556 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1557 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1558 return set_cc_nz_f128(res
.q
);
1561 /* 128-bit FP addition RR */
1562 uint32_t HELPER(axbr
)(uint32_t f1
, uint32_t f2
)
1565 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1566 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1568 v2
.ll
.upper
= env
->fregs
[f2
].ll
;
1569 v2
.ll
.lower
= env
->fregs
[f2
+ 2].ll
;
1571 res
.q
= float128_add(v1
.q
, v2
.q
, &env
->fpu_status
);
1572 env
->fregs
[f1
].ll
= res
.ll
.upper
;
1573 env
->fregs
[f1
+ 2].ll
= res
.ll
.lower
;
1574 return set_cc_nz_f128(res
.q
);
1577 /* 32-bit FP multiplication RR */
1578 void HELPER(meebr
)(uint32_t f1
, uint32_t f2
)
1580 env
->fregs
[f1
].l
.upper
= float32_mul(env
->fregs
[f1
].l
.upper
,
1581 env
->fregs
[f2
].l
.upper
,
1585 /* 64-bit FP division RR */
1586 void HELPER(ddbr
)(uint32_t f1
, uint32_t f2
)
1588 env
->fregs
[f1
].d
= float64_div(env
->fregs
[f1
].d
, env
->fregs
[f2
].d
,
1592 /* 64-bit FP multiply and add RM */
1593 void HELPER(madb
)(uint32_t f1
, uint64_t a2
, uint32_t f3
)
1595 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__
, f1
, a2
, f3
);
1598 env
->fregs
[f1
].d
= float64_add(env
->fregs
[f1
].d
,
1599 float64_mul(v2
.d
, env
->fregs
[f3
].d
,
1604 /* 64-bit FP multiply and add RR */
1605 void HELPER(madbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1607 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1608 env
->fregs
[f1
].d
= float64_add(float64_mul(env
->fregs
[f2
].d
,
1611 env
->fregs
[f1
].d
, &env
->fpu_status
);
1614 /* 64-bit FP multiply and subtract RR */
1615 void HELPER(msdbr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1617 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__
, f1
, f2
, f3
);
1618 env
->fregs
[f1
].d
= float64_sub(float64_mul(env
->fregs
[f2
].d
,
1621 env
->fregs
[f1
].d
, &env
->fpu_status
);
1624 /* 32-bit FP multiply and add RR */
1625 void HELPER(maebr
)(uint32_t f1
, uint32_t f3
, uint32_t f2
)
1627 env
->fregs
[f1
].l
.upper
= float32_add(env
->fregs
[f1
].l
.upper
,
1628 float32_mul(env
->fregs
[f2
].l
.upper
,
1629 env
->fregs
[f3
].l
.upper
,
1634 /* convert 32-bit float to 64-bit float */
1635 void HELPER(ldeb
)(uint32_t f1
, uint64_t a2
)
1639 env
->fregs
[f1
].d
= float32_to_float64(v2
,
1643 /* convert 64-bit float to 128-bit float */
1644 void HELPER(lxdb
)(uint32_t f1
, uint64_t a2
)
1649 v1
.q
= float64_to_float128(v2
.d
, &env
->fpu_status
);
1650 env
->fregs
[f1
].ll
= v1
.ll
.upper
;
1651 env
->fregs
[f1
+ 2].ll
= v1
.ll
.lower
;
1654 /* test data class 32-bit */
1655 uint32_t HELPER(tceb
)(uint32_t f1
, uint64_t m2
)
1657 float32 v1
= env
->fregs
[f1
].l
.upper
;
1658 int neg
= float32_is_neg(v1
);
1661 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, (long)v1
, m2
, neg
);
1662 if ((float32_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1663 (float32_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1664 (float32_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1665 (float32_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1667 } else if (m2
& (1 << (9-neg
))) {
1668 /* assume normalized number */
1672 /* FIXME: denormalized? */
1676 /* test data class 64-bit */
1677 uint32_t HELPER(tcdb
)(uint32_t f1
, uint64_t m2
)
1679 float64 v1
= env
->fregs
[f1
].d
;
1680 int neg
= float64_is_neg(v1
);
1683 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__
, v1
, m2
, neg
);
1684 if ((float64_is_zero(v1
) && (m2
& (1 << (11-neg
)))) ||
1685 (float64_is_infinity(v1
) && (m2
& (1 << (5-neg
)))) ||
1686 (float64_is_any_nan(v1
) && (m2
& (1 << (3-neg
)))) ||
1687 (float64_is_signaling_nan(v1
) && (m2
& (1 << (1-neg
))))) {
1689 } else if (m2
& (1 << (9-neg
))) {
1690 /* assume normalized number */
1693 /* FIXME: denormalized? */
1697 /* test data class 128-bit */
1698 uint32_t HELPER(tcxb
)(uint32_t f1
, uint64_t m2
)
1702 v1
.ll
.upper
= env
->fregs
[f1
].ll
;
1703 v1
.ll
.lower
= env
->fregs
[f1
+ 2].ll
;
1705 int neg
= float128_is_neg(v1
.q
);
1706 if ((float128_is_zero(v1
.q
) && (m2
& (1 << (11-neg
)))) ||
1707 (float128_is_infinity(v1
.q
) && (m2
& (1 << (5-neg
)))) ||
1708 (float128_is_any_nan(v1
.q
) && (m2
& (1 << (3-neg
)))) ||
1709 (float128_is_signaling_nan(v1
.q
) && (m2
& (1 << (1-neg
))))) {
1711 } else if (m2
& (1 << (9-neg
))) {
1712 /* assume normalized number */
1715 /* FIXME: denormalized? */
1719 /* find leftmost one */
1720 uint32_t HELPER(flogr
)(uint32_t r1
, uint64_t v2
)
1725 while (!(v2
& 0x8000000000000000ULL
) && v2
) {
1732 env
->regs
[r1
+ 1] = 0;
1735 env
->regs
[r1
] = res
;
1736 env
->regs
[r1
+ 1] = ov2
& ~(0x8000000000000000ULL
>> res
);
1741 /* square root 64-bit RR */
1742 void HELPER(sqdbr
)(uint32_t f1
, uint32_t f2
)
1744 env
->fregs
[f1
].d
= float64_sqrt(env
->fregs
[f2
].d
, &env
->fpu_status
);
1748 void HELPER(cksm
)(uint32_t r1
, uint32_t r2
)
1750 uint64_t src
= get_address_31fix(r2
);
1751 uint64_t src_len
= env
->regs
[(r2
+ 1) & 15];
1752 uint64_t cksm
= (uint32_t)env
->regs
[r1
];
1754 while (src_len
>= 4) {
1757 /* move to next word */
1766 cksm
+= ldub(src
) << 24;
1769 cksm
+= lduw(src
) << 16;
1772 cksm
+= lduw(src
) << 16;
1773 cksm
+= ldub(src
+ 2) << 8;
1777 /* indicate we've processed everything */
1778 env
->regs
[r2
] = src
+ src_len
;
1779 env
->regs
[(r2
+ 1) & 15] = 0;
1782 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) |
1783 ((uint32_t)cksm
+ (cksm
>> 32));
1786 static inline uint32_t cc_calc_ltgt_32(CPUState
*env
, int32_t src
,
1791 } else if (src
< dst
) {
1798 static inline uint32_t cc_calc_ltgt0_32(CPUState
*env
, int32_t dst
)
1800 return cc_calc_ltgt_32(env
, dst
, 0);
1803 static inline uint32_t cc_calc_ltgt_64(CPUState
*env
, int64_t src
,
1808 } else if (src
< dst
) {
1815 static inline uint32_t cc_calc_ltgt0_64(CPUState
*env
, int64_t dst
)
1817 return cc_calc_ltgt_64(env
, dst
, 0);
1820 static inline uint32_t cc_calc_ltugtu_32(CPUState
*env
, uint32_t src
,
1825 } else if (src
< dst
) {
1832 static inline uint32_t cc_calc_ltugtu_64(CPUState
*env
, uint64_t src
,
1837 } else if (src
< dst
) {
1844 static inline uint32_t cc_calc_tm_32(CPUState
*env
, uint32_t val
, uint32_t mask
)
1846 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__
, val
, mask
);
1847 uint16_t r
= val
& mask
;
1848 if (r
== 0 || mask
== 0) {
1850 } else if (r
== mask
) {
1857 /* set condition code for test under mask */
1858 static inline uint32_t cc_calc_tm_64(CPUState
*env
, uint64_t val
, uint32_t mask
)
1860 uint16_t r
= val
& mask
;
1861 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__
, val
, mask
, r
);
1862 if (r
== 0 || mask
== 0) {
1864 } else if (r
== mask
) {
1867 while (!(mask
& 0x8000)) {
1879 static inline uint32_t cc_calc_nz(CPUState
*env
, uint64_t dst
)
1884 static inline uint32_t cc_calc_add_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1887 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1888 return 3; /* overflow */
1892 } else if (ar
> 0) {
1900 static inline uint32_t cc_calc_addu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1910 if (ar
< a1
|| ar
< a2
) {
1918 static inline uint32_t cc_calc_sub_64(CPUState
*env
, int64_t a1
, int64_t a2
,
1921 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
1922 return 3; /* overflow */
1926 } else if (ar
> 0) {
1934 static inline uint32_t cc_calc_subu_64(CPUState
*env
, uint64_t a1
, uint64_t a2
,
1948 static inline uint32_t cc_calc_abs_64(CPUState
*env
, int64_t dst
)
1950 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1959 static inline uint32_t cc_calc_nabs_64(CPUState
*env
, int64_t dst
)
1964 static inline uint32_t cc_calc_comp_64(CPUState
*env
, int64_t dst
)
1966 if ((uint64_t)dst
== 0x8000000000000000ULL
) {
1968 } else if (dst
< 0) {
1970 } else if (dst
> 0) {
1978 static inline uint32_t cc_calc_add_32(CPUState
*env
, int32_t a1
, int32_t a2
,
1981 if ((a1
> 0 && a2
> 0 && ar
< 0) || (a1
< 0 && a2
< 0 && ar
> 0)) {
1982 return 3; /* overflow */
1986 } else if (ar
> 0) {
1994 static inline uint32_t cc_calc_addu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2004 if (ar
< a1
|| ar
< a2
) {
2012 static inline uint32_t cc_calc_sub_32(CPUState
*env
, int32_t a1
, int32_t a2
,
2015 if ((a1
> 0 && a2
< 0 && ar
< 0) || (a1
< 0 && a2
> 0 && ar
> 0)) {
2016 return 3; /* overflow */
2020 } else if (ar
> 0) {
2028 static inline uint32_t cc_calc_subu_32(CPUState
*env
, uint32_t a1
, uint32_t a2
,
2042 static inline uint32_t cc_calc_abs_32(CPUState
*env
, int32_t dst
)
2044 if ((uint32_t)dst
== 0x80000000UL
) {
2053 static inline uint32_t cc_calc_nabs_32(CPUState
*env
, int32_t dst
)
2058 static inline uint32_t cc_calc_comp_32(CPUState
*env
, int32_t dst
)
2060 if ((uint32_t)dst
== 0x80000000UL
) {
2062 } else if (dst
< 0) {
2064 } else if (dst
> 0) {
2071 /* calculate condition code for insert character under mask insn */
2072 static inline uint32_t cc_calc_icm_32(CPUState
*env
, uint32_t mask
, uint32_t val
)
2074 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__
, mask
, val
);
2080 } else if (val
& 0x80000000) {
2087 if (!val
|| !mask
) {
2103 static inline uint32_t cc_calc_slag(CPUState
*env
, uint64_t src
, uint64_t shift
)
2105 uint64_t mask
= ((1ULL << shift
) - 1ULL) << (64 - shift
);
2108 /* check if the sign bit stays the same */
2109 if (src
& (1ULL << 63)) {
2115 if ((src
& mask
) != match
) {
2120 r
= ((src
<< shift
) & ((1ULL << 63) - 1)) | (src
& (1ULL << 63));
2122 if ((int64_t)r
== 0) {
2124 } else if ((int64_t)r
< 0) {
2132 static inline uint32_t do_calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
,
2133 uint64_t dst
, uint64_t vr
)
2142 /* cc_op value _is_ cc */
2145 case CC_OP_LTGT0_32
:
2146 r
= cc_calc_ltgt0_32(env
, dst
);
2148 case CC_OP_LTGT0_64
:
2149 r
= cc_calc_ltgt0_64(env
, dst
);
2152 r
= cc_calc_ltgt_32(env
, src
, dst
);
2155 r
= cc_calc_ltgt_64(env
, src
, dst
);
2157 case CC_OP_LTUGTU_32
:
2158 r
= cc_calc_ltugtu_32(env
, src
, dst
);
2160 case CC_OP_LTUGTU_64
:
2161 r
= cc_calc_ltugtu_64(env
, src
, dst
);
2164 r
= cc_calc_tm_32(env
, src
, dst
);
2167 r
= cc_calc_tm_64(env
, src
, dst
);
2170 r
= cc_calc_nz(env
, dst
);
2173 r
= cc_calc_add_64(env
, src
, dst
, vr
);
2176 r
= cc_calc_addu_64(env
, src
, dst
, vr
);
2179 r
= cc_calc_sub_64(env
, src
, dst
, vr
);
2182 r
= cc_calc_subu_64(env
, src
, dst
, vr
);
2185 r
= cc_calc_abs_64(env
, dst
);
2188 r
= cc_calc_nabs_64(env
, dst
);
2191 r
= cc_calc_comp_64(env
, dst
);
2195 r
= cc_calc_add_32(env
, src
, dst
, vr
);
2198 r
= cc_calc_addu_32(env
, src
, dst
, vr
);
2201 r
= cc_calc_sub_32(env
, src
, dst
, vr
);
2204 r
= cc_calc_subu_32(env
, src
, dst
, vr
);
2207 r
= cc_calc_abs_64(env
, dst
);
2210 r
= cc_calc_nabs_64(env
, dst
);
2213 r
= cc_calc_comp_32(env
, dst
);
2217 r
= cc_calc_icm_32(env
, src
, dst
);
2220 r
= cc_calc_slag(env
, src
, dst
);
2223 case CC_OP_LTGT_F32
:
2224 r
= set_cc_f32(src
, dst
);
2226 case CC_OP_LTGT_F64
:
2227 r
= set_cc_f64(src
, dst
);
2230 r
= set_cc_nz_f32(dst
);
2233 r
= set_cc_nz_f64(dst
);
2237 cpu_abort(env
, "Unknown CC operation: %s\n", cc_name(cc_op
));
2240 HELPER_LOG("%s: %15s 0x%016lx 0x%016lx 0x%016lx = %d\n", __FUNCTION__
,
2241 cc_name(cc_op
), src
, dst
, vr
, r
);
2245 uint32_t calc_cc(CPUState
*env
, uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2248 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2251 uint32_t HELPER(calc_cc
)(uint32_t cc_op
, uint64_t src
, uint64_t dst
,
2254 return do_calc_cc(env
, cc_op
, src
, dst
, vr
);
2257 uint64_t HELPER(cvd
)(int32_t bin
)
2260 uint64_t dec
= 0x0c;
2268 for (shift
= 4; (shift
< 64) && bin
; shift
+= 4) {
2269 int current_number
= bin
% 10;
2271 dec
|= (current_number
) << shift
;
2278 void HELPER(unpk
)(uint32_t len
, uint64_t dest
, uint64_t src
)
2280 int len_dest
= len
>> 4;
2281 int len_src
= len
& 0xf;
2283 int second_nibble
= 0;
2288 /* last byte is special, it only flips the nibbles */
2290 stb(dest
, (b
<< 4) | (b
>> 4));
2294 /* now pad every nibble with 0xf0 */
2296 while (len_dest
> 0) {
2297 uint8_t cur_byte
= 0;
2300 cur_byte
= ldub(src
);
2306 /* only advance one nibble at a time */
2307 if (second_nibble
) {
2312 second_nibble
= !second_nibble
;
2315 cur_byte
= (cur_byte
& 0xf);
2319 stb(dest
, cur_byte
);
2323 void HELPER(tr
)(uint32_t len
, uint64_t array
, uint64_t trans
)
2327 for (i
= 0; i
<= len
; i
++) {
2328 uint8_t byte
= ldub(array
+ i
);
2329 uint8_t new_byte
= ldub(trans
+ byte
);
2330 stb(array
+ i
, new_byte
);
2334 #ifndef CONFIG_USER_ONLY
2336 void HELPER(load_psw
)(uint64_t mask
, uint64_t addr
)
2338 load_psw(env
, mask
, addr
);
2342 static void program_interrupt(CPUState
*env
, uint32_t code
, int ilc
)
2344 qemu_log("program interrupt at %#" PRIx64
"\n", env
->psw
.addr
);
2346 if (kvm_enabled()) {
2348 kvm_s390_interrupt(env
, KVM_S390_PROGRAM_INT
, code
);
2351 env
->int_pgm_code
= code
;
2352 env
->int_pgm_ilc
= ilc
;
2353 env
->exception_index
= EXCP_PGM
;
2358 static void ext_interrupt(CPUState
*env
, int type
, uint32_t param
,
2361 cpu_inject_ext(env
, type
, param
, param64
);
2364 int sclp_service_call(CPUState
*env
, uint32_t sccb
, uint64_t code
)
2370 printf("sclp(0x%x, 0x%" PRIx64
")\n", sccb
, code
);
2373 if (sccb
& ~0x7ffffff8ul
) {
2374 fprintf(stderr
, "KVM: invalid sccb address 0x%x\n", sccb
);
2380 case SCLP_CMDW_READ_SCP_INFO
:
2381 case SCLP_CMDW_READ_SCP_INFO_FORCED
:
2382 while ((ram_size
>> (20 + shift
)) > 65535) {
2385 stw_phys(sccb
+ SCP_MEM_CODE
, ram_size
>> (20 + shift
));
2386 stb_phys(sccb
+ SCP_INCREMENT
, 1 << shift
);
2387 stw_phys(sccb
+ SCP_RESPONSE_CODE
, 0x10);
2389 if (kvm_enabled()) {
2391 kvm_s390_interrupt_internal(env
, KVM_S390_INT_SERVICE
,
2396 ext_interrupt(env
, EXT_SERVICE
, sccb
& ~3, 0);
2401 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64
"x\n", sccb
, code
);
2411 /* SCLP service call */
2412 uint32_t HELPER(servc
)(uint32_t r1
, uint64_t r2
)
2414 if (sclp_service_call(env
, r1
, r2
)) {
2422 uint64_t HELPER(diag
)(uint32_t num
, uint64_t mem
, uint64_t code
)
2429 r
= s390_virtio_hypercall(env
, mem
, code
);
2445 program_interrupt(env
, PGM_OPERATION
, ILC_LATER_INC
);
2452 void HELPER(stidp
)(uint64_t a1
)
2454 stq(a1
, env
->cpu_num
);
2458 void HELPER(spx
)(uint64_t a1
)
2463 env
->psa
= prefix
& 0xfffff000;
2464 qemu_log("prefix: %#x\n", prefix
);
2465 tlb_flush_page(env
, 0);
2466 tlb_flush_page(env
, TARGET_PAGE_SIZE
);
2470 uint32_t HELPER(sck
)(uint64_t a1
)
2472 /* XXX not implemented - is it necessary? */
2477 static inline uint64_t clock_value(CPUState
*env
)
2481 time
= env
->tod_offset
+
2482 time2tod(qemu_get_clock_ns(vm_clock
) - env
->tod_basetime
);
2488 uint32_t HELPER(stck
)(uint64_t a1
)
2490 stq(a1
, clock_value(env
));
2495 /* Store Clock Extended */
2496 uint32_t HELPER(stcke
)(uint64_t a1
)
2499 /* basically the same value as stck */
2500 stq(a1
+ 1, clock_value(env
) | env
->cpu_num
);
2501 /* more fine grained than stck */
2503 /* XXX programmable fields */
2510 /* Set Clock Comparator */
2511 void HELPER(sckc
)(uint64_t a1
)
2513 uint64_t time
= ldq(a1
);
2515 if (time
== -1ULL) {
2519 /* difference between now and then */
2520 time
-= clock_value(env
);
2522 time
= (time
* 125) >> 9;
2524 qemu_mod_timer(env
->tod_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2527 /* Store Clock Comparator */
2528 void HELPER(stckc
)(uint64_t a1
)
2535 void HELPER(spt
)(uint64_t a1
)
2537 uint64_t time
= ldq(a1
);
2539 if (time
== -1ULL) {
2544 time
= (time
* 125) >> 9;
2546 qemu_mod_timer(env
->cpu_timer
, qemu_get_clock_ns(vm_clock
) + time
);
2549 /* Store CPU Timer */
2550 void HELPER(stpt
)(uint64_t a1
)
2556 /* Store System Information */
2557 uint32_t HELPER(stsi
)(uint64_t a0
, uint32_t r0
, uint32_t r1
)
2562 if ((r0
& STSI_LEVEL_MASK
) <= STSI_LEVEL_3
&&
2563 ((r0
& STSI_R0_RESERVED_MASK
) || (r1
& STSI_R1_RESERVED_MASK
))) {
2564 /* valid function code, invalid reserved bits */
2565 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2568 sel1
= r0
& STSI_R0_SEL1_MASK
;
2569 sel2
= r1
& STSI_R1_SEL2_MASK
;
2571 /* XXX: spec exception if sysib is not 4k-aligned */
2573 switch (r0
& STSI_LEVEL_MASK
) {
2575 if ((sel1
== 1) && (sel2
== 1)) {
2576 /* Basic Machine Configuration */
2577 struct sysib_111 sysib
;
2579 memset(&sysib
, 0, sizeof(sysib
));
2580 ebcdic_put(sysib
.manuf
, "QEMU ", 16);
2581 /* same as machine type number in STORE CPU ID */
2582 ebcdic_put(sysib
.type
, "QEMU", 4);
2583 /* same as model number in STORE CPU ID */
2584 ebcdic_put(sysib
.model
, "QEMU ", 16);
2585 ebcdic_put(sysib
.sequence
, "QEMU ", 16);
2586 ebcdic_put(sysib
.plant
, "QEMU", 4);
2587 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2588 } else if ((sel1
== 2) && (sel2
== 1)) {
2589 /* Basic Machine CPU */
2590 struct sysib_121 sysib
;
2592 memset(&sysib
, 0, sizeof(sysib
));
2593 /* XXX make different for different CPUs? */
2594 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2595 ebcdic_put(sysib
.plant
, "QEMU", 4);
2596 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2597 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2598 } else if ((sel1
== 2) && (sel2
== 2)) {
2599 /* Basic Machine CPUs */
2600 struct sysib_122 sysib
;
2602 memset(&sysib
, 0, sizeof(sysib
));
2603 stl_p(&sysib
.capability
, 0x443afc29);
2604 /* XXX change when SMP comes */
2605 stw_p(&sysib
.total_cpus
, 1);
2606 stw_p(&sysib
.active_cpus
, 1);
2607 stw_p(&sysib
.standby_cpus
, 0);
2608 stw_p(&sysib
.reserved_cpus
, 0);
2609 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2616 if ((sel1
== 2) && (sel2
== 1)) {
2618 struct sysib_221 sysib
;
2620 memset(&sysib
, 0, sizeof(sysib
));
2621 /* XXX make different for different CPUs? */
2622 ebcdic_put(sysib
.sequence
, "QEMUQEMUQEMUQEMU", 16);
2623 ebcdic_put(sysib
.plant
, "QEMU", 4);
2624 stw_p(&sysib
.cpu_addr
, env
->cpu_num
);
2625 stw_p(&sysib
.cpu_id
, 0);
2626 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2627 } else if ((sel1
== 2) && (sel2
== 2)) {
2629 struct sysib_222 sysib
;
2631 memset(&sysib
, 0, sizeof(sysib
));
2632 stw_p(&sysib
.lpar_num
, 0);
2634 /* XXX change when SMP comes */
2635 stw_p(&sysib
.total_cpus
, 1);
2636 stw_p(&sysib
.conf_cpus
, 1);
2637 stw_p(&sysib
.standby_cpus
, 0);
2638 stw_p(&sysib
.reserved_cpus
, 0);
2639 ebcdic_put(sysib
.name
, "QEMU ", 8);
2640 stl_p(&sysib
.caf
, 1000);
2641 stw_p(&sysib
.dedicated_cpus
, 0);
2642 stw_p(&sysib
.shared_cpus
, 0);
2643 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2651 if ((sel1
== 2) && (sel2
== 2)) {
2653 struct sysib_322 sysib
;
2655 memset(&sysib
, 0, sizeof(sysib
));
2657 /* XXX change when SMP comes */
2658 stw_p(&sysib
.vm
[0].total_cpus
, 1);
2659 stw_p(&sysib
.vm
[0].conf_cpus
, 1);
2660 stw_p(&sysib
.vm
[0].standby_cpus
, 0);
2661 stw_p(&sysib
.vm
[0].reserved_cpus
, 0);
2662 ebcdic_put(sysib
.vm
[0].name
, "KVMguest", 8);
2663 stl_p(&sysib
.vm
[0].caf
, 1000);
2664 ebcdic_put(sysib
.vm
[0].cpi
, "KVM/Linux ", 16);
2665 cpu_physical_memory_rw(a0
, (uint8_t*)&sysib
, sizeof(sysib
), 1);
2671 case STSI_LEVEL_CURRENT
:
2672 env
->regs
[0] = STSI_LEVEL_3
;
2682 void HELPER(lctlg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2687 for (i
= r1
;; i
= (i
+ 1) % 16) {
2688 env
->cregs
[i
] = ldq(src
);
2689 HELPER_LOG("load ctl %d from 0x%" PRIx64
" == 0x%" PRIx64
"\n",
2690 i
, src
, env
->cregs
[i
]);
2691 src
+= sizeof(uint64_t);
2701 void HELPER(lctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2706 for (i
= r1
;; i
= (i
+ 1) % 16) {
2707 env
->cregs
[i
] = (env
->cregs
[i
] & 0xFFFFFFFF00000000ULL
) | ldl(src
);
2708 src
+= sizeof(uint32_t);
2718 void HELPER(stctg
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2723 for (i
= r1
;; i
= (i
+ 1) % 16) {
2724 stq(dest
, env
->cregs
[i
]);
2725 dest
+= sizeof(uint64_t);
2733 void HELPER(stctl
)(uint32_t r1
, uint64_t a2
, uint32_t r3
)
2738 for (i
= r1
;; i
= (i
+ 1) % 16) {
2739 stl(dest
, env
->cregs
[i
]);
2740 dest
+= sizeof(uint32_t);
2748 uint32_t HELPER(tprot
)(uint64_t a1
, uint64_t a2
)
2755 /* insert storage key extended */
2756 uint64_t HELPER(iske
)(uint64_t r2
)
2758 uint64_t addr
= get_address(0, 0, r2
);
2760 if (addr
> ram_size
) {
2764 /* XXX maybe use qemu's internal keys? */
2765 return env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
];
2768 /* set storage key extended */
2769 void HELPER(sske
)(uint32_t r1
, uint64_t r2
)
2771 uint64_t addr
= get_address(0, 0, r2
);
2773 if (addr
> ram_size
) {
2777 env
->storage_keys
[addr
/ TARGET_PAGE_SIZE
] = r1
;
2780 /* reset reference bit extended */
2781 uint32_t HELPER(rrbe
)(uint32_t r1
, uint64_t r2
)
2783 if (r2
> ram_size
) {
2789 env
->storage_keys
[r2
/ TARGET_PAGE_SIZE
] &= ~SK_REFERENCED
;
2795 * 0 Reference bit zero; change bit zero
2796 * 1 Reference bit zero; change bit one
2797 * 2 Reference bit one; change bit zero
2798 * 3 Reference bit one; change bit one
2803 /* compare and swap and purge */
2804 uint32_t HELPER(csp
)(uint32_t r1
, uint32_t r2
)
2807 uint32_t o1
= env
->regs
[r1
];
2808 uint64_t a2
= get_address_31fix(r2
) & ~3ULL;
2809 uint32_t o2
= ldl(a2
);
2812 stl(a2
, env
->regs
[(r1
+ 1) & 15]);
2813 if (env
->regs
[r2
] & 0x3) {
2814 /* flush TLB / ALB */
2819 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | o2
;
2826 static uint32_t mvc_asc(int64_t l
, uint64_t a1
, uint64_t mode1
, uint64_t a2
,
2829 target_ulong src
, dest
;
2830 int flags
, cc
= 0, i
;
2834 } else if (l
> 256) {
2840 if (mmu_translate(env
, a1
& TARGET_PAGE_MASK
, 1, mode1
, &dest
, &flags
)) {
2843 dest
|= a1
& ~TARGET_PAGE_MASK
;
2845 if (mmu_translate(env
, a2
& TARGET_PAGE_MASK
, 0, mode2
, &src
, &flags
)) {
2848 src
|= a2
& ~TARGET_PAGE_MASK
;
2850 /* XXX replace w/ memcpy */
2851 for (i
= 0; i
< l
; i
++) {
2852 /* XXX be more clever */
2853 if ((((dest
+ i
) & TARGET_PAGE_MASK
) != (dest
& TARGET_PAGE_MASK
)) ||
2854 (((src
+ i
) & TARGET_PAGE_MASK
) != (src
& TARGET_PAGE_MASK
))) {
2855 mvc_asc(l
- i
, a1
+ i
, mode1
, a2
+ i
, mode2
);
2858 stb_phys(dest
+ i
, ldub_phys(src
+ i
));
2864 uint32_t HELPER(mvcs
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2866 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2867 __FUNCTION__
, l
, a1
, a2
);
2869 return mvc_asc(l
, a1
, PSW_ASC_SECONDARY
, a2
, PSW_ASC_PRIMARY
);
2872 uint32_t HELPER(mvcp
)(uint64_t l
, uint64_t a1
, uint64_t a2
)
2874 HELPER_LOG("%s: %16" PRIx64
" %16" PRIx64
" %16" PRIx64
"\n",
2875 __FUNCTION__
, l
, a1
, a2
);
2877 return mvc_asc(l
, a1
, PSW_ASC_PRIMARY
, a2
, PSW_ASC_SECONDARY
);
2880 uint32_t HELPER(sigp
)(uint64_t order_code
, uint32_t r1
, uint64_t cpu_addr
)
2884 HELPER_LOG("%s: %016" PRIx64
" %08x %016" PRIx64
"\n",
2885 __FUNCTION__
, order_code
, r1
, cpu_addr
);
2887 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2888 as parameter (input). Status (output) is always R1. */
2890 switch (order_code
) {
2895 /* enumerate CPU status */
2897 /* XXX implement when SMP comes */
2900 env
->regs
[r1
] &= 0xffffffff00000000ULL
;
2905 fprintf(stderr
, "XXX unknown sigp: 0x%" PRIx64
"\n", order_code
);
2912 void HELPER(sacf
)(uint64_t a1
)
2914 HELPER_LOG("%s: %16" PRIx64
"\n", __FUNCTION__
, a1
);
2916 switch (a1
& 0xf00) {
2918 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2919 env
->psw
.mask
|= PSW_ASC_PRIMARY
;
2922 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2923 env
->psw
.mask
|= PSW_ASC_SECONDARY
;
2926 env
->psw
.mask
&= ~PSW_MASK_ASC
;
2927 env
->psw
.mask
|= PSW_ASC_HOME
;
2930 qemu_log("unknown sacf mode: %" PRIx64
"\n", a1
);
2931 program_interrupt(env
, PGM_SPECIFICATION
, 2);
2936 /* invalidate pte */
2937 void HELPER(ipte
)(uint64_t pte_addr
, uint64_t vaddr
)
2939 uint64_t page
= vaddr
& TARGET_PAGE_MASK
;
2942 /* XXX broadcast to other CPUs */
2944 /* XXX Linux is nice enough to give us the exact pte address.
2945 According to spec we'd have to find it out ourselves */
2946 /* XXX Linux is fine with overwriting the pte, the spec requires
2947 us to only set the invalid bit */
2948 stq_phys(pte_addr
, pte
| _PAGE_INVALID
);
2950 /* XXX we exploit the fact that Linux passes the exact virtual
2951 address here - it's not obliged to! */
2952 tlb_flush_page(env
, page
);
2955 /* flush local tlb */
2956 void HELPER(ptlb
)(void)
2961 /* store using real address */
2962 void HELPER(stura
)(uint64_t addr
, uint32_t v1
)
2964 stw_phys(get_address(0, 0, addr
), v1
);
2967 /* load real address */
2968 uint32_t HELPER(lra
)(uint64_t addr
, uint32_t r1
)
2971 int old_exc
= env
->exception_index
;
2972 uint64_t asc
= env
->psw
.mask
& PSW_MASK_ASC
;
2976 /* XXX incomplete - has more corner cases */
2977 if (!(env
->psw
.mask
& PSW_MASK_64
) && (addr
>> 32)) {
2978 program_interrupt(env
, PGM_SPECIAL_OP
, 2);
2981 env
->exception_index
= old_exc
;
2982 if (mmu_translate(env
, addr
, 0, asc
, &ret
, &flags
)) {
2985 if (env
->exception_index
== EXCP_PGM
) {
2986 ret
= env
->int_pgm_code
| 0x80000000;
2988 ret
|= addr
& ~TARGET_PAGE_MASK
;
2990 env
->exception_index
= old_exc
;
2992 if (!(env
->psw
.mask
& PSW_MASK_64
)) {
2993 env
->regs
[r1
] = (env
->regs
[r1
] & 0xffffffff00000000ULL
) | (ret
& 0xffffffffULL
);
2995 env
->regs
[r1
] = ret
;