s390x: add ldeb instruction
[qemu.git] / target-s390x / op_helper.c
1 /*
2 * S/390 helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "dyngen-exec.h"
23 #include "host-utils.h"
24 #include "helpers.h"
25 #include <string.h>
26 #include "kvm.h"
27 #include "qemu-timer.h"
28 #ifdef CONFIG_KVM
29 #include <linux/kvm.h>
30 #endif
31
32 /*****************************************************************************/
33 /* Softmmu support */
34 #if !defined (CONFIG_USER_ONLY)
35 #include "softmmu_exec.h"
36
37 #define MMUSUFFIX _mmu
38
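/* pull in the generated softmmu load/store helpers; each SHIFT value below
   instantiates the 1-, 2-, 4- and 8-byte access variants */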
39 #define SHIFT 0
40 #include "softmmu_template.h"
41
42 #define SHIFT 1
43 #include "softmmu_template.h"
44
45 #define SHIFT 2
46 #include "softmmu_template.h"
47
48 #define SHIFT 3
49 #include "softmmu_template.h"
50
51 /* try to fill the TLB and raise an exception on error. If retaddr is
52 NULL, it means that the function was called from C code (i.e. not
53 from generated code or from helper.c) */
54 /* XXX: fix it to restore all registers */
55 void tlb_fill(CPUState *env1, target_ulong addr, int is_write, int mmu_idx,
56 void *retaddr)
57 {
58 TranslationBlock *tb;
59 CPUState *saved_env;
60 unsigned long pc;
61 int ret;
62
63 saved_env = env;
64 env = env1;
65 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx);
66 if (unlikely(ret != 0)) {
67 if (likely(retaddr)) {
68 /* now we have a real cpu fault */
69 pc = (unsigned long)retaddr;
70 tb = tb_find_pc(pc);
71 if (likely(tb)) {
72 /* the PC is inside the translated code. It means that we have
73 a virtual CPU fault */
74 cpu_restore_state(tb, env, pc);
75 }
76 }
77 cpu_loop_exit(env);
78 }
79 env = saved_env;
80 }
81
82 #endif
83
84 /* #define DEBUG_HELPER */
85 #ifdef DEBUG_HELPER
86 #define HELPER_LOG(x...) qemu_log(x)
87 #else
88 #define HELPER_LOG(x...)
89 #endif
90
91 /* raise an exception */
92 void HELPER(exception)(uint32_t excp)
93 {
94 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
95 env->exception_index = excp;
96 cpu_loop_exit(env);
97 }
98
99 #ifndef CONFIG_USER_ONLY
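/* fast paths for MVC/XC: translate the start address once, map the guest
   page as host memory and use memset()/memmove() directly; the stb()/ldub()
   calls before cpu_abort() only serve to raise the proper exception in case
   the translation unexpectedly fails */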
100 static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
101 uint8_t byte)
102 {
103 target_phys_addr_t dest_phys;
104 target_phys_addr_t len = l;
105 void *dest_p;
106 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
107 int flags;
108
109 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
110 stb(dest, byte);
111 cpu_abort(env, "should never reach here");
112 }
113 dest_phys |= dest & ~TARGET_PAGE_MASK;
114
115 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
116
117 memset(dest_p, byte, len);
118
119 cpu_physical_memory_unmap(dest_p, 1, len, len);
120 }
121
122 static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
123 uint64_t src)
124 {
125 target_phys_addr_t dest_phys;
126 target_phys_addr_t src_phys;
127 target_phys_addr_t len = l;
128 void *dest_p;
129 void *src_p;
130 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
131 int flags;
132
133 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
134 stb(dest, 0);
135 cpu_abort(env, "should never reach here");
136 }
137 dest_phys |= dest & ~TARGET_PAGE_MASK;
138
139 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
140 ldub(src);
141 cpu_abort(env, "should never reach here");
142 }
143 src_phys |= src & ~TARGET_PAGE_MASK;
144
145 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
146 src_p = cpu_physical_memory_map(src_phys, &len, 0);
147
148 memmove(dest_p, src_p, len);
149
150 cpu_physical_memory_unmap(dest_p, 1, len, len);
151 cpu_physical_memory_unmap(src_p, 0, len, len);
152 }
153 #endif
154
155 /* and on array */
156 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
157 {
158 int i;
159 unsigned char x;
160 uint32_t cc = 0;
161
162 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
163 __FUNCTION__, l, dest, src);
164 for (i = 0; i <= l; i++) {
165 x = ldub(dest + i) & ldub(src + i);
166 if (x) {
167 cc = 1;
168 }
169 stb(dest + i, x);
170 }
171 return cc;
172 }
173
174 /* xor on array */
175 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
176 {
177 int i;
178 unsigned char x;
179 uint32_t cc = 0;
180
181 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
182 __FUNCTION__, l, dest, src);
183
184 #ifndef CONFIG_USER_ONLY
185 /* xor with itself is the same as memset(0) */
186 if ((l > 32) && (src == dest) &&
187 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
188 mvc_fast_memset(env, l + 1, dest, 0);
189 return 0;
190 }
191 #else
192 if (src == dest) {
193 memset(g2h(dest), 0, l + 1);
194 return 0;
195 }
196 #endif
197
198 for (i = 0; i <= l; i++) {
199 x = ldub(dest + i) ^ ldub(src + i);
200 if (x) {
201 cc = 1;
202 }
203 stb(dest + i, x);
204 }
205 return cc;
206 }
207
208 /* or on array */
209 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
210 {
211 int i;
212 unsigned char x;
213 uint32_t cc = 0;
214
215 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
216 __FUNCTION__, l, dest, src);
217 for (i = 0; i <= l; i++) {
218 x = ldub(dest + i) | ldub(src + i);
219 if (x) {
220 cc = 1;
221 }
222 stb(dest + i, x);
223 }
224 return cc;
225 }
226
227 /* memmove */
228 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
229 {
230 int i = 0;
231 int x = 0;
232 uint32_t l_64 = (l + 1) / 8;
233
234 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
235 __FUNCTION__, l, dest, src);
236
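/* MVC with dest == src + 1 is the architected way of filling a field with a
   single byte: every byte stored becomes the source of the next one, so the
   first source byte gets propagated - hence the memset() fast path below */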
237 #ifndef CONFIG_USER_ONLY
238 if ((l > 32) &&
239 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
240 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
241 if (dest == (src + 1)) {
242 mvc_fast_memset(env, l + 1, dest, ldub(src));
243 return;
244 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
245 mvc_fast_memmove(env, l + 1, dest, src);
246 return;
247 }
248 }
249 #else
250 if (dest == (src + 1)) {
251 memset(g2h(dest), ldub(src), l + 1);
252 return;
253 } else {
254 memmove(g2h(dest), g2h(src), l + 1);
255 return;
256 }
257 #endif
258
259 /* handle the parts that fit into 8-byte loads/stores */
260 if (dest != (src + 1)) {
261 for (i = 0; i < l_64; i++) {
262 stq(dest + x, ldq(src + x));
263 x += 8;
264 }
265 }
266
267 /* slow version crossing pages with byte accesses */
268 for (i = x; i <= l; i++) {
269 stb(dest + i, ldub(src + i));
270 }
271 }
272
273 /* compare unsigned byte arrays */
274 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
275 {
276 int i;
277 unsigned char x,y;
278 uint32_t cc;
279 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
280 __FUNCTION__, l, s1, s2);
281 for (i = 0; i <= l; i++) {
282 x = ldub(s1 + i);
283 y = ldub(s2 + i);
284 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
285 if (x < y) {
286 cc = 1;
287 goto done;
288 } else if (x > y) {
289 cc = 2;
290 goto done;
291 }
292 }
293 cc = 0;
294 done:
295 HELPER_LOG("\n");
296 return cc;
297 }
298
299 /* compare logical under mask */
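/* the mask loop below (shared with stcm/icm and their "high" variants) walks
   the four bytes of r1 from most to least significant: bit 3 of the mask
   selects the current byte, then mask and r1 are shifted left for the next
   iteration */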
300 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
301 {
302 uint8_t r,d;
303 uint32_t cc;
304 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
305 mask, addr);
306 cc = 0;
307 while (mask) {
308 if (mask & 8) {
309 d = ldub(addr);
310 r = (r1 & 0xff000000UL) >> 24;
311 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
312 addr);
313 if (r < d) {
314 cc = 1;
315 break;
316 } else if (r > d) {
317 cc = 2;
318 break;
319 }
320 addr++;
321 }
322 mask = (mask << 1) & 0xf;
323 r1 <<= 8;
324 }
325 HELPER_LOG("\n");
326 return cc;
327 }
328
329 /* store character under mask */
330 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
331 {
332 uint8_t r;
333 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
334 addr);
335 while (mask) {
336 if (mask & 8) {
337 r = (r1 & 0xff000000UL) >> 24;
338 stb(addr, r);
339 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
340 addr++;
341 }
342 mask = (mask << 1) & 0xf;
343 r1 <<= 8;
344 }
345 HELPER_LOG("\n");
346 }
347
348 /* 64/64 -> 128 unsigned multiplication */
349 void HELPER(mlg)(uint32_t r1, uint64_t v2)
350 {
351 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
352 /* assuming 64-bit hosts have __uint128_t */
353 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
354 res *= (__uint128_t)v2;
355 env->regs[r1] = (uint64_t)(res >> 64);
356 env->regs[r1 + 1] = (uint64_t)res;
357 #else
358 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
359 #endif
360 }
361
362 /* 128 -> 64/64 unsigned division */
363 void HELPER(dlg)(uint32_t r1, uint64_t v2)
364 {
365 uint64_t divisor = v2;
366
367 if (!env->regs[r1]) {
368 /* 64 -> 64/64 case */
369 env->regs[r1] = env->regs[r1+1] % divisor;
370 env->regs[r1+1] = env->regs[r1+1] / divisor;
371 return;
372 } else {
373
374 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
375 /* assuming 64-bit hosts have __uint128_t */
376 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
377 (env->regs[r1+1]);
378 __uint128_t quotient = dividend / divisor;
379 env->regs[r1+1] = quotient;
380 __uint128_t remainder = dividend % divisor;
381 env->regs[r1] = remainder;
382 #else
383 /* 32-bit hosts would need special wrapper functionality - just abort if
384 we encounter such a case; it's very unlikely anyway. */
385 cpu_abort(env, "128 -> 64/64 division not implemented\n");
386 #endif
387 }
388 }
389
390 static inline uint64_t get_address(int x2, int b2, int d2)
391 {
392 uint64_t r = d2;
393
394 if (x2) {
395 r += env->regs[x2];
396 }
397
398 if (b2) {
399 r += env->regs[b2];
400 }
401
402 /* 31-Bit mode */
403 if (!(env->psw.mask & PSW_MASK_64)) {
404 r &= 0x7fffffff;
405 }
406
407 return r;
408 }
409
410 static inline uint64_t get_address_31fix(int reg)
411 {
412 uint64_t r = env->regs[reg];
413
414 /* 31-Bit mode */
415 if (!(env->psw.mask & PSW_MASK_64)) {
416 r &= 0x7fffffff;
417 }
418
419 return r;
420 }
421
422 /* search string (c is byte to search, r2 is string, r1 end of string) */
423 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
424 {
425 uint64_t i;
426 uint32_t cc = 2;
427 uint64_t str = get_address_31fix(r2);
428 uint64_t end = get_address_31fix(r1);
429
430 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
431 c, env->regs[r1], env->regs[r2]);
432
433 for (i = str; i != end; i++) {
434 if (ldub(i) == c) {
435 env->regs[r1] = i;
436 cc = 1;
437 break;
438 }
439 }
440
441 return cc;
442 }
443
444 /* unsigned string compare (c is string terminator) */
445 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
446 {
447 uint64_t s1 = get_address_31fix(r1);
448 uint64_t s2 = get_address_31fix(r2);
449 uint8_t v1, v2;
450 uint32_t cc;
451 c = c & 0xff;
452 #ifdef CONFIG_USER_ONLY
453 if (!c) {
454 HELPER_LOG("%s: comparing '%s' and '%s'\n",
455 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
456 }
457 #endif
458 for (;;) {
459 v1 = ldub(s1);
460 v2 = ldub(s2);
461 if ((v1 == c || v2 == c) || (v1 != v2)) {
462 break;
463 }
464 s1++;
465 s2++;
466 }
467
468 if (v1 == v2) {
469 cc = 0;
470 } else {
471 cc = (v1 < v2) ? 1 : 2;
472 /* FIXME: 31-bit mode! */
473 env->regs[r1] = s1;
474 env->regs[r2] = s2;
475 }
476 return cc;
477 }
478
479 /* move page */
480 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
481 {
482 /* XXX missing r0 handling */
483 #ifdef CONFIG_USER_ONLY
484 int i;
485
486 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
487 stb(r1 + i, ldub(r2 + i));
488 }
489 #else
490 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
491 #endif
492 }
493
494 /* string copy (c is string terminator) */
495 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
496 {
497 uint64_t dest = get_address_31fix(r1);
498 uint64_t src = get_address_31fix(r2);
499 uint8_t v;
500 c = c & 0xff;
501 #ifdef CONFIG_USER_ONLY
502 if (!c) {
503 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
504 dest);
505 }
506 #endif
507 for (;;) {
508 v = ldub(src);
509 stb(dest, v);
510 if (v == c) {
511 break;
512 }
513 src++;
514 dest++;
515 }
516 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
517 }
518
519 /* compare and swap 64-bit */
520 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
521 {
522 /* FIXME: locking? */
523 uint32_t cc;
524 uint64_t v2 = ldq(a2);
525 if (env->regs[r1] == v2) {
526 cc = 0;
527 stq(a2, env->regs[r3]);
528 } else {
529 cc = 1;
530 env->regs[r1] = v2;
531 }
532 return cc;
533 }
534
535 /* compare double and swap 64-bit */
536 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
537 {
538 /* FIXME: locking? */
539 uint32_t cc;
540 uint64_t v2_hi = ldq(a2);
541 uint64_t v2_lo = ldq(a2 + 8);
542 uint64_t v1_hi = env->regs[r1];
543 uint64_t v1_lo = env->regs[r1 + 1];
544
545 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
546 cc = 0;
547 stq(a2, env->regs[r3]);
548 stq(a2 + 8, env->regs[r3 + 1]);
549 } else {
550 cc = 1;
551 env->regs[r1] = v2_hi;
552 env->regs[r1 + 1] = v2_lo;
553 }
554
555 return cc;
556 }
557
558 /* compare and swap 32-bit */
559 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
560 {
561 /* FIXME: locking? */
562 uint32_t cc;
563 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
564 uint32_t v2 = ldl(a2);
565 if (((uint32_t)env->regs[r1]) == v2) {
566 cc = 0;
567 stl(a2, (uint32_t)env->regs[r3]);
568 } else {
569 cc = 1;
570 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
571 }
572 return cc;
573 }
574
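/* insert character under mask: load the bytes selected by the mask into the
   corresponding byte positions of the low word of r1. cc is 0 if all inserted
   bits are zero (or the mask is zero), 1 if the leftmost inserted bit is one,
   2 otherwise */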
575 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
576 {
577 int pos = 24; /* top of the lower half of r1 */
578 uint64_t rmask = 0xff000000ULL;
579 uint8_t val = 0;
580 int ccd = 0;
581 uint32_t cc = 0;
582
583 while (mask) {
584 if (mask & 8) {
585 env->regs[r1] &= ~rmask;
586 val = ldub(address);
587 if ((val & 0x80) && !ccd) {
588 cc = 1;
589 }
590 ccd = 1;
591 if (val && cc == 0) {
592 cc = 2;
593 }
594 env->regs[r1] |= (uint64_t)val << pos;
595 address++;
596 }
597 mask = (mask << 1) & 0xf;
598 pos -= 8;
599 rmask >>= 8;
600 }
601
602 return cc;
603 }
604
605 /* execute instruction
606 this instruction executes an insn modified with the contents of r1.
607 it does not change the executed instruction in memory,
608 nor does it change the program counter.
609 in other words: tricky...
610 currently implemented by interpreting the cases it is most commonly used in.
611 */
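/* architecturally EX ORs bits 56-63 of r1 into byte 1 of the target
   instruction (unless the r1 field is 0). The 0xd0xx case below only matches
   SS-format targets whose own length byte is zero, so the effective length
   comes from r1 alone; for SVC the OR with the immediate is done explicitly */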
612 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
613 {
614 uint16_t insn = lduw_code(addr);
615 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
616 insn);
617 if ((insn & 0xf0ff) == 0xd000) {
618 uint32_t l, insn2, b1, b2, d1, d2;
619 l = v1 & 0xff;
620 insn2 = ldl_code(addr + 2);
621 b1 = (insn2 >> 28) & 0xf;
622 b2 = (insn2 >> 12) & 0xf;
623 d1 = (insn2 >> 16) & 0xfff;
624 d2 = insn2 & 0xfff;
625 switch (insn & 0xf00) {
626 case 0x200:
627 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
628 break;
629 case 0x500:
630 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
631 break;
632 case 0x700:
633 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
634 break;
635 default:
636 goto abort;
637 break;
638 }
639 } else if ((insn & 0xff00) == 0x0a00) {
640 /* supervisor call */
641 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
642 env->psw.addr = ret - 4;
643 env->int_svc_code = (insn|v1) & 0xff;
644 env->int_svc_ilc = 4;
645 helper_exception(EXCP_SVC);
646 } else if ((insn & 0xff00) == 0xbf00) {
647 uint32_t insn2, r1, r3, b2, d2;
648 insn2 = ldl_code(addr + 2);
649 r1 = (insn2 >> 20) & 0xf;
650 r3 = (insn2 >> 16) & 0xf;
651 b2 = (insn2 >> 12) & 0xf;
652 d2 = insn2 & 0xfff;
653 cc = helper_icm(r1, get_address(0, b2, d2), r3);
654 } else {
655 abort:
656 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
657 insn);
658 }
659 return cc;
660 }
661
662 /* absolute value 32-bit */
663 uint32_t HELPER(abs_i32)(int32_t val)
664 {
665 if (val < 0) {
666 return -val;
667 } else {
668 return val;
669 }
670 }
671
672 /* negative absolute value 32-bit */
673 int32_t HELPER(nabs_i32)(int32_t val)
674 {
675 if (val < 0) {
676 return val;
677 } else {
678 return -val;
679 }
680 }
681
682 /* absolute value 64-bit */
683 uint64_t HELPER(abs_i64)(int64_t val)
684 {
685 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
686
687 if (val < 0) {
688 return -val;
689 } else {
690 return val;
691 }
692 }
693
694 /* negative absolute value 64-bit */
695 int64_t HELPER(nabs_i64)(int64_t val)
696 {
697 if (val < 0) {
698 return val;
699 } else {
700 return -val;
701 }
702 }
703
704 /* add with carry 32-bit unsigned */
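/* bit 1 of the condition code (cc values 2 and 3) is the carry out of the
   previous logical add, so "cc & 2" is the carry-in here */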
705 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
706 {
707 uint32_t res;
708
709 res = v1 + v2;
710 if (cc & 2) {
711 res++;
712 }
713
714 return res;
715 }
716
717 /* store character under mask high; operates on the upper half of r1 */
718 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
719 {
720 int pos = 56; /* top of the upper half of r1 */
721
722 while (mask) {
723 if (mask & 8) {
724 stb(address, (env->regs[r1] >> pos) & 0xff);
725 address++;
726 }
727 mask = (mask << 1) & 0xf;
728 pos -= 8;
729 }
730 }
731
732 /* insert character under mask high; same as icm, but operates on the
733 upper half of r1 */
734 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
735 {
736 int pos = 56; /* top of the upper half of r1 */
737 uint64_t rmask = 0xff00000000000000ULL;
738 uint8_t val = 0;
739 int ccd = 0;
740 uint32_t cc = 0;
741
742 while (mask) {
743 if (mask & 8) {
744 env->regs[r1] &= ~rmask;
745 val = ldub(address);
746 if ((val & 0x80) && !ccd) {
747 cc = 1;
748 }
749 ccd = 1;
750 if (val && cc == 0) {
751 cc = 2;
752 }
753 env->regs[r1] |= (uint64_t)val << pos;
754 address++;
755 }
756 mask = (mask << 1) & 0xf;
757 pos -= 8;
758 rmask >>= 8;
759 }
760
761 return cc;
762 }
763
764 /* insert psw mask and condition code into r1 */
765 void HELPER(ipm)(uint32_t cc, uint32_t r1)
766 {
767 uint64_t r = env->regs[r1];
768
769 r &= 0xffffffff00ffffffULL;
770 r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf );
771 env->regs[r1] = r;
772 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
773 cc, env->psw.mask, r);
774 }
775
776 /* load access registers r1 to r3 from memory at a2 */
777 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
778 {
779 int i;
780
781 for (i = r1;; i = (i + 1) % 16) {
782 env->aregs[i] = ldl(a2);
783 a2 += 4;
784
785 if (i == r3) {
786 break;
787 }
788 }
789 }
790
791 /* store access registers r1 to r3 in memory at a2 */
792 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
793 {
794 int i;
795
796 for (i = r1;; i = (i + 1) % 16) {
797 stl(a2, env->aregs[i]);
798 a2 += 4;
799
800 if (i == r3) {
801 break;
802 }
803 }
804 }
805
806 /* move long */
807 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
808 {
809 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
810 uint64_t dest = get_address_31fix(r1);
811 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
812 uint64_t src = get_address_31fix(r2);
813 uint8_t pad = src >> 24;
814 uint8_t v;
815 uint32_t cc;
816
817 if (destlen == srclen) {
818 cc = 0;
819 } else if (destlen < srclen) {
820 cc = 1;
821 } else {
822 cc = 2;
823 }
824
825 if (srclen > destlen) {
826 srclen = destlen;
827 }
828
829 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
830 v = ldub(src);
831 stb(dest, v);
832 }
833
834 for (; destlen; dest++, destlen--) {
835 stb(dest, pad);
836 }
837
838 env->regs[r1 + 1] = destlen;
839 /* can't use srclen here, we trunc'ed it */
840 env->regs[r2 + 1] -= src - env->regs[r2];
841 env->regs[r1] = dest;
842 env->regs[r2] = src;
843
844 return cc;
845 }
846
847 /* move long extended: another memcopy insn with more bells and whistles */
848 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
849 {
850 uint64_t destlen = env->regs[r1 + 1];
851 uint64_t dest = env->regs[r1];
852 uint64_t srclen = env->regs[r3 + 1];
853 uint64_t src = env->regs[r3];
854 uint8_t pad = a2 & 0xff;
855 uint8_t v;
856 uint32_t cc;
857
858 if (!(env->psw.mask & PSW_MASK_64)) {
859 destlen = (uint32_t)destlen;
860 srclen = (uint32_t)srclen;
861 dest &= 0x7fffffff;
862 src &= 0x7fffffff;
863 }
864
865 if (destlen == srclen) {
866 cc = 0;
867 } else if (destlen < srclen) {
868 cc = 1;
869 } else {
870 cc = 2;
871 }
872
873 if (srclen > destlen) {
874 srclen = destlen;
875 }
876
877 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
878 v = ldub(src);
879 stb(dest, v);
880 }
881
882 for (; destlen; dest++, destlen--) {
883 stb(dest, pad);
884 }
885
886 env->regs[r1 + 1] = destlen;
887 /* can't use srclen here, we trunc'ed it */
888 /* FIXME: 31-bit mode! */
889 env->regs[r3 + 1] -= src - env->regs[r3];
890 env->regs[r1] = dest;
891 env->regs[r3] = src;
892
893 return cc;
894 }
895
896 /* compare logical long extended: memcompare insn with padding */
897 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
898 {
899 uint64_t destlen = env->regs[r1 + 1];
900 uint64_t dest = get_address_31fix(r1);
901 uint64_t srclen = env->regs[r3 + 1];
902 uint64_t src = get_address_31fix(r3);
903 uint8_t pad = a2 & 0xff;
904 uint8_t v1 = 0,v2 = 0;
905 uint32_t cc = 0;
906
907 if (!(destlen || srclen)) {
908 return cc;
909 }
910
911 if (srclen > destlen) {
912 srclen = destlen;
913 }
914
915 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
916 v1 = srclen ? ldub(src) : pad;
917 v2 = destlen ? ldub(dest) : pad;
918 if (v1 != v2) {
919 cc = (v1 < v2) ? 1 : 2;
920 break;
921 }
922 }
923
924 env->regs[r1 + 1] = destlen;
925 /* can't use srclen here, we trunc'ed it */
926 env->regs[r3 + 1] -= src - env->regs[r3];
927 env->regs[r1] = dest;
928 env->regs[r3] = src;
929
930 return cc;
931 }
932
933 /* subtract unsigned v2 from v1 with borrow */
934 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
935 {
936 uint32_t v1 = env->regs[r1];
937 uint32_t res = v1 + (~v2) + (cc >> 1);
938
939 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
940 if (cc & 2) {
941 /* borrow */
942 return v1 ? 1 : 0;
943 } else {
944 return v1 ? 3 : 2;
945 }
946 }
947
948 /* subtract unsigned v2 from v1 with borrow */
949 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
950 {
951 uint64_t res = v1 + (~v2) + (cc >> 1);
952
953 env->regs[r1] = res;
954 if (cc & 2) {
955 /* borrow */
956 return v1 ? 1 : 0;
957 } else {
958 return v1 ? 3 : 2;
959 }
960 }
961
962 static inline int float_comp_to_cc(int float_compare)
963 {
964 switch (float_compare) {
965 case float_relation_equal:
966 return 0;
967 case float_relation_less:
968 return 1;
969 case float_relation_greater:
970 return 2;
971 case float_relation_unordered:
972 return 3;
973 default:
974 cpu_abort(env, "unknown return value for float compare\n");
975 }
976 }
977
978 /* condition codes for binary FP ops */
979 static uint32_t set_cc_f32(float32 v1, float32 v2)
980 {
981 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
982 }
983
984 static uint32_t set_cc_f64(float64 v1, float64 v2)
985 {
986 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
987 }
988
989 /* condition codes for unary FP ops */
990 static uint32_t set_cc_nz_f32(float32 v)
991 {
992 if (float32_is_any_nan(v)) {
993 return 3;
994 } else if (float32_is_zero(v)) {
995 return 0;
996 } else if (float32_is_neg(v)) {
997 return 1;
998 } else {
999 return 2;
1000 }
1001 }
1002
1003 static uint32_t set_cc_nz_f64(float64 v)
1004 {
1005 if (float64_is_any_nan(v)) {
1006 return 3;
1007 } else if (float64_is_zero(v)) {
1008 return 0;
1009 } else if (float64_is_neg(v)) {
1010 return 1;
1011 } else {
1012 return 2;
1013 }
1014 }
1015
1016 static uint32_t set_cc_nz_f128(float128 v)
1017 {
1018 if (float128_is_any_nan(v)) {
1019 return 3;
1020 } else if (float128_is_zero(v)) {
1021 return 0;
1022 } else if (float128_is_neg(v)) {
1023 return 1;
1024 } else {
1025 return 2;
1026 }
1027 }
1028
1029 /* convert 32-bit int to 64-bit float */
1030 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1031 {
1032 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1033 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1034 }
1035
1036 /* convert 32-bit int to 128-bit float */
1037 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1038 {
1039 CPU_QuadU v1;
1040 v1.q = int32_to_float128(v2, &env->fpu_status);
1041 env->fregs[f1].ll = v1.ll.upper;
1042 env->fregs[f1 + 2].ll = v1.ll.lower;
1043 }
1044
1045 /* convert 64-bit int to 32-bit float */
1046 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1047 {
1048 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1049 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1050 }
1051
1052 /* convert 64-bit int to 64-bit float */
1053 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1054 {
1055 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1056 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1057 }
1058
1059 /* convert 64-bit int to 128-bit float */
1060 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1061 {
1062 CPU_QuadU x1;
1063 x1.q = int64_to_float128(v2, &env->fpu_status);
1064 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1065 x1.ll.upper, x1.ll.lower);
1066 env->fregs[f1].ll = x1.ll.upper;
1067 env->fregs[f1 + 2].ll = x1.ll.lower;
1068 }
1069
1070 /* convert 32-bit int to 32-bit float */
1071 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1072 {
1073 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1074 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1075 env->fregs[f1].l.upper, f1);
1076 }
1077
1078 /* 32-bit FP addition RR */
1079 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1080 {
1081 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1082 env->fregs[f2].l.upper,
1083 &env->fpu_status);
1084 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1085 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1086
1087 return set_cc_nz_f32(env->fregs[f1].l.upper);
1088 }
1089
1090 /* 64-bit FP addition RR */
1091 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1092 {
1093 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1094 &env->fpu_status);
1095 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1096 env->fregs[f2].d, env->fregs[f1].d, f1);
1097
1098 return set_cc_nz_f64(env->fregs[f1].d);
1099 }
1100
1101 /* 32-bit FP subtraction RR */
1102 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1103 {
1104 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1105 env->fregs[f2].l.upper,
1106 &env->fpu_status);
1107 HELPER_LOG("%s: subtracting 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1108 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1109
1110 return set_cc_nz_f32(env->fregs[f1].l.upper);
1111 }
1112
1113 /* 64-bit FP subtraction RR */
1114 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1115 {
1116 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1117 &env->fpu_status);
1118 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1119 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1120
1121 return set_cc_nz_f64(env->fregs[f1].d);
1122 }
1123
1124 /* 32-bit FP division RR */
1125 void HELPER(debr)(uint32_t f1, uint32_t f2)
1126 {
1127 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1128 env->fregs[f2].l.upper,
1129 &env->fpu_status);
1130 }
1131
1132 /* 128-bit FP division RR */
1133 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1134 {
1135 CPU_QuadU v1;
1136 v1.ll.upper = env->fregs[f1].ll;
1137 v1.ll.lower = env->fregs[f1 + 2].ll;
1138 CPU_QuadU v2;
1139 v2.ll.upper = env->fregs[f2].ll;
1140 v2.ll.lower = env->fregs[f2 + 2].ll;
1141 CPU_QuadU res;
1142 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1143 env->fregs[f1].ll = res.ll.upper;
1144 env->fregs[f1 + 2].ll = res.ll.lower;
1145 }
1146
1147 /* 64-bit FP multiplication RR */
1148 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1149 {
1150 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1151 &env->fpu_status);
1152 }
1153
1154 /* 128-bit FP multiplication RR */
1155 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1156 {
1157 CPU_QuadU v1;
1158 v1.ll.upper = env->fregs[f1].ll;
1159 v1.ll.lower = env->fregs[f1 + 2].ll;
1160 CPU_QuadU v2;
1161 v2.ll.upper = env->fregs[f2].ll;
1162 v2.ll.lower = env->fregs[f2 + 2].ll;
1163 CPU_QuadU res;
1164 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1165 env->fregs[f1].ll = res.ll.upper;
1166 env->fregs[f1 + 2].ll = res.ll.lower;
1167 }
1168
1169 /* convert 32-bit float to 64-bit float */
1170 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1171 {
1172 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1173 &env->fpu_status);
1174 }
1175
1176 /* convert 128-bit float to 64-bit float */
1177 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1178 {
1179 CPU_QuadU x2;
1180 x2.ll.upper = env->fregs[f2].ll;
1181 x2.ll.lower = env->fregs[f2 + 2].ll;
1182 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1183 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1184 }
1185
1186 /* convert 64-bit float to 128-bit float */
1187 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1188 {
1189 CPU_QuadU res;
1190 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1191 env->fregs[f1].ll = res.ll.upper;
1192 env->fregs[f1 + 2].ll = res.ll.lower;
1193 }
1194
1195 /* convert 64-bit float to 32-bit float */
1196 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1197 {
1198 float64 d2 = env->fregs[f2].d;
1199 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1200 }
1201
1202 /* convert 128-bit float to 32-bit float */
1203 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1204 {
1205 CPU_QuadU x2;
1206 x2.ll.upper = env->fregs[f2].ll;
1207 x2.ll.lower = env->fregs[f2 + 2].ll;
1208 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1209 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1210 }
1211
1212 /* absolute value of 32-bit float */
1213 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1214 {
1215 float32 v1;
1216 float32 v2 = env->fregs[f2].l.upper;
1217 v1 = float32_abs(v2);
1218 env->fregs[f1].l.upper = v1;
1219 return set_cc_nz_f32(v1);
1220 }
1221
1222 /* absolute value of 64-bit float */
1223 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1224 {
1225 float64 v1;
1226 float64 v2 = env->fregs[f2].d;
1227 v1 = float64_abs(v2);
1228 env->fregs[f1].d = v1;
1229 return set_cc_nz_f64(v1);
1230 }
1231
1232 /* absolute value of 128-bit float */
1233 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1234 {
1235 CPU_QuadU v1;
1236 CPU_QuadU v2;
1237 v2.ll.upper = env->fregs[f2].ll;
1238 v2.ll.lower = env->fregs[f2 + 2].ll;
1239 v1.q = float128_abs(v2.q);
1240 env->fregs[f1].ll = v1.ll.upper;
1241 env->fregs[f1 + 2].ll = v1.ll.lower;
1242 return set_cc_nz_f128(v1.q);
1243 }
1244
1245 /* load and test 64-bit float */
1246 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1247 {
1248 env->fregs[f1].d = env->fregs[f2].d;
1249 return set_cc_nz_f64(env->fregs[f1].d);
1250 }
1251
1252 /* load and test 32-bit float */
1253 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1254 {
1255 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1256 return set_cc_nz_f32(env->fregs[f1].l.upper);
1257 }
1258
1259 /* load and test 128-bit float */
1260 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1261 {
1262 CPU_QuadU x;
1263 x.ll.upper = env->fregs[f2].ll;
1264 x.ll.lower = env->fregs[f2 + 2].ll;
1265 env->fregs[f1].ll = x.ll.upper;
1266 env->fregs[f1 + 2].ll = x.ll.lower;
1267 return set_cc_nz_f128(x.q);
1268 }
1269
1270 /* load complement of 32-bit float */
1271 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1272 {
1273 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1274
1275 return set_cc_nz_f32(env->fregs[f1].l.upper);
1276 }
1277
1278 /* load complement of 64-bit float */
1279 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1280 {
1281 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1282
1283 return set_cc_nz_f64(env->fregs[f1].d);
1284 }
1285
1286 /* load complement of 128-bit float */
1287 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1288 {
1289 CPU_QuadU x1, x2;
1290 x2.ll.upper = env->fregs[f2].ll;
1291 x2.ll.lower = env->fregs[f2 + 2].ll;
1292 x1.q = float128_chs(x2.q);
1293 env->fregs[f1].ll = x1.ll.upper;
1294 env->fregs[f1 + 2].ll = x1.ll.lower;
1295 return set_cc_nz_f128(x1.q);
1296 }
1297
1298 /* 32-bit FP addition RM */
1299 void HELPER(aeb)(uint32_t f1, uint32_t val)
1300 {
1301 float32 v1 = env->fregs[f1].l.upper;
1302 CPU_FloatU v2;
1303 v2.l = val;
1304 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1305 v1, f1, v2.f);
1306 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1307 }
1308
1309 /* 32-bit FP division RM */
1310 void HELPER(deb)(uint32_t f1, uint32_t val)
1311 {
1312 float32 v1 = env->fregs[f1].l.upper;
1313 CPU_FloatU v2;
1314 v2.l = val;
1315 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1316 v1, f1, v2.f);
1317 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1318 }
1319
1320 /* 32-bit FP multiplication RM */
1321 void HELPER(meeb)(uint32_t f1, uint32_t val)
1322 {
1323 float32 v1 = env->fregs[f1].l.upper;
1324 CPU_FloatU v2;
1325 v2.l = val;
1326 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1327 v1, f1, v2.f);
1328 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1329 }
1330
1331 /* 32-bit FP compare RR */
1332 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1333 {
1334 float32 v1 = env->fregs[f1].l.upper;
1335 float32 v2 = env->fregs[f2].l.upper;
1336 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1337 v1, f1, v2);
1338 return set_cc_f32(v1, v2);
1339 }
1340
1341 /* 64-bit FP compare RR */
1342 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1343 {
1344 float64 v1 = env->fregs[f1].d;
1345 float64 v2 = env->fregs[f2].d;
1346 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1347 v1, f1, v2);
1348 return set_cc_f64(v1, v2);
1349 }
1350
1351 /* 128-bit FP compare RR */
1352 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1353 {
1354 CPU_QuadU v1;
1355 v1.ll.upper = env->fregs[f1].ll;
1356 v1.ll.lower = env->fregs[f1 + 2].ll;
1357 CPU_QuadU v2;
1358 v2.ll.upper = env->fregs[f2].ll;
1359 v2.ll.lower = env->fregs[f2 + 2].ll;
1360
1361 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1362 &env->fpu_status));
1363 }
1364
1365 /* 64-bit FP compare RM */
1366 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1367 {
1368 float64 v1 = env->fregs[f1].d;
1369 CPU_DoubleU v2;
1370 v2.ll = ldq(a2);
1371 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1372 f1, v2.d);
1373 return set_cc_f64(v1, v2.d);
1374 }
1375
1376 /* 64-bit FP addition RM */
1377 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1378 {
1379 float64 v1 = env->fregs[f1].d;
1380 CPU_DoubleU v2;
1381 v2.ll = ldq(a2);
1382 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1383 v1, f1, v2.d);
1384 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1385 return set_cc_nz_f64(v1);
1386 }
1387
1388 /* 32-bit FP subtraction RM */
1389 void HELPER(seb)(uint32_t f1, uint32_t val)
1390 {
1391 float32 v1 = env->fregs[f1].l.upper;
1392 CPU_FloatU v2;
1393 v2.l = val;
1394 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1395 }
1396
1397 /* 64-bit FP subtraction RM */
1398 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1399 {
1400 float64 v1 = env->fregs[f1].d;
1401 CPU_DoubleU v2;
1402 v2.ll = ldq(a2);
1403 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1404 return set_cc_nz_f64(v1);
1405 }
1406
1407 /* 64-bit FP multiplication RM */
1408 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1409 {
1410 float64 v1 = env->fregs[f1].d;
1411 CPU_DoubleU v2;
1412 v2.ll = ldq(a2);
1413 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1414 v1, f1, v2.d);
1415 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1416 }
1417
1418 /* 64-bit FP division RM */
1419 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1420 {
1421 float64 v1 = env->fregs[f1].d;
1422 CPU_DoubleU v2;
1423 v2.ll = ldq(a2);
1424 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1425 v1, f1, v2.d);
1426 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1427 }
1428
1429 static void set_round_mode(int m3)
1430 {
1431 switch (m3) {
1432 case 0:
1433 /* current mode */
1434 break;
1435 case 1:
1436 /* biased round to nearest */
1437 case 4:
1438 /* round to nearest */
1439 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1440 break;
1441 case 5:
1442 /* round to zero */
1443 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1444 break;
1445 case 6:
1446 /* round to +inf */
1447 set_float_rounding_mode(float_round_up, &env->fpu_status);
1448 break;
1449 case 7:
1450 /* round to -inf */
1451 set_float_rounding_mode(float_round_down, &env->fpu_status);
1452 break;
1453 }
1454 }
1455
1456 /* convert 32-bit float to 64-bit int */
1457 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1458 {
1459 float32 v2 = env->fregs[f2].l.upper;
1460 set_round_mode(m3);
1461 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1462 return set_cc_nz_f32(v2);
1463 }
1464
1465 /* convert 64-bit float to 64-bit int */
1466 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1467 {
1468 float64 v2 = env->fregs[f2].d;
1469 set_round_mode(m3);
1470 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1471 return set_cc_nz_f64(v2);
1472 }
1473
1474 /* convert 128-bit float to 64-bit int */
1475 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1476 {
1477 CPU_QuadU v2;
1478 v2.ll.upper = env->fregs[f2].ll;
1479 v2.ll.lower = env->fregs[f2 + 2].ll;
1480 set_round_mode(m3);
1481 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1482 if (float128_is_any_nan(v2.q)) {
1483 return 3;
1484 } else if (float128_is_zero(v2.q)) {
1485 return 0;
1486 } else if (float128_is_neg(v2.q)) {
1487 return 1;
1488 } else {
1489 return 2;
1490 }
1491 }
1492
1493 /* convert 32-bit float to 32-bit int */
1494 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1495 {
1496 float32 v2 = env->fregs[f2].l.upper;
1497 set_round_mode(m3);
1498 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1499 float32_to_int32(v2, &env->fpu_status);
1500 return set_cc_nz_f32(v2);
1501 }
1502
1503 /* convert 64-bit float to 32-bit int */
1504 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1505 {
1506 float64 v2 = env->fregs[f2].d;
1507 set_round_mode(m3);
1508 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1509 float64_to_int32(v2, &env->fpu_status);
1510 return set_cc_nz_f64(v2);
1511 }
1512
1513 /* convert 128-bit float to 32-bit int */
1514 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1515 {
1516 CPU_QuadU v2;
1517 v2.ll.upper = env->fregs[f2].ll;
1518 v2.ll.lower = env->fregs[f2 + 2].ll;
1519 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1520 float128_to_int32(v2.q, &env->fpu_status);
1521 return set_cc_nz_f128(v2.q);
1522 }
1523
1524 /* load 32-bit FP zero */
1525 void HELPER(lzer)(uint32_t f1)
1526 {
1527 env->fregs[f1].l.upper = float32_zero;
1528 }
1529
1530 /* load 64-bit FP zero */
1531 void HELPER(lzdr)(uint32_t f1)
1532 {
1533 env->fregs[f1].d = float64_zero;
1534 }
1535
1536 /* load 128-bit FP zero */
1537 void HELPER(lzxr)(uint32_t f1)
1538 {
1539 CPU_QuadU x;
1540 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1541 env->fregs[f1].ll = x.ll.upper;
1542 env->fregs[f1 + 2].ll = x.ll.lower;
1543 }
1544
1545 /* 128-bit FP subtraction RR */
1546 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1547 {
1548 CPU_QuadU v1;
1549 v1.ll.upper = env->fregs[f1].ll;
1550 v1.ll.lower = env->fregs[f1 + 2].ll;
1551 CPU_QuadU v2;
1552 v2.ll.upper = env->fregs[f2].ll;
1553 v2.ll.lower = env->fregs[f2 + 2].ll;
1554 CPU_QuadU res;
1555 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1556 env->fregs[f1].ll = res.ll.upper;
1557 env->fregs[f1 + 2].ll = res.ll.lower;
1558 return set_cc_nz_f128(res.q);
1559 }
1560
1561 /* 128-bit FP addition RR */
1562 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1563 {
1564 CPU_QuadU v1;
1565 v1.ll.upper = env->fregs[f1].ll;
1566 v1.ll.lower = env->fregs[f1 + 2].ll;
1567 CPU_QuadU v2;
1568 v2.ll.upper = env->fregs[f2].ll;
1569 v2.ll.lower = env->fregs[f2 + 2].ll;
1570 CPU_QuadU res;
1571 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1572 env->fregs[f1].ll = res.ll.upper;
1573 env->fregs[f1 + 2].ll = res.ll.lower;
1574 return set_cc_nz_f128(res.q);
1575 }
1576
1577 /* 32-bit FP multiplication RR */
1578 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1579 {
1580 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1581 env->fregs[f2].l.upper,
1582 &env->fpu_status);
1583 }
1584
1585 /* 64-bit FP division RR */
1586 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1587 {
1588 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1589 &env->fpu_status);
1590 }
1591
1592 /* 64-bit FP multiply and add RM */
1593 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1594 {
1595 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1596 CPU_DoubleU v2;
1597 v2.ll = ldq(a2);
1598 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1599 float64_mul(v2.d, env->fregs[f3].d,
1600 &env->fpu_status),
1601 &env->fpu_status);
1602 }
1603
1604 /* 64-bit FP multiply and add RR */
1605 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1606 {
1607 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1608 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1609 env->fregs[f3].d,
1610 &env->fpu_status),
1611 env->fregs[f1].d, &env->fpu_status);
1612 }
1613
1614 /* 64-bit FP multiply and subtract RR */
1615 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1616 {
1617 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1618 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1619 env->fregs[f3].d,
1620 &env->fpu_status),
1621 env->fregs[f1].d, &env->fpu_status);
1622 }
1623
1624 /* 32-bit FP multiply and add RR */
1625 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1626 {
1627 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1628 float32_mul(env->fregs[f2].l.upper,
1629 env->fregs[f3].l.upper,
1630 &env->fpu_status),
1631 &env->fpu_status);
1632 }
1633
1634 /* convert 32-bit float to 64-bit float */
1635 void HELPER(ldeb)(uint32_t f1, uint64_t a2)
1636 {
1637 uint32_t v2;
1638 v2 = ldl(a2);
1639 env->fregs[f1].d = float32_to_float64(v2,
1640 &env->fpu_status);
1641 }
1642
1643 /* convert 64-bit float to 128-bit float */
1644 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1645 {
1646 CPU_DoubleU v2;
1647 v2.ll = ldq(a2);
1648 CPU_QuadU v1;
1649 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1650 env->fregs[f1].ll = v1.ll.upper;
1651 env->fregs[f1 + 2].ll = v1.ll.lower;
1652 }
1653
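/* TEST DATA CLASS: the m2 mask bits checked below select, from bit 11 down,
   +/-zero (11/10), +/-normal (9/8), +/-infinity (5/4), +/-quiet NaN (3/2)
   and +/-signaling NaN (1/0); subnormals (bits 7/6) are not handled yet,
   hence the FIXMEs below */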
1654 /* test data class 32-bit */
1655 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1656 {
1657 float32 v1 = env->fregs[f1].l.upper;
1658 int neg = float32_is_neg(v1);
1659 uint32_t cc = 0;
1660
1661 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1662 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1663 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1664 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1665 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1666 cc = 1;
1667 } else if (m2 & (1 << (9-neg))) {
1668 /* assume normalized number */
1669 cc = 1;
1670 }
1671
1672 /* FIXME: denormalized? */
1673 return cc;
1674 }
1675
1676 /* test data class 64-bit */
1677 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1678 {
1679 float64 v1 = env->fregs[f1].d;
1680 int neg = float64_is_neg(v1);
1681 uint32_t cc = 0;
1682
1683 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1684 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1685 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1686 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1687 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1688 cc = 1;
1689 } else if (m2 & (1 << (9-neg))) {
1690 /* assume normalized number */
1691 cc = 1;
1692 }
1693 /* FIXME: denormalized? */
1694 return cc;
1695 }
1696
1697 /* test data class 128-bit */
1698 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1699 {
1700 CPU_QuadU v1;
1701 uint32_t cc = 0;
1702 v1.ll.upper = env->fregs[f1].ll;
1703 v1.ll.lower = env->fregs[f1 + 2].ll;
1704
1705 int neg = float128_is_neg(v1.q);
1706 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1707 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1708 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1709 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1710 cc = 1;
1711 } else if (m2 & (1 << (9-neg))) {
1712 /* assume normalized number */
1713 cc = 1;
1714 }
1715 /* FIXME: denormalized? */
1716 return cc;
1717 }
1718
1719 /* find leftmost one */
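/* r1 receives the bit position of the leftmost one counted from the left
   (64 if v2 is zero), r1+1 receives v2 with that bit cleared; cc 0 means no
   one bit was found, cc 2 means one was found */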
1720 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1721 {
1722 uint64_t res = 0;
1723 uint64_t ov2 = v2;
1724
1725 while (!(v2 & 0x8000000000000000ULL) && v2) {
1726 v2 <<= 1;
1727 res++;
1728 }
1729
1730 if (!v2) {
1731 env->regs[r1] = 64;
1732 env->regs[r1 + 1] = 0;
1733 return 0;
1734 } else {
1735 env->regs[r1] = res;
1736 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1737 return 2;
1738 }
1739 }
1740
1741 /* square root 64-bit RR */
1742 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1743 {
1744 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1745 }
1746
1747 /* checksum */
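/* CKSM sums the operand as 32-bit words in a 64-bit accumulator; the final
   "(uint32_t)cksm + (cksm >> 32)" folds the accumulated carries back into
   the low word (end-around carry) */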
1748 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1749 {
1750 uint64_t src = get_address_31fix(r2);
1751 uint64_t src_len = env->regs[(r2 + 1) & 15];
1752 uint64_t cksm = (uint32_t)env->regs[r1];
1753
1754 while (src_len >= 4) {
1755 cksm += ldl(src);
1756
1757 /* move to next word */
1758 src_len -= 4;
1759 src += 4;
1760 }
1761
1762 switch (src_len) {
1763 case 0:
1764 break;
1765 case 1:
1766 cksm += ldub(src) << 24;
1767 break;
1768 case 2:
1769 cksm += lduw(src) << 16;
1770 break;
1771 case 3:
1772 cksm += lduw(src) << 16;
1773 cksm += ldub(src + 2) << 8;
1774 break;
1775 }
1776
1777 /* indicate we've processed everything */
1778 env->regs[r2] = src + src_len;
1779 env->regs[(r2 + 1) & 15] = 0;
1780
1781 /* store result */
1782 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1783 ((uint32_t)cksm + (cksm >> 32));
1784 }
1785
1786 static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1787 int32_t dst)
1788 {
1789 if (src == dst) {
1790 return 0;
1791 } else if (src < dst) {
1792 return 1;
1793 } else {
1794 return 2;
1795 }
1796 }
1797
1798 static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1799 {
1800 return cc_calc_ltgt_32(env, dst, 0);
1801 }
1802
1803 static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1804 int64_t dst)
1805 {
1806 if (src == dst) {
1807 return 0;
1808 } else if (src < dst) {
1809 return 1;
1810 } else {
1811 return 2;
1812 }
1813 }
1814
1815 static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1816 {
1817 return cc_calc_ltgt_64(env, dst, 0);
1818 }
1819
1820 static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1821 uint32_t dst)
1822 {
1823 if (src == dst) {
1824 return 0;
1825 } else if (src < dst) {
1826 return 1;
1827 } else {
1828 return 2;
1829 }
1830 }
1831
1832 static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1833 uint64_t dst)
1834 {
1835 if (src == dst) {
1836 return 0;
1837 } else if (src < dst) {
1838 return 1;
1839 } else {
1840 return 2;
1841 }
1842 }
1843
1844 static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1845 {
1846 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1847 uint16_t r = val & mask;
1848 if (r == 0 || mask == 0) {
1849 return 0;
1850 } else if (r == mask) {
1851 return 3;
1852 } else {
1853 return 1;
1854 }
1855 }
1856
1857 /* set condition code for test under mask */
1858 static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1859 {
1860 uint16_t r = val & mask;
1861 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1862 if (r == 0 || mask == 0) {
1863 return 0;
1864 } else if (r == mask) {
1865 return 3;
1866 } else {
1867 while (!(mask & 0x8000)) {
1868 mask <<= 1;
1869 val <<= 1;
1870 }
1871 if (val & 0x8000) {
1872 return 2;
1873 } else {
1874 return 1;
1875 }
1876 }
1877 }
1878
1879 static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1880 {
1881 return !!dst;
1882 }
1883
1884 static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1885 int64_t ar)
1886 {
1887 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1888 return 3; /* overflow */
1889 } else {
1890 if (ar < 0) {
1891 return 1;
1892 } else if (ar > 0) {
1893 return 2;
1894 } else {
1895 return 0;
1896 }
1897 }
1898 }
1899
1900 static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1901 uint64_t ar)
1902 {
1903 if (ar == 0) {
1904 if (a1) {
1905 return 2;
1906 } else {
1907 return 0;
1908 }
1909 } else {
1910 if (ar < a1 || ar < a2) {
1911 return 3;
1912 } else {
1913 return 1;
1914 }
1915 }
1916 }
1917
1918 static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1919 int64_t ar)
1920 {
1921 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1922 return 3; /* overflow */
1923 } else {
1924 if (ar < 0) {
1925 return 1;
1926 } else if (ar > 0) {
1927 return 2;
1928 } else {
1929 return 0;
1930 }
1931 }
1932 }
1933
1934 static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1935 uint64_t ar)
1936 {
1937 if (ar == 0) {
1938 return 2;
1939 } else {
1940 if (a2 > a1) {
1941 return 1;
1942 } else {
1943 return 3;
1944 }
1945 }
1946 }
1947
1948 static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1949 {
1950 if ((uint64_t)dst == 0x8000000000000000ULL) {
1951 return 3;
1952 } else if (dst) {
1953 return 1;
1954 } else {
1955 return 0;
1956 }
1957 }
1958
1959 static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1960 {
1961 return !!dst;
1962 }
1963
1964 static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1965 {
1966 if ((uint64_t)dst == 0x8000000000000000ULL) {
1967 return 3;
1968 } else if (dst < 0) {
1969 return 1;
1970 } else if (dst > 0) {
1971 return 2;
1972 } else {
1973 return 0;
1974 }
1975 }
1976
1977
1978 static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
1979 int32_t ar)
1980 {
1981 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1982 return 3; /* overflow */
1983 } else {
1984 if (ar < 0) {
1985 return 1;
1986 } else if (ar > 0) {
1987 return 2;
1988 } else {
1989 return 0;
1990 }
1991 }
1992 }
1993
1994 static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
1995 uint32_t ar)
1996 {
1997 if (ar == 0) {
1998 if (a1) {
1999 return 2;
2000 } else {
2001 return 0;
2002 }
2003 } else {
2004 if (ar < a1 || ar < a2) {
2005 return 3;
2006 } else {
2007 return 1;
2008 }
2009 }
2010 }
2011
2012 static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
2013 int32_t ar)
2014 {
2015 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2016 return 3; /* overflow */
2017 } else {
2018 if (ar < 0) {
2019 return 1;
2020 } else if (ar > 0) {
2021 return 2;
2022 } else {
2023 return 0;
2024 }
2025 }
2026 }
2027
2028 static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
2029 uint32_t ar)
2030 {
2031 if (ar == 0) {
2032 return 2;
2033 } else {
2034 if (a2 > a1) {
2035 return 1;
2036 } else {
2037 return 3;
2038 }
2039 }
2040 }
2041
2042 static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
2043 {
2044 if ((uint32_t)dst == 0x80000000UL) {
2045 return 3;
2046 } else if (dst) {
2047 return 1;
2048 } else {
2049 return 0;
2050 }
2051 }
2052
2053 static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
2054 {
2055 return !!dst;
2056 }
2057
2058 static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
2059 {
2060 if ((uint32_t)dst == 0x80000000UL) {
2061 return 3;
2062 } else if (dst < 0) {
2063 return 1;
2064 } else if (dst > 0) {
2065 return 2;
2066 } else {
2067 return 0;
2068 }
2069 }
2070
2071 /* calculate condition code for insert character under mask insn */
2072 static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2073 {
2074 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2075 uint32_t cc;
2076
2077 if (mask == 0xf) {
2078 if (!val) {
2079 return 0;
2080 } else if (val & 0x80000000) {
2081 return 1;
2082 } else {
2083 return 2;
2084 }
2085 }
2086
2087 if (!val || !mask) {
2088 cc = 0;
2089 } else {
2090 while (mask != 1) {
2091 mask >>= 1;
2092 val >>= 8;
2093 }
2094 if (val & 0x80) {
2095 cc = 1;
2096 } else {
2097 cc = 2;
2098 }
2099 }
2100 return cc;
2101 }
2102
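/* cc for shift left single (SLAG): overflow (cc 3) if any of the top "shift"
   bits of the source differs from the sign bit (i.e. significant bits would
   be shifted out), otherwise the sign/zero of the result selects cc 0, 1 or 2 */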
2103 static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
2104 {
2105 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
2106 uint64_t match, r;
2107
2108 /* check if the sign bit stays the same */
2109 if (src & (1ULL << 63)) {
2110 match = mask;
2111 } else {
2112 match = 0;
2113 }
2114
2115 if ((src & mask) != match) {
2116 /* overflow */
2117 return 3;
2118 }
2119
2120 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2121
2122 if ((int64_t)r == 0) {
2123 return 0;
2124 } else if ((int64_t)r < 0) {
2125 return 1;
2126 }
2127
2128 return 2;
2129 }
2130
2131
2132 static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2133 uint64_t dst, uint64_t vr)
2134 {
2135 uint32_t r = 0;
2136
2137 switch (cc_op) {
2138 case CC_OP_CONST0:
2139 case CC_OP_CONST1:
2140 case CC_OP_CONST2:
2141 case CC_OP_CONST3:
2142 /* cc_op value _is_ cc */
2143 r = cc_op;
2144 break;
2145 case CC_OP_LTGT0_32:
2146 r = cc_calc_ltgt0_32(env, dst);
2147 break;
2148 case CC_OP_LTGT0_64:
2149 r = cc_calc_ltgt0_64(env, dst);
2150 break;
2151 case CC_OP_LTGT_32:
2152 r = cc_calc_ltgt_32(env, src, dst);
2153 break;
2154 case CC_OP_LTGT_64:
2155 r = cc_calc_ltgt_64(env, src, dst);
2156 break;
2157 case CC_OP_LTUGTU_32:
2158 r = cc_calc_ltugtu_32(env, src, dst);
2159 break;
2160 case CC_OP_LTUGTU_64:
2161 r = cc_calc_ltugtu_64(env, src, dst);
2162 break;
2163 case CC_OP_TM_32:
2164 r = cc_calc_tm_32(env, src, dst);
2165 break;
2166 case CC_OP_TM_64:
2167 r = cc_calc_tm_64(env, src, dst);
2168 break;
2169 case CC_OP_NZ:
2170 r = cc_calc_nz(env, dst);
2171 break;
2172 case CC_OP_ADD_64:
2173 r = cc_calc_add_64(env, src, dst, vr);
2174 break;
2175 case CC_OP_ADDU_64:
2176 r = cc_calc_addu_64(env, src, dst, vr);
2177 break;
2178 case CC_OP_SUB_64:
2179 r = cc_calc_sub_64(env, src, dst, vr);
2180 break;
2181 case CC_OP_SUBU_64:
2182 r = cc_calc_subu_64(env, src, dst, vr);
2183 break;
2184 case CC_OP_ABS_64:
2185 r = cc_calc_abs_64(env, dst);
2186 break;
2187 case CC_OP_NABS_64:
2188 r = cc_calc_nabs_64(env, dst);
2189 break;
2190 case CC_OP_COMP_64:
2191 r = cc_calc_comp_64(env, dst);
2192 break;
2193
2194 case CC_OP_ADD_32:
2195 r = cc_calc_add_32(env, src, dst, vr);
2196 break;
2197 case CC_OP_ADDU_32:
2198 r = cc_calc_addu_32(env, src, dst, vr);
2199 break;
2200 case CC_OP_SUB_32:
2201 r = cc_calc_sub_32(env, src, dst, vr);
2202 break;
2203 case CC_OP_SUBU_32:
2204 r = cc_calc_subu_32(env, src, dst, vr);
2205 break;
2206 case CC_OP_ABS_32:
2207 r = cc_calc_abs_64(env, dst);
2208 break;
2209 case CC_OP_NABS_32:
2210 r = cc_calc_nabs_64(env, dst);
2211 break;
2212 case CC_OP_COMP_32:
2213 r = cc_calc_comp_32(env, dst);
2214 break;
2215
2216 case CC_OP_ICM:
2217 r = cc_calc_icm_32(env, src, dst);
2218 break;
2219 case CC_OP_SLAG:
2220 r = cc_calc_slag(env, src, dst);
2221 break;
2222
2223 case CC_OP_LTGT_F32:
2224 r = set_cc_f32(src, dst);
2225 break;
2226 case CC_OP_LTGT_F64:
2227 r = set_cc_f64(src, dst);
2228 break;
2229 case CC_OP_NZ_F32:
2230 r = set_cc_nz_f32(dst);
2231 break;
2232 case CC_OP_NZ_F64:
2233 r = set_cc_nz_f64(dst);
2234 break;
2235
2236 default:
2237 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2238 }
2239
2240     HELPER_LOG("%s: %15s 0x%016" PRIx64 " 0x%016" PRIx64 " 0x%016" PRIx64
2241                " = %d\n", __FUNCTION__, cc_name(cc_op), src, dst, vr, r);
2242 return r;
2243 }
2244
2245 uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2246 uint64_t vr)
2247 {
2248 return do_calc_cc(env, cc_op, src, dst, vr);
2249 }
2250
2251 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2252 uint64_t vr)
2253 {
2254 return do_calc_cc(env, cc_op, src, dst, vr);
2255 }
2256
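/* Convert to Decimal: build a packed (BCD) result with the sign in the
   rightmost nibble, 0xc for plus and 0xd for minus */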
2257 uint64_t HELPER(cvd)(int32_t bin)
2258 {
2259 /* positive 0 */
2260 uint64_t dec = 0x0c;
2261 int shift = 4;
2262
2263 if (bin < 0) {
2264 bin = -bin;
2265 dec = 0x0d;
2266 }
2267
2268 for (shift = 4; (shift < 64) && bin; shift += 4) {
2269 int current_number = bin % 10;
2270
2271         dec |= ((uint64_t)current_number) << shift; /* 64-bit shift */
2272 bin /= 10;
2273 }
2274
2275 return dec;
2276 }
2277
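/* Unpack: convert the packed (BCD) operand at src into zoned format at dest */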
2278 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2279 {
2280 int len_dest = len >> 4;
2281 int len_src = len & 0xf;
2282 uint8_t b;
2283 int second_nibble = 0;
2284
2285 dest += len_dest;
2286 src += len_src;
2287
2288 /* last byte is special, it only flips the nibbles */
2289 b = ldub(src);
2290 stb(dest, (b << 4) | (b >> 4));
2291 src--;
2292 len_src--;
2293
2294 /* now pad every nibble with 0xf0 */
2295
2296 while (len_dest > 0) {
2297 uint8_t cur_byte = 0;
2298
2299 if (len_src > 0) {
2300 cur_byte = ldub(src);
2301 }
2302
2303 len_dest--;
2304 dest--;
2305
2306 /* only advance one nibble at a time */
2307 if (second_nibble) {
2308 cur_byte >>= 4;
2309 len_src--;
2310 src--;
2311 }
2312 second_nibble = !second_nibble;
2313
2314 /* digit */
2315 cur_byte = (cur_byte & 0xf);
2316 /* zone bits */
2317 cur_byte |= 0xf0;
2318
2319 stb(dest, cur_byte);
2320 }
2321 }
2322
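/* Translate: replace each of the len + 1 bytes at array with the byte
   found at trans + old_byte */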
2323 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2324 {
2325 int i;
2326
2327 for (i = 0; i <= len; i++) {
2328 uint8_t byte = ldub(array + i);
2329 uint8_t new_byte = ldub(trans + byte);
2330 stb(array + i, new_byte);
2331 }
2332 }
2333
2334 #ifndef CONFIG_USER_ONLY
2335
2336 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2337 {
2338 load_psw(env, mask, addr);
2339 cpu_loop_exit(env);
2340 }
2341
2342 static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2343 {
2344 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2345
2346 if (kvm_enabled()) {
2347 #ifdef CONFIG_KVM
2348 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2349 #endif
2350 } else {
2351 env->int_pgm_code = code;
2352 env->int_pgm_ilc = ilc;
2353 env->exception_index = EXCP_PGM;
2354 cpu_loop_exit(env);
2355 }
2356 }
2357
2358 static void ext_interrupt(CPUState *env, int type, uint32_t param,
2359 uint64_t param64)
2360 {
2361 cpu_inject_ext(env, type, param, param64);
2362 }
2363
2364 int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2365 {
2366 int r = 0;
2367 int shift = 0;
2368
2369 #ifdef DEBUG_HELPER
2370 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2371 #endif
2372
2373 if (sccb & ~0x7ffffff8ul) {
2374 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2375 r = -1;
2376 goto out;
2377 }
2378
2379     switch (code) {
2380 case SCLP_CMDW_READ_SCP_INFO:
2381 case SCLP_CMDW_READ_SCP_INFO_FORCED:
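        /* report memory as at most 65535 increments of (1 << shift) MB each */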
2382 while ((ram_size >> (20 + shift)) > 65535) {
2383 shift++;
2384 }
2385 stw_phys(sccb + SCP_MEM_CODE, ram_size >> (20 + shift));
2386 stb_phys(sccb + SCP_INCREMENT, 1 << shift);
2387 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2388
2389 if (kvm_enabled()) {
2390 #ifdef CONFIG_KVM
2391 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2392 sccb & ~3, 0, 1);
2393 #endif
2394 } else {
2395 env->psw.addr += 4;
2396 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2397 }
2398 break;
2399 default:
2400 #ifdef DEBUG_HELPER
2401         printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "\n", sccb, code);
2402 #endif
2403 r = -1;
2404 break;
2405 }
2406
2407 out:
2408 return r;
2409 }
2410
2411 /* SCLP service call */
2412 uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2413 {
2414 if (sclp_service_call(env, r1, r2)) {
2415 return 3;
2416 }
2417
2418 return 0;
2419 }
2420
2421 /* DIAG */
2422 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2423 {
2424 uint64_t r;
2425
2426 switch (num) {
2427 case 0x500:
2428 /* KVM hypercall */
2429 r = s390_virtio_hypercall(env, mem, code);
2430 break;
2431 case 0x44:
2432 /* yield */
2433 r = 0;
2434 break;
2435 case 0x308:
2436 /* ipl */
2437 r = 0;
2438 break;
2439 default:
2440 r = -1;
2441 break;
2442 }
2443
2444 if (r) {
2445 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2446 }
2447
2448 return r;
2449 }
2450
2451 /* Store CPU ID */
2452 void HELPER(stidp)(uint64_t a1)
2453 {
2454 stq(a1, env->cpu_num);
2455 }
2456
2457 /* Set Prefix */
2458 void HELPER(spx)(uint64_t a1)
2459 {
2460 uint32_t prefix;
2461
2462 prefix = ldl(a1);
2463 env->psa = prefix & 0xfffff000;
2464 qemu_log("prefix: %#x\n", prefix);
2465 tlb_flush_page(env, 0);
2466 tlb_flush_page(env, TARGET_PAGE_SIZE);
2467 }
2468
2469 /* Set Clock */
2470 uint32_t HELPER(sck)(uint64_t a1)
2471 {
2472 /* XXX not implemented - is it necessary? */
2473
2474 return 0;
2475 }
2476
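/* The TOD clock counts in units of 2^-12 microseconds (bit 51 ticks once per
   microsecond); time2tod converts a nanosecond delta into TOD units */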
2477 static inline uint64_t clock_value(CPUState *env)
2478 {
2479 uint64_t time;
2480
2481 time = env->tod_offset +
2482 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2483
2484 return time;
2485 }
2486
2487 /* Store Clock */
2488 uint32_t HELPER(stck)(uint64_t a1)
2489 {
2490 stq(a1, clock_value(env));
2491
2492 return 0;
2493 }
2494
2495 /* Store Clock Extended */
2496 uint32_t HELPER(stcke)(uint64_t a1)
2497 {
2498 stb(a1, 0);
2499 /* basically the same value as stck */
2500 stq(a1 + 1, clock_value(env) | env->cpu_num);
2501 /* more fine grained than stck */
2502 stq(a1 + 9, 0);
2503 /* XXX programmable fields */
2504 stw(a1 + 17, 0);
2505
2506
2507 return 0;
2508 }
2509
2510 /* Set Clock Comparator */
2511 void HELPER(sckc)(uint64_t a1)
2512 {
2513 uint64_t time = ldq(a1);
2514
2515 if (time == -1ULL) {
2516 return;
2517 }
2518
2519 /* difference between now and then */
2520 time -= clock_value(env);
2521     /* convert TOD units to nanoseconds: ns = tod * 1000 / 4096 */
2522 time = (time * 125) >> 9;
2523
2524 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2525 }
2526
2527 /* Store Clock Comparator */
2528 void HELPER(stckc)(uint64_t a1)
2529 {
2530 /* XXX implement */
2531 stq(a1, 0);
2532 }
2533
2534 /* Set CPU Timer */
2535 void HELPER(spt)(uint64_t a1)
2536 {
2537 uint64_t time = ldq(a1);
2538
2539 if (time == -1ULL) {
2540 return;
2541 }
2542
2543     /* convert TOD units to nanoseconds: ns = tod * 1000 / 4096 */
2544 time = (time * 125) >> 9;
2545
2546 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2547 }
2548
2549 /* Store CPU Timer */
2550 void HELPER(stpt)(uint64_t a1)
2551 {
2552 /* XXX implement */
2553 stq(a1, 0);
2554 }
2555
2556 /* Store System Information */
2557 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2558 {
2559 int cc = 0;
2560 int sel1, sel2;
2561
2562 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2563 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2564 /* valid function code, invalid reserved bits */
2565 program_interrupt(env, PGM_SPECIFICATION, 2);
2566 }
2567
2568 sel1 = r0 & STSI_R0_SEL1_MASK;
2569 sel2 = r1 & STSI_R1_SEL2_MASK;
2570
2571 /* XXX: spec exception if sysib is not 4k-aligned */
2572
2573 switch (r0 & STSI_LEVEL_MASK) {
2574 case STSI_LEVEL_1:
2575 if ((sel1 == 1) && (sel2 == 1)) {
2576 /* Basic Machine Configuration */
2577 struct sysib_111 sysib;
2578
2579 memset(&sysib, 0, sizeof(sysib));
2580 ebcdic_put(sysib.manuf, "QEMU ", 16);
2581 /* same as machine type number in STORE CPU ID */
2582 ebcdic_put(sysib.type, "QEMU", 4);
2583 /* same as model number in STORE CPU ID */
2584 ebcdic_put(sysib.model, "QEMU ", 16);
2585 ebcdic_put(sysib.sequence, "QEMU ", 16);
2586 ebcdic_put(sysib.plant, "QEMU", 4);
2587 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2588 } else if ((sel1 == 2) && (sel2 == 1)) {
2589 /* Basic Machine CPU */
2590 struct sysib_121 sysib;
2591
2592 memset(&sysib, 0, sizeof(sysib));
2593 /* XXX make different for different CPUs? */
2594 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2595 ebcdic_put(sysib.plant, "QEMU", 4);
2596 stw_p(&sysib.cpu_addr, env->cpu_num);
2597 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2598 } else if ((sel1 == 2) && (sel2 == 2)) {
2599 /* Basic Machine CPUs */
2600 struct sysib_122 sysib;
2601
2602 memset(&sysib, 0, sizeof(sysib));
2603 stl_p(&sysib.capability, 0x443afc29);
2604 /* XXX change when SMP comes */
2605 stw_p(&sysib.total_cpus, 1);
2606 stw_p(&sysib.active_cpus, 1);
2607 stw_p(&sysib.standby_cpus, 0);
2608 stw_p(&sysib.reserved_cpus, 0);
2609 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2610 } else {
2611 cc = 3;
2612 }
2613 break;
2614 case STSI_LEVEL_2:
2615 {
2616 if ((sel1 == 2) && (sel2 == 1)) {
2617 /* LPAR CPU */
2618 struct sysib_221 sysib;
2619
2620 memset(&sysib, 0, sizeof(sysib));
2621 /* XXX make different for different CPUs? */
2622 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2623 ebcdic_put(sysib.plant, "QEMU", 4);
2624 stw_p(&sysib.cpu_addr, env->cpu_num);
2625 stw_p(&sysib.cpu_id, 0);
2626 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2627 } else if ((sel1 == 2) && (sel2 == 2)) {
2628 /* LPAR CPUs */
2629 struct sysib_222 sysib;
2630
2631 memset(&sysib, 0, sizeof(sysib));
2632 stw_p(&sysib.lpar_num, 0);
2633 sysib.lcpuc = 0;
2634 /* XXX change when SMP comes */
2635 stw_p(&sysib.total_cpus, 1);
2636 stw_p(&sysib.conf_cpus, 1);
2637 stw_p(&sysib.standby_cpus, 0);
2638 stw_p(&sysib.reserved_cpus, 0);
2639 ebcdic_put(sysib.name, "QEMU ", 8);
2640 stl_p(&sysib.caf, 1000);
2641 stw_p(&sysib.dedicated_cpus, 0);
2642 stw_p(&sysib.shared_cpus, 0);
2643 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2644 } else {
2645 cc = 3;
2646 }
2647 break;
2648 }
2649 case STSI_LEVEL_3:
2650 {
2651 if ((sel1 == 2) && (sel2 == 2)) {
2652 /* VM CPUs */
2653 struct sysib_322 sysib;
2654
2655 memset(&sysib, 0, sizeof(sysib));
2656 sysib.count = 1;
2657 /* XXX change when SMP comes */
2658 stw_p(&sysib.vm[0].total_cpus, 1);
2659 stw_p(&sysib.vm[0].conf_cpus, 1);
2660 stw_p(&sysib.vm[0].standby_cpus, 0);
2661 stw_p(&sysib.vm[0].reserved_cpus, 0);
2662 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2663 stl_p(&sysib.vm[0].caf, 1000);
2664 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2665 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2666 } else {
2667 cc = 3;
2668 }
2669 break;
2670 }
2671 case STSI_LEVEL_CURRENT:
2672 env->regs[0] = STSI_LEVEL_3;
2673 break;
2674 default:
2675 cc = 3;
2676 break;
2677 }
2678
2679 return cc;
2680 }
2681
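/* Load Control (64 bit): load control registers r1 through r3, wrapping from
   15 back to 0, from consecutive doublewords at a2 */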
2682 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2683 {
2684 int i;
2685 uint64_t src = a2;
2686
2687 for (i = r1;; i = (i + 1) % 16) {
2688 env->cregs[i] = ldq(src);
2689 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2690 i, src, env->cregs[i]);
2691 src += sizeof(uint64_t);
2692
2693 if (i == r3) {
2694 break;
2695 }
2696 }
2697
2698 tlb_flush(env, 1);
2699 }
2700
2701 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2702 {
2703 int i;
2704 uint64_t src = a2;
2705
2706 for (i = r1;; i = (i + 1) % 16) {
2707 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2708 src += sizeof(uint32_t);
2709
2710 if (i == r3) {
2711 break;
2712 }
2713 }
2714
2715 tlb_flush(env, 1);
2716 }
2717
2718 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2719 {
2720 int i;
2721 uint64_t dest = a2;
2722
2723 for (i = r1;; i = (i + 1) % 16) {
2724 stq(dest, env->cregs[i]);
2725 dest += sizeof(uint64_t);
2726
2727 if (i == r3) {
2728 break;
2729 }
2730 }
2731 }
2732
2733 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2734 {
2735 int i;
2736 uint64_t dest = a2;
2737
2738 for (i = r1;; i = (i + 1) % 16) {
2739 stl(dest, env->cregs[i]);
2740 dest += sizeof(uint32_t);
2741
2742 if (i == r3) {
2743 break;
2744 }
2745 }
2746 }
2747
2748 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2749 {
2750 /* XXX implement */
2751
2752 return 0;
2753 }
2754
2755 /* insert storage key extended */
2756 uint64_t HELPER(iske)(uint64_t r2)
2757 {
2758 uint64_t addr = get_address(0, 0, r2);
2759
2760 if (addr > ram_size) {
2761 return 0;
2762 }
2763
2764 /* XXX maybe use qemu's internal keys? */
2765 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2766 }
2767
2768 /* set storage key extended */
2769 void HELPER(sske)(uint32_t r1, uint64_t r2)
2770 {
2771 uint64_t addr = get_address(0, 0, r2);
2772
2773 if (addr > ram_size) {
2774 return;
2775 }
2776
2777 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2778 }
2779
2780 /* reset reference bit extended */
2781 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2782 {
2783 if (r2 > ram_size) {
2784 return 0;
2785 }
2786
2787 /* XXX implement */
2788 #if 0
2789 env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
2790 #endif
2791
2792 /*
2793 * cc
2794 *
2795 * 0 Reference bit zero; change bit zero
2796 * 1 Reference bit zero; change bit one
2797 * 2 Reference bit one; change bit zero
2798 * 3 Reference bit one; change bit one
2799 */
2800 return 0;
2801 }
2802
2803 /* compare and swap and purge */
2804 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2805 {
2806 uint32_t cc;
2807 uint32_t o1 = env->regs[r1];
2808 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2809 uint32_t o2 = ldl(a2);
2810
2811 if (o1 == o2) {
2812 stl(a2, env->regs[(r1 + 1) & 15]);
2813 if (env->regs[r2] & 0x3) {
2814 /* flush TLB / ALB */
2815 tlb_flush(env, 1);
2816 }
2817 cc = 0;
2818 } else {
2819 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2820 cc = 1;
2821 }
2822
2823 return cc;
2824 }
2825
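/* copy at most 256 bytes from a2 (translated with ASC mode2) to a1
   (translated with ASC mode1); backs the MVCS and MVCP helpers below */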
2826 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2827 uint64_t mode2)
2828 {
2829 target_ulong src, dest;
2830 int flags, cc = 0, i;
2831
2832 if (!l) {
2833 return 0;
2834 } else if (l > 256) {
2835 /* max 256 */
2836 l = 256;
2837 cc = 3;
2838 }
2839
2840 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2841 cpu_loop_exit(env);
2842 }
2843 dest |= a1 & ~TARGET_PAGE_MASK;
2844
2845 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2846 cpu_loop_exit(env);
2847 }
2848 src |= a2 & ~TARGET_PAGE_MASK;
2849
2850 /* XXX replace w/ memcpy */
2851 for (i = 0; i < l; i++) {
2852 /* XXX be more clever */
2853 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2854 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2855 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2856 break;
2857 }
2858 stb_phys(dest + i, ldub_phys(src + i));
2859 }
2860
2861 return cc;
2862 }
2863
2864 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2865 {
2866 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2867 __FUNCTION__, l, a1, a2);
2868
2869 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2870 }
2871
2872 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2873 {
2874 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2875 __FUNCTION__, l, a1, a2);
2876
2877 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
2878 }
2879
2880 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2881 {
2882 int cc = 0;
2883
2884 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2885 __FUNCTION__, order_code, r1, cpu_addr);
2886
2887 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2888 as parameter (input). Status (output) is always R1. */
2889
2890 switch (order_code) {
2891 case SIGP_SET_ARCH:
2892 /* switch arch */
2893 break;
2894 case SIGP_SENSE:
2895 /* enumerate CPU status */
2896 if (cpu_addr) {
2897 /* XXX implement when SMP comes */
2898 return 3;
2899 }
2900 env->regs[r1] &= 0xffffffff00000000ULL;
2901 cc = 1;
2902 break;
2903 default:
2904 /* unknown sigp */
2905 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2906 cc = 3;
2907 }
2908
2909 return cc;
2910 }
2911
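/* Set Address Space Control Fast: switch the PSW address-space-control (ASC)
   bits to primary, secondary or home space */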
2912 void HELPER(sacf)(uint64_t a1)
2913 {
2914 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2915
2916 switch (a1 & 0xf00) {
2917 case 0x000:
2918 env->psw.mask &= ~PSW_MASK_ASC;
2919 env->psw.mask |= PSW_ASC_PRIMARY;
2920 break;
2921 case 0x100:
2922 env->psw.mask &= ~PSW_MASK_ASC;
2923 env->psw.mask |= PSW_ASC_SECONDARY;
2924 break;
2925 case 0x300:
2926 env->psw.mask &= ~PSW_MASK_ASC;
2927 env->psw.mask |= PSW_ASC_HOME;
2928 break;
2929 default:
2930 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2931 program_interrupt(env, PGM_SPECIFICATION, 2);
2932 break;
2933 }
2934 }
2935
2936 /* invalidate pte */
2937 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2938 {
2939 uint64_t page = vaddr & TARGET_PAGE_MASK;
2940 uint64_t pte = 0;
2941
2942 /* XXX broadcast to other CPUs */
2943
2944 /* XXX Linux is nice enough to give us the exact pte address.
2945 According to spec we'd have to find it out ourselves */
2946 /* XXX Linux is fine with overwriting the pte, the spec requires
2947 us to only set the invalid bit */
2948 stq_phys(pte_addr, pte | _PAGE_INVALID);
2949
2950 /* XXX we exploit the fact that Linux passes the exact virtual
2951 address here - it's not obliged to! */
2952 tlb_flush_page(env, page);
2953 }
2954
2955 /* flush local tlb */
2956 void HELPER(ptlb)(void)
2957 {
2958 tlb_flush(env, 1);
2959 }
2960
2961 /* store using real address */
2962 void HELPER(stura)(uint64_t addr, uint32_t v1)
2963 {
2964     stl_phys(get_address(0, 0, addr), v1); /* STURA stores a 32-bit word */
2965 }
2966
2967 /* load real address */
2968 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2969 {
2970 uint32_t cc = 0;
2971 int old_exc = env->exception_index;
2972 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2973 uint64_t ret;
2974 int flags;
2975
2976 /* XXX incomplete - has more corner cases */
2977 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
2978 program_interrupt(env, PGM_SPECIAL_OP, 2);
2979 }
2980
2981 env->exception_index = old_exc;
2982 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
2983 cc = 3;
2984 }
2985 if (env->exception_index == EXCP_PGM) {
2986 ret = env->int_pgm_code | 0x80000000;
2987 } else {
2988 ret |= addr & ~TARGET_PAGE_MASK;
2989 }
2990 env->exception_index = old_exc;
2991
2992 if (!(env->psw.mask & PSW_MASK_64)) {
2993 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
2994 } else {
2995 env->regs[r1] = ret;
2996 }
2997
2998 return cc;
2999 }
3000
3001 #endif