target-s390x/op_helper.c
1 /*
2 * S/390 helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "exec.h"
22 #include "host-utils.h"
23 #include "helpers.h"
24 #include <string.h>
25 #include "kvm.h"
26 #include "qemu-timer.h"
27
28 /*****************************************************************************/
29 /* Softmmu support */
30 #if !defined (CONFIG_USER_ONLY)
31
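/* Each inclusion of softmmu_template.h below instantiates the out-of-line
   softmmu load/store helpers for one access size (1 << SHIFT bytes, i.e.
   1, 2, 4 and 8), named with the _mmu suffix defined here.  Roughly, these
   are the slow paths taken when a guest access misses the software TLB. */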
32 #define MMUSUFFIX _mmu
33
34 #define SHIFT 0
35 #include "softmmu_template.h"
36
37 #define SHIFT 1
38 #include "softmmu_template.h"
39
40 #define SHIFT 2
41 #include "softmmu_template.h"
42
43 #define SHIFT 3
44 #include "softmmu_template.h"
45
    46 /* try to fill the TLB and raise an exception on error. If retaddr is
    47    NULL, it means the function was called from C code (i.e. not from
    48    generated code or from helper.c) */
49 /* XXX: fix it to restore all registers */
50 void tlb_fill (target_ulong addr, int is_write, int mmu_idx, void *retaddr)
51 {
52 TranslationBlock *tb;
53 CPUState *saved_env;
54 unsigned long pc;
55 int ret;
56
57 /* XXX: hack to restore env in all cases, even if not called from
58 generated code */
59 saved_env = env;
60 env = cpu_single_env;
61 ret = cpu_s390x_handle_mmu_fault(env, addr, is_write, mmu_idx, 1);
62 if (unlikely(ret != 0)) {
63 if (likely(retaddr)) {
64 /* now we have a real cpu fault */
65 pc = (unsigned long)retaddr;
66 tb = tb_find_pc(pc);
67 if (likely(tb)) {
68 /* the PC is inside the translated code. It means that we have
69 a virtual CPU fault */
70 cpu_restore_state(tb, env, pc);
71 }
72 }
73 cpu_loop_exit();
74 }
75 env = saved_env;
76 }
77
78 #endif
79
80 /* #define DEBUG_HELPER */
81 #ifdef DEBUG_HELPER
82 #define HELPER_LOG(x...) qemu_log(x)
83 #else
84 #define HELPER_LOG(x...)
85 #endif
86
87 /* raise an exception */
88 void HELPER(exception)(uint32_t excp)
89 {
90 HELPER_LOG("%s: exception %d\n", __FUNCTION__, excp);
91 env->exception_index = excp;
92 cpu_loop_exit();
93 }
94
95 #ifndef CONFIG_USER_ONLY
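/* Fast paths for the MVC-style helpers below: translate the guest-virtual
   start address once with mmu_translate(), map the physical range with
   cpu_physical_memory_map(), and then memset()/memmove() directly on host
   memory.  This is only valid while the whole range stays within a single
   page, which the callers check before taking these paths.  The stb()/
   ldub() calls in front of cpu_abort() re-issue the access through the
   normal slow path, so a failed translation raises the architectural
   exception instead of reaching the abort. */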
96 static void mvc_fast_memset(CPUState *env, uint32_t l, uint64_t dest,
97 uint8_t byte)
98 {
99 target_phys_addr_t dest_phys;
100 target_phys_addr_t len = l;
101 void *dest_p;
102 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
103 int flags;
104
105 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
106 stb(dest, byte);
107 cpu_abort(env, "should never reach here");
108 }
109 dest_phys |= dest & ~TARGET_PAGE_MASK;
110
111 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
112
113 memset(dest_p, byte, len);
114
115 cpu_physical_memory_unmap(dest_p, 1, len, len);
116 }
117
118 static void mvc_fast_memmove(CPUState *env, uint32_t l, uint64_t dest,
119 uint64_t src)
120 {
121 target_phys_addr_t dest_phys;
122 target_phys_addr_t src_phys;
123 target_phys_addr_t len = l;
124 void *dest_p;
125 void *src_p;
126 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
127 int flags;
128
129 if (mmu_translate(env, dest, 1, asc, &dest_phys, &flags)) {
130 stb(dest, 0);
131 cpu_abort(env, "should never reach here");
132 }
133 dest_phys |= dest & ~TARGET_PAGE_MASK;
134
135 if (mmu_translate(env, src, 0, asc, &src_phys, &flags)) {
136 ldub(src);
137 cpu_abort(env, "should never reach here");
138 }
139 src_phys |= src & ~TARGET_PAGE_MASK;
140
141 dest_p = cpu_physical_memory_map(dest_phys, &len, 1);
142 src_p = cpu_physical_memory_map(src_phys, &len, 0);
143
144 memmove(dest_p, src_p, len);
145
146 cpu_physical_memory_unmap(dest_p, 1, len, len);
147 cpu_physical_memory_unmap(src_p, 0, len, len);
148 }
149 #endif
150
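/* For NC, XC and OC below, l is the instruction's length code, i.e. the
   number of bytes to process minus one; hence the inclusive "i <= l"
   loops.  The returned condition code is 0 when every result byte is
   zero and 1 otherwise. */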
151 /* and on array */
152 uint32_t HELPER(nc)(uint32_t l, uint64_t dest, uint64_t src)
153 {
154 int i;
155 unsigned char x;
156 uint32_t cc = 0;
157
158 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
159 __FUNCTION__, l, dest, src);
160 for (i = 0; i <= l; i++) {
161 x = ldub(dest + i) & ldub(src + i);
162 if (x) {
163 cc = 1;
164 }
165 stb(dest + i, x);
166 }
167 return cc;
168 }
169
170 /* xor on array */
171 uint32_t HELPER(xc)(uint32_t l, uint64_t dest, uint64_t src)
172 {
173 int i;
174 unsigned char x;
175 uint32_t cc = 0;
176
177 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
178 __FUNCTION__, l, dest, src);
179
180 #ifndef CONFIG_USER_ONLY
181 /* xor with itself is the same as memset(0) */
182 if ((l > 32) && (src == dest) &&
183 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK)) {
184 mvc_fast_memset(env, l + 1, dest, 0);
185 return 0;
186 }
187 #else
188 if (src == dest) {
189 memset(g2h(dest), 0, l + 1);
190 return 0;
191 }
192 #endif
193
194 for (i = 0; i <= l; i++) {
195 x = ldub(dest + i) ^ ldub(src + i);
196 if (x) {
197 cc = 1;
198 }
199 stb(dest + i, x);
200 }
201 return cc;
202 }
203
204 /* or on array */
205 uint32_t HELPER(oc)(uint32_t l, uint64_t dest, uint64_t src)
206 {
207 int i;
208 unsigned char x;
209 uint32_t cc = 0;
210
211 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
212 __FUNCTION__, l, dest, src);
213 for (i = 0; i <= l; i++) {
214 x = ldub(dest + i) | ldub(src + i);
215 if (x) {
216 cc = 1;
217 }
218 stb(dest + i, x);
219 }
220 return cc;
221 }
222
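/* MVC copies strictly byte by byte from left to right, so an overlapping
   copy with dest == src + 1 propagates the first source byte through the
   whole destination; guests use this as the idiomatic way to fill or
   clear storage.  That is why the fast paths below special-case
   dest == src + 1 as a memset of ldub(src). */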
223 /* memmove */
224 void HELPER(mvc)(uint32_t l, uint64_t dest, uint64_t src)
225 {
226 int i = 0;
227 int x = 0;
228 uint32_t l_64 = (l + 1) / 8;
229
230 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
231 __FUNCTION__, l, dest, src);
232
233 #ifndef CONFIG_USER_ONLY
234 if ((l > 32) &&
235 (src & TARGET_PAGE_MASK) == ((src + l) & TARGET_PAGE_MASK) &&
236 (dest & TARGET_PAGE_MASK) == ((dest + l) & TARGET_PAGE_MASK)) {
237 if (dest == (src + 1)) {
238 mvc_fast_memset(env, l + 1, dest, ldub(src));
239 return;
240 } else if ((src & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) {
241 mvc_fast_memmove(env, l + 1, dest, src);
242 return;
243 }
244 }
245 #else
246 if (dest == (src + 1)) {
247 memset(g2h(dest), ldub(src), l + 1);
248 return;
249 } else {
250 memmove(g2h(dest), g2h(src), l + 1);
251 return;
252 }
253 #endif
254
255 /* handle the parts that fit into 8-byte loads/stores */
256 if (dest != (src + 1)) {
257 for (i = 0; i < l_64; i++) {
258 stq(dest + x, ldq(src + x));
259 x += 8;
260 }
261 }
262
263 /* slow version crossing pages with byte accesses */
264 for (i = x; i <= l; i++) {
265 stb(dest + i, ldub(src + i));
266 }
267 }
268
269 /* compare unsigned byte arrays */
270 uint32_t HELPER(clc)(uint32_t l, uint64_t s1, uint64_t s2)
271 {
272 int i;
273 unsigned char x,y;
274 uint32_t cc;
275 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
276 __FUNCTION__, l, s1, s2);
277 for (i = 0; i <= l; i++) {
278 x = ldub(s1 + i);
279 y = ldub(s2 + i);
280 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
281 if (x < y) {
282 cc = 1;
283 goto done;
284 } else if (x > y) {
285 cc = 2;
286 goto done;
287 }
288 }
289 cc = 0;
290 done:
291 HELPER_LOG("\n");
292 return cc;
293 }
294
295 /* compare logical under mask */
296 uint32_t HELPER(clm)(uint32_t r1, uint32_t mask, uint64_t addr)
297 {
298 uint8_t r,d;
299 uint32_t cc;
300 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __FUNCTION__, r1,
301 mask, addr);
302 cc = 0;
303 while (mask) {
304 if (mask & 8) {
305 d = ldub(addr);
306 r = (r1 & 0xff000000UL) >> 24;
307 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
308 addr);
309 if (r < d) {
310 cc = 1;
311 break;
312 } else if (r > d) {
313 cc = 2;
314 break;
315 }
316 addr++;
317 }
318 mask = (mask << 1) & 0xf;
319 r1 <<= 8;
320 }
321 HELPER_LOG("\n");
322 return cc;
323 }
324
325 /* store character under mask */
326 void HELPER(stcm)(uint32_t r1, uint32_t mask, uint64_t addr)
327 {
328 uint8_t r;
329 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%lx\n", __FUNCTION__, r1, mask,
330 addr);
331 while (mask) {
332 if (mask & 8) {
333 r = (r1 & 0xff000000UL) >> 24;
334 stb(addr, r);
335 HELPER_LOG("mask 0x%x %02x (0x%lx) ", mask, r, addr);
336 addr++;
337 }
338 mask = (mask << 1) & 0xf;
339 r1 <<= 8;
340 }
341 HELPER_LOG("\n");
342 }
343
344 /* 64/64 -> 128 unsigned multiplication */
345 void HELPER(mlg)(uint32_t r1, uint64_t v2)
346 {
347 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
348 /* assuming 64-bit hosts have __uint128_t */
349 __uint128_t res = (__uint128_t)env->regs[r1 + 1];
350 res *= (__uint128_t)v2;
351 env->regs[r1] = (uint64_t)(res >> 64);
352 env->regs[r1 + 1] = (uint64_t)res;
353 #else
354 mulu64(&env->regs[r1 + 1], &env->regs[r1], env->regs[r1 + 1], v2);
355 #endif
356 }
357
358 /* 128 -> 64/64 unsigned division */
359 void HELPER(dlg)(uint32_t r1, uint64_t v2)
360 {
361 uint64_t divisor = v2;
362
363 if (!env->regs[r1]) {
364 /* 64 -> 64/64 case */
365 env->regs[r1] = env->regs[r1+1] % divisor;
366 env->regs[r1+1] = env->regs[r1+1] / divisor;
367 return;
368 } else {
369
370 #if HOST_LONG_BITS == 64 && defined(__GNUC__)
371 /* assuming 64-bit hosts have __uint128_t */
372 __uint128_t dividend = (((__uint128_t)env->regs[r1]) << 64) |
373 (env->regs[r1+1]);
374 __uint128_t quotient = dividend / divisor;
375 env->regs[r1+1] = quotient;
376 __uint128_t remainder = dividend % divisor;
377 env->regs[r1] = remainder;
378 #else
379 /* 32-bit hosts would need special wrapper functionality - just abort if
   380            we encounter such a case; it's very unlikely anyway. */
381 cpu_abort(env, "128 -> 64/64 division not implemented\n");
382 #endif
383 }
384 }
385
386 static inline uint64_t get_address(int x2, int b2, int d2)
387 {
388 uint64_t r = d2;
389
390 if (x2) {
391 r += env->regs[x2];
392 }
393
394 if (b2) {
395 r += env->regs[b2];
396 }
397
398 /* 31-Bit mode */
399 if (!(env->psw.mask & PSW_MASK_64)) {
400 r &= 0x7fffffff;
401 }
402
403 return r;
404 }
405
406 static inline uint64_t get_address_31fix(int reg)
407 {
408 uint64_t r = env->regs[reg];
409
410 /* 31-Bit mode */
411 if (!(env->psw.mask & PSW_MASK_64)) {
412 r &= 0x7fffffff;
413 }
414
415 return r;
416 }
417
   418 /* search string (c is the byte to search for, r2 is the string, r1 the end of the string) */
419 uint32_t HELPER(srst)(uint32_t c, uint32_t r1, uint32_t r2)
420 {
421 uint64_t i;
422 uint32_t cc = 2;
423 uint64_t str = get_address_31fix(r2);
424 uint64_t end = get_address_31fix(r1);
425
426 HELPER_LOG("%s: c %d *r1 0x%" PRIx64 " *r2 0x%" PRIx64 "\n", __FUNCTION__,
427 c, env->regs[r1], env->regs[r2]);
428
429 for (i = str; i != end; i++) {
430 if (ldub(i) == c) {
431 env->regs[r1] = i;
432 cc = 1;
433 break;
434 }
435 }
436
437 return cc;
438 }
439
440 /* unsigned string compare (c is string terminator) */
441 uint32_t HELPER(clst)(uint32_t c, uint32_t r1, uint32_t r2)
442 {
443 uint64_t s1 = get_address_31fix(r1);
444 uint64_t s2 = get_address_31fix(r2);
445 uint8_t v1, v2;
446 uint32_t cc;
447 c = c & 0xff;
448 #ifdef CONFIG_USER_ONLY
449 if (!c) {
450 HELPER_LOG("%s: comparing '%s' and '%s'\n",
451 __FUNCTION__, (char*)g2h(s1), (char*)g2h(s2));
452 }
453 #endif
454 for (;;) {
455 v1 = ldub(s1);
456 v2 = ldub(s2);
457 if ((v1 == c || v2 == c) || (v1 != v2)) {
458 break;
459 }
460 s1++;
461 s2++;
462 }
463
464 if (v1 == v2) {
465 cc = 0;
466 } else {
467 cc = (v1 < v2) ? 1 : 2;
468 /* FIXME: 31-bit mode! */
469 env->regs[r1] = s1;
470 env->regs[r2] = s2;
471 }
472 return cc;
473 }
474
475 /* move page */
476 void HELPER(mvpg)(uint64_t r0, uint64_t r1, uint64_t r2)
477 {
478 /* XXX missing r0 handling */
479 #ifdef CONFIG_USER_ONLY
480 int i;
481
482 for (i = 0; i < TARGET_PAGE_SIZE; i++) {
483 stb(r1 + i, ldub(r2 + i));
484 }
485 #else
486 mvc_fast_memmove(env, TARGET_PAGE_SIZE, r1, r2);
487 #endif
488 }
489
490 /* string copy (c is string terminator) */
491 void HELPER(mvst)(uint32_t c, uint32_t r1, uint32_t r2)
492 {
493 uint64_t dest = get_address_31fix(r1);
494 uint64_t src = get_address_31fix(r2);
495 uint8_t v;
496 c = c & 0xff;
497 #ifdef CONFIG_USER_ONLY
498 if (!c) {
499 HELPER_LOG("%s: copy '%s' to 0x%lx\n", __FUNCTION__, (char*)g2h(src),
500 dest);
501 }
502 #endif
503 for (;;) {
504 v = ldub(src);
505 stb(dest, v);
506 if (v == c) {
507 break;
508 }
509 src++;
510 dest++;
511 }
512 env->regs[r1] = dest; /* FIXME: 31-bit mode! */
513 }
514
515 /* compare and swap 64-bit */
516 uint32_t HELPER(csg)(uint32_t r1, uint64_t a2, uint32_t r3)
517 {
518 /* FIXME: locking? */
519 uint32_t cc;
520 uint64_t v2 = ldq(a2);
521 if (env->regs[r1] == v2) {
522 cc = 0;
523 stq(a2, env->regs[r3]);
524 } else {
525 cc = 1;
526 env->regs[r1] = v2;
527 }
528 return cc;
529 }
530
531 /* compare double and swap 64-bit */
532 uint32_t HELPER(cdsg)(uint32_t r1, uint64_t a2, uint32_t r3)
533 {
534 /* FIXME: locking? */
535 uint32_t cc;
536 uint64_t v2_hi = ldq(a2);
537 uint64_t v2_lo = ldq(a2 + 8);
538 uint64_t v1_hi = env->regs[r1];
539 uint64_t v1_lo = env->regs[r1 + 1];
540
541 if ((v1_hi == v2_hi) && (v1_lo == v2_lo)) {
542 cc = 0;
543 stq(a2, env->regs[r3]);
544 stq(a2 + 8, env->regs[r3 + 1]);
545 } else {
546 cc = 1;
547 env->regs[r1] = v2_hi;
548 env->regs[r1 + 1] = v2_lo;
549 }
550
551 return cc;
552 }
553
554 /* compare and swap 32-bit */
555 uint32_t HELPER(cs)(uint32_t r1, uint64_t a2, uint32_t r3)
556 {
557 /* FIXME: locking? */
558 uint32_t cc;
559 HELPER_LOG("%s: r1 %d a2 0x%lx r3 %d\n", __FUNCTION__, r1, a2, r3);
560 uint32_t v2 = ldl(a2);
561 if (((uint32_t)env->regs[r1]) == v2) {
562 cc = 0;
563 stl(a2, (uint32_t)env->regs[r3]);
564 } else {
565 cc = 1;
566 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | v2;
567 }
568 return cc;
569 }
570
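/* Helper for INSERT CHARACTERS UNDER MASK: the bytes selected by the 4-bit
   mask are loaded from memory into the corresponding byte positions of the
   low word of r1.  The condition code follows the architecture, roughly:
   0 if all inserted bits are zero (or the mask is zero), 1 if the leftmost
   inserted bit is one, 2 if the leftmost inserted bit is zero but the
   inserted field is not all zero. */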
571 static uint32_t helper_icm(uint32_t r1, uint64_t address, uint32_t mask)
572 {
573 int pos = 24; /* top of the lower half of r1 */
574 uint64_t rmask = 0xff000000ULL;
575 uint8_t val = 0;
576 int ccd = 0;
577 uint32_t cc = 0;
578
579 while (mask) {
580 if (mask & 8) {
581 env->regs[r1] &= ~rmask;
582 val = ldub(address);
583 if ((val & 0x80) && !ccd) {
584 cc = 1;
585 }
586 ccd = 1;
587 if (val && cc == 0) {
588 cc = 2;
589 }
590 env->regs[r1] |= (uint64_t)val << pos;
591 address++;
592 }
593 mask = (mask << 1) & 0xf;
594 pos -= 8;
595 rmask >>= 8;
596 }
597
598 return cc;
599 }
600
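/* Typical guest usage of EXECUTE, as handled by the interpreter below
   (sketch only): the subject instruction is assembled with a zero length
   code and EX supplies the real one from the low byte of r1, e.g.

         LA    1,LEN-1         length code into the low byte of R1
         EX    1,MOVE          execute MVC with that length
         ...
   MOVE  MVC   0(0,R4),0(R5)   length byte filled in at execution time

   which ends up in the 0xd2.. (MVC) case of helper_ex() with
   l = v1 & 0xff. */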
601 /* execute instruction
602 this instruction executes an insn modified with the contents of r1
603 it does not change the executed instruction in memory
604 it does not change the program counter
605 in other words: tricky...
606 currently implemented by interpreting the cases it is most commonly used in
607 */
608 uint32_t HELPER(ex)(uint32_t cc, uint64_t v1, uint64_t addr, uint64_t ret)
609 {
610 uint16_t insn = lduw_code(addr);
611 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __FUNCTION__, v1, addr,
612 insn);
613 if ((insn & 0xf0ff) == 0xd000) {
614 uint32_t l, insn2, b1, b2, d1, d2;
615 l = v1 & 0xff;
616 insn2 = ldl_code(addr + 2);
617 b1 = (insn2 >> 28) & 0xf;
618 b2 = (insn2 >> 12) & 0xf;
619 d1 = (insn2 >> 16) & 0xfff;
620 d2 = insn2 & 0xfff;
621 switch (insn & 0xf00) {
622 case 0x200:
623 helper_mvc(l, get_address(0, b1, d1), get_address(0, b2, d2));
624 break;
625 case 0x500:
626 cc = helper_clc(l, get_address(0, b1, d1), get_address(0, b2, d2));
627 break;
628 case 0x700:
629 cc = helper_xc(l, get_address(0, b1, d1), get_address(0, b2, d2));
630 break;
631 default:
632 goto abort;
633 break;
634 }
635 } else if ((insn & 0xff00) == 0x0a00) {
636 /* supervisor call */
637 HELPER_LOG("%s: svc %ld via execute\n", __FUNCTION__, (insn|v1) & 0xff);
638 env->psw.addr = ret - 4;
639 env->int_svc_code = (insn|v1) & 0xff;
640 env->int_svc_ilc = 4;
641 helper_exception(EXCP_SVC);
642 } else if ((insn & 0xff00) == 0xbf00) {
643 uint32_t insn2, r1, r3, b2, d2;
644 insn2 = ldl_code(addr + 2);
645 r1 = (insn2 >> 20) & 0xf;
646 r3 = (insn2 >> 16) & 0xf;
647 b2 = (insn2 >> 12) & 0xf;
648 d2 = insn2 & 0xfff;
649 cc = helper_icm(r1, get_address(0, b2, d2), r3);
650 } else {
651 abort:
652 cpu_abort(env, "EXECUTE on instruction prefix 0x%x not implemented\n",
653 insn);
654 }
655 return cc;
656 }
657
658 /* absolute value 32-bit */
659 uint32_t HELPER(abs_i32)(int32_t val)
660 {
661 if (val < 0) {
662 return -val;
663 } else {
664 return val;
665 }
666 }
667
668 /* negative absolute value 32-bit */
669 int32_t HELPER(nabs_i32)(int32_t val)
670 {
671 if (val < 0) {
672 return val;
673 } else {
674 return -val;
675 }
676 }
677
678 /* absolute value 64-bit */
679 uint64_t HELPER(abs_i64)(int64_t val)
680 {
681 HELPER_LOG("%s: val 0x%" PRIx64 "\n", __FUNCTION__, val);
682
683 if (val < 0) {
684 return -val;
685 } else {
686 return val;
687 }
688 }
689
690 /* negative absolute value 64-bit */
691 int64_t HELPER(nabs_i64)(int64_t val)
692 {
693 if (val < 0) {
694 return val;
695 } else {
696 return -val;
697 }
698 }
699
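/* ADD LOGICAL WITH CARRY: after an unsigned add the architecture encodes
   the carry in the condition code (cc 0/1: no carry, cc 2/3: carry), so
   the helper below simply adds one more when "cc & 2" is set. */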
700 /* add with carry 32-bit unsigned */
701 uint32_t HELPER(addc_u32)(uint32_t cc, uint32_t v1, uint32_t v2)
702 {
703 uint32_t res;
704
705 res = v1 + v2;
706 if (cc & 2) {
707 res++;
708 }
709
710 return res;
711 }
712
   713 /* store characters under mask high; operates on the upper half of r1 */
714 void HELPER(stcmh)(uint32_t r1, uint64_t address, uint32_t mask)
715 {
716 int pos = 56; /* top of the upper half of r1 */
717
718 while (mask) {
719 if (mask & 8) {
720 stb(address, (env->regs[r1] >> pos) & 0xff);
721 address++;
722 }
723 mask = (mask << 1) & 0xf;
724 pos -= 8;
725 }
726 }
727
728 /* insert character under mask high; same as icm, but operates on the
729 upper half of r1 */
730 uint32_t HELPER(icmh)(uint32_t r1, uint64_t address, uint32_t mask)
731 {
732 int pos = 56; /* top of the upper half of r1 */
733 uint64_t rmask = 0xff00000000000000ULL;
734 uint8_t val = 0;
735 int ccd = 0;
736 uint32_t cc = 0;
737
738 while (mask) {
739 if (mask & 8) {
740 env->regs[r1] &= ~rmask;
741 val = ldub(address);
742 if ((val & 0x80) && !ccd) {
743 cc = 1;
744 }
745 ccd = 1;
746 if (val && cc == 0) {
747 cc = 2;
748 }
749 env->regs[r1] |= (uint64_t)val << pos;
750 address++;
751 }
752 mask = (mask << 1) & 0xf;
753 pos -= 8;
754 rmask >>= 8;
755 }
756
757 return cc;
758 }
759
760 /* insert psw mask and condition code into r1 */
761 void HELPER(ipm)(uint32_t cc, uint32_t r1)
762 {
763 uint64_t r = env->regs[r1];
764
765 r &= 0xffffffff00ffffffULL;
766 r |= (cc << 28) | ( (env->psw.mask >> 40) & 0xf );
767 env->regs[r1] = r;
768 HELPER_LOG("%s: cc %d psw.mask 0x%lx r1 0x%lx\n", __FUNCTION__,
769 cc, env->psw.mask, r);
770 }
771
772 /* load access registers r1 to r3 from memory at a2 */
773 void HELPER(lam)(uint32_t r1, uint64_t a2, uint32_t r3)
774 {
775 int i;
776
777 for (i = r1;; i = (i + 1) % 16) {
778 env->aregs[i] = ldl(a2);
779 a2 += 4;
780
781 if (i == r3) {
782 break;
783 }
784 }
785 }
786
787 /* store access registers r1 to r3 in memory at a2 */
788 void HELPER(stam)(uint32_t r1, uint64_t a2, uint32_t r3)
789 {
790 int i;
791
792 for (i = r1;; i = (i + 1) % 16) {
793 stl(a2, env->aregs[i]);
794 a2 += 4;
795
796 if (i == r3) {
797 break;
798 }
799 }
800 }
801
802 /* move long */
803 uint32_t HELPER(mvcl)(uint32_t r1, uint32_t r2)
804 {
805 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
806 uint64_t dest = get_address_31fix(r1);
807 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
808 uint64_t src = get_address_31fix(r2);
809 uint8_t pad = src >> 24;
810 uint8_t v;
811 uint32_t cc;
812
813 if (destlen == srclen) {
814 cc = 0;
815 } else if (destlen < srclen) {
816 cc = 1;
817 } else {
818 cc = 2;
819 }
820
821 if (srclen > destlen) {
822 srclen = destlen;
823 }
824
825 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
826 v = ldub(src);
827 stb(dest, v);
828 }
829
830 for (; destlen; dest++, destlen--) {
831 stb(dest, pad);
832 }
833
834 env->regs[r1 + 1] = destlen;
   835     /* can't use srclen here, we truncated it */
836 env->regs[r2 + 1] -= src - env->regs[r2];
837 env->regs[r1] = dest;
838 env->regs[r2] = src;
839
840 return cc;
841 }
842
   843 /* move long extended: another memcopy insn with more bells and whistles */
844 uint32_t HELPER(mvcle)(uint32_t r1, uint64_t a2, uint32_t r3)
845 {
846 uint64_t destlen = env->regs[r1 + 1];
847 uint64_t dest = env->regs[r1];
848 uint64_t srclen = env->regs[r3 + 1];
849 uint64_t src = env->regs[r3];
850 uint8_t pad = a2 & 0xff;
851 uint8_t v;
852 uint32_t cc;
853
854 if (!(env->psw.mask & PSW_MASK_64)) {
855 destlen = (uint32_t)destlen;
856 srclen = (uint32_t)srclen;
857 dest &= 0x7fffffff;
858 src &= 0x7fffffff;
859 }
860
861 if (destlen == srclen) {
862 cc = 0;
863 } else if (destlen < srclen) {
864 cc = 1;
865 } else {
866 cc = 2;
867 }
868
869 if (srclen > destlen) {
870 srclen = destlen;
871 }
872
873 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
874 v = ldub(src);
875 stb(dest, v);
876 }
877
878 for (; destlen; dest++, destlen--) {
879 stb(dest, pad);
880 }
881
882 env->regs[r1 + 1] = destlen;
   883     /* can't use srclen here, we truncated it */
884 /* FIXME: 31-bit mode! */
885 env->regs[r3 + 1] -= src - env->regs[r3];
886 env->regs[r1] = dest;
887 env->regs[r3] = src;
888
889 return cc;
890 }
891
   892 /* compare logical long extended: memcompare insn with padding */
893 uint32_t HELPER(clcle)(uint32_t r1, uint64_t a2, uint32_t r3)
894 {
895 uint64_t destlen = env->regs[r1 + 1];
896 uint64_t dest = get_address_31fix(r1);
897 uint64_t srclen = env->regs[r3 + 1];
898 uint64_t src = get_address_31fix(r3);
899 uint8_t pad = a2 & 0xff;
900 uint8_t v1 = 0,v2 = 0;
901 uint32_t cc = 0;
902
903 if (!(destlen || srclen)) {
904 return cc;
905 }
906
907 if (srclen > destlen) {
908 srclen = destlen;
909 }
910
911 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
912 v1 = srclen ? ldub(src) : pad;
913 v2 = destlen ? ldub(dest) : pad;
914 if (v1 != v2) {
915 cc = (v1 < v2) ? 1 : 2;
916 break;
917 }
918 }
919
920 env->regs[r1 + 1] = destlen;
   921     /* can't use srclen here, we truncated it */
922 env->regs[r3 + 1] -= src - env->regs[r3];
923 env->regs[r1] = dest;
924 env->regs[r3] = src;
925
926 return cc;
927 }
928
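/* SUBTRACT LOGICAL WITH BORROW: the subtraction is implemented as
   v1 + ~v2 + carry-in, with the carry-in taken from bit 1 of the previous
   condition code.  In this convention cc values 2/3 mean "no borrow
   occurred" and 0/1 mean a borrow, which is what the "cc & 2" tests and
   the returned codes below reflect. */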
929 /* subtract unsigned v2 from v1 with borrow */
930 uint32_t HELPER(slb)(uint32_t cc, uint32_t r1, uint32_t v2)
931 {
932 uint32_t v1 = env->regs[r1];
933 uint32_t res = v1 + (~v2) + (cc >> 1);
934
935 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | res;
936 if (cc & 2) {
937 /* borrow */
938 return v1 ? 1 : 0;
939 } else {
940 return v1 ? 3 : 2;
941 }
942 }
943
944 /* subtract unsigned v2 from v1 with borrow */
945 uint32_t HELPER(slbg)(uint32_t cc, uint32_t r1, uint64_t v1, uint64_t v2)
946 {
947 uint64_t res = v1 + (~v2) + (cc >> 1);
948
949 env->regs[r1] = res;
950 if (cc & 2) {
951 /* borrow */
952 return v1 ? 1 : 0;
953 } else {
954 return v1 ? 3 : 2;
955 }
956 }
957
958 static inline int float_comp_to_cc(int float_compare)
959 {
960 switch (float_compare) {
961 case float_relation_equal:
962 return 0;
963 case float_relation_less:
964 return 1;
965 case float_relation_greater:
966 return 2;
967 case float_relation_unordered:
968 return 3;
969 default:
970 cpu_abort(env, "unknown return value for float compare\n");
971 }
972 }
973
974 /* condition codes for binary FP ops */
975 static uint32_t set_cc_f32(float32 v1, float32 v2)
976 {
977 return float_comp_to_cc(float32_compare_quiet(v1, v2, &env->fpu_status));
978 }
979
980 static uint32_t set_cc_f64(float64 v1, float64 v2)
981 {
982 return float_comp_to_cc(float64_compare_quiet(v1, v2, &env->fpu_status));
983 }
984
985 /* condition codes for unary FP ops */
986 static uint32_t set_cc_nz_f32(float32 v)
987 {
988 if (float32_is_any_nan(v)) {
989 return 3;
990 } else if (float32_is_zero(v)) {
991 return 0;
992 } else if (float32_is_neg(v)) {
993 return 1;
994 } else {
995 return 2;
996 }
997 }
998
999 static uint32_t set_cc_nz_f64(float64 v)
1000 {
1001 if (float64_is_any_nan(v)) {
1002 return 3;
1003 } else if (float64_is_zero(v)) {
1004 return 0;
1005 } else if (float64_is_neg(v)) {
1006 return 1;
1007 } else {
1008 return 2;
1009 }
1010 }
1011
1012 static uint32_t set_cc_nz_f128(float128 v)
1013 {
1014 if (float128_is_any_nan(v)) {
1015 return 3;
1016 } else if (float128_is_zero(v)) {
1017 return 0;
1018 } else if (float128_is_neg(v)) {
1019 return 1;
1020 } else {
1021 return 2;
1022 }
1023 }
1024
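/* In the floating-point helpers below, extended (128-bit) BFP values live
   in a pair of 64-bit floating-point registers, f and f + 2.  The
   CPU_QuadU union is used to glue the two halves into a float128 for the
   softfloat routines and to split results back into the register pair. */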
1025 /* convert 32-bit int to 64-bit float */
1026 void HELPER(cdfbr)(uint32_t f1, int32_t v2)
1027 {
1028 HELPER_LOG("%s: converting %d to f%d\n", __FUNCTION__, v2, f1);
1029 env->fregs[f1].d = int32_to_float64(v2, &env->fpu_status);
1030 }
1031
1032 /* convert 32-bit int to 128-bit float */
1033 void HELPER(cxfbr)(uint32_t f1, int32_t v2)
1034 {
1035 CPU_QuadU v1;
1036 v1.q = int32_to_float128(v2, &env->fpu_status);
1037 env->fregs[f1].ll = v1.ll.upper;
1038 env->fregs[f1 + 2].ll = v1.ll.lower;
1039 }
1040
1041 /* convert 64-bit int to 32-bit float */
1042 void HELPER(cegbr)(uint32_t f1, int64_t v2)
1043 {
1044 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1045 env->fregs[f1].l.upper = int64_to_float32(v2, &env->fpu_status);
1046 }
1047
1048 /* convert 64-bit int to 64-bit float */
1049 void HELPER(cdgbr)(uint32_t f1, int64_t v2)
1050 {
1051 HELPER_LOG("%s: converting %ld to f%d\n", __FUNCTION__, v2, f1);
1052 env->fregs[f1].d = int64_to_float64(v2, &env->fpu_status);
1053 }
1054
1055 /* convert 64-bit int to 128-bit float */
1056 void HELPER(cxgbr)(uint32_t f1, int64_t v2)
1057 {
1058 CPU_QuadU x1;
1059 x1.q = int64_to_float128(v2, &env->fpu_status);
1060 HELPER_LOG("%s: converted %ld to 0x%lx and 0x%lx\n", __FUNCTION__, v2,
1061 x1.ll.upper, x1.ll.lower);
1062 env->fregs[f1].ll = x1.ll.upper;
1063 env->fregs[f1 + 2].ll = x1.ll.lower;
1064 }
1065
1066 /* convert 32-bit int to 32-bit float */
1067 void HELPER(cefbr)(uint32_t f1, int32_t v2)
1068 {
1069 env->fregs[f1].l.upper = int32_to_float32(v2, &env->fpu_status);
1070 HELPER_LOG("%s: converting %d to 0x%d in f%d\n", __FUNCTION__, v2,
1071 env->fregs[f1].l.upper, f1);
1072 }
1073
1074 /* 32-bit FP addition RR */
1075 uint32_t HELPER(aebr)(uint32_t f1, uint32_t f2)
1076 {
1077 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1078 env->fregs[f2].l.upper,
1079 &env->fpu_status);
1080 HELPER_LOG("%s: adding 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1081 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1082
1083 return set_cc_nz_f32(env->fregs[f1].l.upper);
1084 }
1085
1086 /* 64-bit FP addition RR */
1087 uint32_t HELPER(adbr)(uint32_t f1, uint32_t f2)
1088 {
1089 env->fregs[f1].d = float64_add(env->fregs[f1].d, env->fregs[f2].d,
1090 &env->fpu_status);
1091 HELPER_LOG("%s: adding 0x%ld resulting in 0x%ld in f%d\n", __FUNCTION__,
1092 env->fregs[f2].d, env->fregs[f1].d, f1);
1093
1094 return set_cc_nz_f64(env->fregs[f1].d);
1095 }
1096
1097 /* 32-bit FP subtraction RR */
1098 uint32_t HELPER(sebr)(uint32_t f1, uint32_t f2)
1099 {
1100 env->fregs[f1].l.upper = float32_sub(env->fregs[f1].l.upper,
1101 env->fregs[f2].l.upper,
1102 &env->fpu_status);
  1103     HELPER_LOG("%s: subtracting 0x%d resulting in 0x%d in f%d\n", __FUNCTION__,
1104 env->fregs[f2].l.upper, env->fregs[f1].l.upper, f1);
1105
1106 return set_cc_nz_f32(env->fregs[f1].l.upper);
1107 }
1108
1109 /* 64-bit FP subtraction RR */
1110 uint32_t HELPER(sdbr)(uint32_t f1, uint32_t f2)
1111 {
1112 env->fregs[f1].d = float64_sub(env->fregs[f1].d, env->fregs[f2].d,
1113 &env->fpu_status);
1114 HELPER_LOG("%s: subtracting 0x%ld resulting in 0x%ld in f%d\n",
1115 __FUNCTION__, env->fregs[f2].d, env->fregs[f1].d, f1);
1116
1117 return set_cc_nz_f64(env->fregs[f1].d);
1118 }
1119
1120 /* 32-bit FP division RR */
1121 void HELPER(debr)(uint32_t f1, uint32_t f2)
1122 {
1123 env->fregs[f1].l.upper = float32_div(env->fregs[f1].l.upper,
1124 env->fregs[f2].l.upper,
1125 &env->fpu_status);
1126 }
1127
1128 /* 128-bit FP division RR */
1129 void HELPER(dxbr)(uint32_t f1, uint32_t f2)
1130 {
1131 CPU_QuadU v1;
1132 v1.ll.upper = env->fregs[f1].ll;
1133 v1.ll.lower = env->fregs[f1 + 2].ll;
1134 CPU_QuadU v2;
1135 v2.ll.upper = env->fregs[f2].ll;
1136 v2.ll.lower = env->fregs[f2 + 2].ll;
1137 CPU_QuadU res;
1138 res.q = float128_div(v1.q, v2.q, &env->fpu_status);
1139 env->fregs[f1].ll = res.ll.upper;
1140 env->fregs[f1 + 2].ll = res.ll.lower;
1141 }
1142
1143 /* 64-bit FP multiplication RR */
1144 void HELPER(mdbr)(uint32_t f1, uint32_t f2)
1145 {
1146 env->fregs[f1].d = float64_mul(env->fregs[f1].d, env->fregs[f2].d,
1147 &env->fpu_status);
1148 }
1149
1150 /* 128-bit FP multiplication RR */
1151 void HELPER(mxbr)(uint32_t f1, uint32_t f2)
1152 {
1153 CPU_QuadU v1;
1154 v1.ll.upper = env->fregs[f1].ll;
1155 v1.ll.lower = env->fregs[f1 + 2].ll;
1156 CPU_QuadU v2;
1157 v2.ll.upper = env->fregs[f2].ll;
1158 v2.ll.lower = env->fregs[f2 + 2].ll;
1159 CPU_QuadU res;
1160 res.q = float128_mul(v1.q, v2.q, &env->fpu_status);
1161 env->fregs[f1].ll = res.ll.upper;
1162 env->fregs[f1 + 2].ll = res.ll.lower;
1163 }
1164
1165 /* convert 32-bit float to 64-bit float */
1166 void HELPER(ldebr)(uint32_t r1, uint32_t r2)
1167 {
1168 env->fregs[r1].d = float32_to_float64(env->fregs[r2].l.upper,
1169 &env->fpu_status);
1170 }
1171
1172 /* convert 128-bit float to 64-bit float */
1173 void HELPER(ldxbr)(uint32_t f1, uint32_t f2)
1174 {
1175 CPU_QuadU x2;
1176 x2.ll.upper = env->fregs[f2].ll;
1177 x2.ll.lower = env->fregs[f2 + 2].ll;
1178 env->fregs[f1].d = float128_to_float64(x2.q, &env->fpu_status);
1179 HELPER_LOG("%s: to 0x%ld\n", __FUNCTION__, env->fregs[f1].d);
1180 }
1181
1182 /* convert 64-bit float to 128-bit float */
1183 void HELPER(lxdbr)(uint32_t f1, uint32_t f2)
1184 {
1185 CPU_QuadU res;
1186 res.q = float64_to_float128(env->fregs[f2].d, &env->fpu_status);
1187 env->fregs[f1].ll = res.ll.upper;
1188 env->fregs[f1 + 2].ll = res.ll.lower;
1189 }
1190
1191 /* convert 64-bit float to 32-bit float */
1192 void HELPER(ledbr)(uint32_t f1, uint32_t f2)
1193 {
1194 float64 d2 = env->fregs[f2].d;
1195 env->fregs[f1].l.upper = float64_to_float32(d2, &env->fpu_status);
1196 }
1197
1198 /* convert 128-bit float to 32-bit float */
1199 void HELPER(lexbr)(uint32_t f1, uint32_t f2)
1200 {
1201 CPU_QuadU x2;
1202 x2.ll.upper = env->fregs[f2].ll;
1203 x2.ll.lower = env->fregs[f2 + 2].ll;
1204 env->fregs[f1].l.upper = float128_to_float32(x2.q, &env->fpu_status);
1205 HELPER_LOG("%s: to 0x%d\n", __FUNCTION__, env->fregs[f1].l.upper);
1206 }
1207
1208 /* absolute value of 32-bit float */
1209 uint32_t HELPER(lpebr)(uint32_t f1, uint32_t f2)
1210 {
1211 float32 v1;
  1212     float32 v2 = env->fregs[f2].l.upper;
1213 v1 = float32_abs(v2);
  1214     env->fregs[f1].l.upper = v1;
1215 return set_cc_nz_f32(v1);
1216 }
1217
1218 /* absolute value of 64-bit float */
1219 uint32_t HELPER(lpdbr)(uint32_t f1, uint32_t f2)
1220 {
1221 float64 v1;
1222 float64 v2 = env->fregs[f2].d;
1223 v1 = float64_abs(v2);
1224 env->fregs[f1].d = v1;
1225 return set_cc_nz_f64(v1);
1226 }
1227
1228 /* absolute value of 128-bit float */
1229 uint32_t HELPER(lpxbr)(uint32_t f1, uint32_t f2)
1230 {
1231 CPU_QuadU v1;
1232 CPU_QuadU v2;
1233 v2.ll.upper = env->fregs[f2].ll;
1234 v2.ll.lower = env->fregs[f2 + 2].ll;
1235 v1.q = float128_abs(v2.q);
1236 env->fregs[f1].ll = v1.ll.upper;
1237 env->fregs[f1 + 2].ll = v1.ll.lower;
1238 return set_cc_nz_f128(v1.q);
1239 }
1240
1241 /* load and test 64-bit float */
1242 uint32_t HELPER(ltdbr)(uint32_t f1, uint32_t f2)
1243 {
1244 env->fregs[f1].d = env->fregs[f2].d;
1245 return set_cc_nz_f64(env->fregs[f1].d);
1246 }
1247
1248 /* load and test 32-bit float */
1249 uint32_t HELPER(ltebr)(uint32_t f1, uint32_t f2)
1250 {
1251 env->fregs[f1].l.upper = env->fregs[f2].l.upper;
1252 return set_cc_nz_f32(env->fregs[f1].l.upper);
1253 }
1254
1255 /* load and test 128-bit float */
1256 uint32_t HELPER(ltxbr)(uint32_t f1, uint32_t f2)
1257 {
1258 CPU_QuadU x;
1259 x.ll.upper = env->fregs[f2].ll;
1260 x.ll.lower = env->fregs[f2 + 2].ll;
1261 env->fregs[f1].ll = x.ll.upper;
1262 env->fregs[f1 + 2].ll = x.ll.lower;
1263 return set_cc_nz_f128(x.q);
1264 }
1265
1266 /* load complement of 32-bit float */
1267 uint32_t HELPER(lcebr)(uint32_t f1, uint32_t f2)
1268 {
1269 env->fregs[f1].l.upper = float32_chs(env->fregs[f2].l.upper);
1270
1271 return set_cc_nz_f32(env->fregs[f1].l.upper);
1272 }
1273
1274 /* load complement of 64-bit float */
1275 uint32_t HELPER(lcdbr)(uint32_t f1, uint32_t f2)
1276 {
1277 env->fregs[f1].d = float64_chs(env->fregs[f2].d);
1278
1279 return set_cc_nz_f64(env->fregs[f1].d);
1280 }
1281
1282 /* load complement of 128-bit float */
1283 uint32_t HELPER(lcxbr)(uint32_t f1, uint32_t f2)
1284 {
1285 CPU_QuadU x1, x2;
1286 x2.ll.upper = env->fregs[f2].ll;
1287 x2.ll.lower = env->fregs[f2 + 2].ll;
1288 x1.q = float128_chs(x2.q);
1289 env->fregs[f1].ll = x1.ll.upper;
1290 env->fregs[f1 + 2].ll = x1.ll.lower;
1291 return set_cc_nz_f128(x1.q);
1292 }
1293
1294 /* 32-bit FP addition RM */
1295 void HELPER(aeb)(uint32_t f1, uint32_t val)
1296 {
1297 float32 v1 = env->fregs[f1].l.upper;
1298 CPU_FloatU v2;
1299 v2.l = val;
1300 HELPER_LOG("%s: adding 0x%d from f%d and 0x%d\n", __FUNCTION__,
1301 v1, f1, v2.f);
1302 env->fregs[f1].l.upper = float32_add(v1, v2.f, &env->fpu_status);
1303 }
1304
1305 /* 32-bit FP division RM */
1306 void HELPER(deb)(uint32_t f1, uint32_t val)
1307 {
1308 float32 v1 = env->fregs[f1].l.upper;
1309 CPU_FloatU v2;
1310 v2.l = val;
1311 HELPER_LOG("%s: dividing 0x%d from f%d by 0x%d\n", __FUNCTION__,
1312 v1, f1, v2.f);
1313 env->fregs[f1].l.upper = float32_div(v1, v2.f, &env->fpu_status);
1314 }
1315
1316 /* 32-bit FP multiplication RM */
1317 void HELPER(meeb)(uint32_t f1, uint32_t val)
1318 {
1319 float32 v1 = env->fregs[f1].l.upper;
1320 CPU_FloatU v2;
1321 v2.l = val;
1322 HELPER_LOG("%s: multiplying 0x%d from f%d and 0x%d\n", __FUNCTION__,
1323 v1, f1, v2.f);
1324 env->fregs[f1].l.upper = float32_mul(v1, v2.f, &env->fpu_status);
1325 }
1326
1327 /* 32-bit FP compare RR */
1328 uint32_t HELPER(cebr)(uint32_t f1, uint32_t f2)
1329 {
1330 float32 v1 = env->fregs[f1].l.upper;
  1331     float32 v2 = env->fregs[f2].l.upper;
1332 HELPER_LOG("%s: comparing 0x%d from f%d and 0x%d\n", __FUNCTION__,
1333 v1, f1, v2);
1334 return set_cc_f32(v1, v2);
1335 }
1336
1337 /* 64-bit FP compare RR */
1338 uint32_t HELPER(cdbr)(uint32_t f1, uint32_t f2)
1339 {
1340 float64 v1 = env->fregs[f1].d;
  1341     float64 v2 = env->fregs[f2].d;
1342 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%ld\n", __FUNCTION__,
1343 v1, f1, v2);
1344 return set_cc_f64(v1, v2);
1345 }
1346
1347 /* 128-bit FP compare RR */
1348 uint32_t HELPER(cxbr)(uint32_t f1, uint32_t f2)
1349 {
1350 CPU_QuadU v1;
1351 v1.ll.upper = env->fregs[f1].ll;
1352 v1.ll.lower = env->fregs[f1 + 2].ll;
1353 CPU_QuadU v2;
1354 v2.ll.upper = env->fregs[f2].ll;
1355 v2.ll.lower = env->fregs[f2 + 2].ll;
1356
1357 return float_comp_to_cc(float128_compare_quiet(v1.q, v2.q,
1358 &env->fpu_status));
1359 }
1360
1361 /* 64-bit FP compare RM */
1362 uint32_t HELPER(cdb)(uint32_t f1, uint64_t a2)
1363 {
1364 float64 v1 = env->fregs[f1].d;
1365 CPU_DoubleU v2;
1366 v2.ll = ldq(a2);
1367 HELPER_LOG("%s: comparing 0x%ld from f%d and 0x%lx\n", __FUNCTION__, v1,
1368 f1, v2.d);
1369 return set_cc_f64(v1, v2.d);
1370 }
1371
1372 /* 64-bit FP addition RM */
1373 uint32_t HELPER(adb)(uint32_t f1, uint64_t a2)
1374 {
1375 float64 v1 = env->fregs[f1].d;
1376 CPU_DoubleU v2;
1377 v2.ll = ldq(a2);
1378 HELPER_LOG("%s: adding 0x%lx from f%d and 0x%lx\n", __FUNCTION__,
1379 v1, f1, v2.d);
1380 env->fregs[f1].d = v1 = float64_add(v1, v2.d, &env->fpu_status);
1381 return set_cc_nz_f64(v1);
1382 }
1383
1384 /* 32-bit FP subtraction RM */
1385 void HELPER(seb)(uint32_t f1, uint32_t val)
1386 {
1387 float32 v1 = env->fregs[f1].l.upper;
1388 CPU_FloatU v2;
1389 v2.l = val;
1390 env->fregs[f1].l.upper = float32_sub(v1, v2.f, &env->fpu_status);
1391 }
1392
1393 /* 64-bit FP subtraction RM */
1394 uint32_t HELPER(sdb)(uint32_t f1, uint64_t a2)
1395 {
1396 float64 v1 = env->fregs[f1].d;
1397 CPU_DoubleU v2;
1398 v2.ll = ldq(a2);
1399 env->fregs[f1].d = v1 = float64_sub(v1, v2.d, &env->fpu_status);
1400 return set_cc_nz_f64(v1);
1401 }
1402
1403 /* 64-bit FP multiplication RM */
1404 void HELPER(mdb)(uint32_t f1, uint64_t a2)
1405 {
1406 float64 v1 = env->fregs[f1].d;
1407 CPU_DoubleU v2;
1408 v2.ll = ldq(a2);
1409 HELPER_LOG("%s: multiplying 0x%lx from f%d and 0x%ld\n", __FUNCTION__,
1410 v1, f1, v2.d);
1411 env->fregs[f1].d = float64_mul(v1, v2.d, &env->fpu_status);
1412 }
1413
1414 /* 64-bit FP division RM */
1415 void HELPER(ddb)(uint32_t f1, uint64_t a2)
1416 {
1417 float64 v1 = env->fregs[f1].d;
1418 CPU_DoubleU v2;
1419 v2.ll = ldq(a2);
1420 HELPER_LOG("%s: dividing 0x%lx from f%d by 0x%ld\n", __FUNCTION__,
1421 v1, f1, v2.d);
1422 env->fregs[f1].d = float64_div(v1, v2.d, &env->fpu_status);
1423 }
1424
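/* The m3 field of the convert-to-fixed instructions selects the BFP
   rounding mode roughly as follows: 0 leaves the current mode in effect,
   1 is "biased round to nearest" (ties away from zero, approximated here
   by round-to-nearest-even), 4 is round to nearest, 5 toward zero,
   6 toward +infinity and 7 toward -infinity. */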
1425 static void set_round_mode(int m3)
1426 {
1427 switch (m3) {
1428 case 0:
1429 /* current mode */
1430 break;
1431 case 1:
  1432         /* biased round to nearest */
1433 case 4:
1434 /* round to nearest */
1435 set_float_rounding_mode(float_round_nearest_even, &env->fpu_status);
1436 break;
1437 case 5:
1438 /* round to zero */
1439 set_float_rounding_mode(float_round_to_zero, &env->fpu_status);
1440 break;
1441 case 6:
1442 /* round to +inf */
1443 set_float_rounding_mode(float_round_up, &env->fpu_status);
1444 break;
1445 case 7:
1446 /* round to -inf */
1447 set_float_rounding_mode(float_round_down, &env->fpu_status);
1448 break;
1449 }
1450 }
1451
1452 /* convert 32-bit float to 64-bit int */
1453 uint32_t HELPER(cgebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1454 {
1455 float32 v2 = env->fregs[f2].l.upper;
1456 set_round_mode(m3);
1457 env->regs[r1] = float32_to_int64(v2, &env->fpu_status);
1458 return set_cc_nz_f32(v2);
1459 }
1460
1461 /* convert 64-bit float to 64-bit int */
1462 uint32_t HELPER(cgdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1463 {
1464 float64 v2 = env->fregs[f2].d;
1465 set_round_mode(m3);
1466 env->regs[r1] = float64_to_int64(v2, &env->fpu_status);
1467 return set_cc_nz_f64(v2);
1468 }
1469
1470 /* convert 128-bit float to 64-bit int */
1471 uint32_t HELPER(cgxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1472 {
1473 CPU_QuadU v2;
1474 v2.ll.upper = env->fregs[f2].ll;
1475 v2.ll.lower = env->fregs[f2 + 2].ll;
1476 set_round_mode(m3);
1477 env->regs[r1] = float128_to_int64(v2.q, &env->fpu_status);
1478 if (float128_is_any_nan(v2.q)) {
1479 return 3;
1480 } else if (float128_is_zero(v2.q)) {
1481 return 0;
1482 } else if (float128_is_neg(v2.q)) {
1483 return 1;
1484 } else {
1485 return 2;
1486 }
1487 }
1488
1489 /* convert 32-bit float to 32-bit int */
1490 uint32_t HELPER(cfebr)(uint32_t r1, uint32_t f2, uint32_t m3)
1491 {
1492 float32 v2 = env->fregs[f2].l.upper;
1493 set_round_mode(m3);
1494 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1495 float32_to_int32(v2, &env->fpu_status);
1496 return set_cc_nz_f32(v2);
1497 }
1498
1499 /* convert 64-bit float to 32-bit int */
1500 uint32_t HELPER(cfdbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1501 {
1502 float64 v2 = env->fregs[f2].d;
1503 set_round_mode(m3);
1504 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1505 float64_to_int32(v2, &env->fpu_status);
1506 return set_cc_nz_f64(v2);
1507 }
1508
1509 /* convert 128-bit float to 32-bit int */
1510 uint32_t HELPER(cfxbr)(uint32_t r1, uint32_t f2, uint32_t m3)
1511 {
1512 CPU_QuadU v2;
1513 v2.ll.upper = env->fregs[f2].ll;
1514 v2.ll.lower = env->fregs[f2 + 2].ll;
1515 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) |
1516 float128_to_int32(v2.q, &env->fpu_status);
1517 return set_cc_nz_f128(v2.q);
1518 }
1519
1520 /* load 32-bit FP zero */
1521 void HELPER(lzer)(uint32_t f1)
1522 {
1523 env->fregs[f1].l.upper = float32_zero;
1524 }
1525
1526 /* load 64-bit FP zero */
1527 void HELPER(lzdr)(uint32_t f1)
1528 {
1529 env->fregs[f1].d = float64_zero;
1530 }
1531
1532 /* load 128-bit FP zero */
1533 void HELPER(lzxr)(uint32_t f1)
1534 {
1535 CPU_QuadU x;
1536 x.q = float64_to_float128(float64_zero, &env->fpu_status);
1537 env->fregs[f1].ll = x.ll.upper;
  1538     env->fregs[f1 + 2].ll = x.ll.lower;
1539 }
1540
1541 /* 128-bit FP subtraction RR */
1542 uint32_t HELPER(sxbr)(uint32_t f1, uint32_t f2)
1543 {
1544 CPU_QuadU v1;
1545 v1.ll.upper = env->fregs[f1].ll;
1546 v1.ll.lower = env->fregs[f1 + 2].ll;
1547 CPU_QuadU v2;
1548 v2.ll.upper = env->fregs[f2].ll;
1549 v2.ll.lower = env->fregs[f2 + 2].ll;
1550 CPU_QuadU res;
1551 res.q = float128_sub(v1.q, v2.q, &env->fpu_status);
1552 env->fregs[f1].ll = res.ll.upper;
1553 env->fregs[f1 + 2].ll = res.ll.lower;
1554 return set_cc_nz_f128(res.q);
1555 }
1556
1557 /* 128-bit FP addition RR */
1558 uint32_t HELPER(axbr)(uint32_t f1, uint32_t f2)
1559 {
1560 CPU_QuadU v1;
1561 v1.ll.upper = env->fregs[f1].ll;
1562 v1.ll.lower = env->fregs[f1 + 2].ll;
1563 CPU_QuadU v2;
1564 v2.ll.upper = env->fregs[f2].ll;
1565 v2.ll.lower = env->fregs[f2 + 2].ll;
1566 CPU_QuadU res;
1567 res.q = float128_add(v1.q, v2.q, &env->fpu_status);
1568 env->fregs[f1].ll = res.ll.upper;
1569 env->fregs[f1 + 2].ll = res.ll.lower;
1570 return set_cc_nz_f128(res.q);
1571 }
1572
1573 /* 32-bit FP multiplication RR */
1574 void HELPER(meebr)(uint32_t f1, uint32_t f2)
1575 {
1576 env->fregs[f1].l.upper = float32_mul(env->fregs[f1].l.upper,
1577 env->fregs[f2].l.upper,
1578 &env->fpu_status);
1579 }
1580
1581 /* 64-bit FP division RR */
1582 void HELPER(ddbr)(uint32_t f1, uint32_t f2)
1583 {
1584 env->fregs[f1].d = float64_div(env->fregs[f1].d, env->fregs[f2].d,
1585 &env->fpu_status);
1586 }
1587
1588 /* 64-bit FP multiply and add RM */
1589 void HELPER(madb)(uint32_t f1, uint64_t a2, uint32_t f3)
1590 {
1591 HELPER_LOG("%s: f1 %d a2 0x%lx f3 %d\n", __FUNCTION__, f1, a2, f3);
1592 CPU_DoubleU v2;
1593 v2.ll = ldq(a2);
1594 env->fregs[f1].d = float64_add(env->fregs[f1].d,
1595 float64_mul(v2.d, env->fregs[f3].d,
1596 &env->fpu_status),
1597 &env->fpu_status);
1598 }
1599
1600 /* 64-bit FP multiply and add RR */
1601 void HELPER(madbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1602 {
1603 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1604 env->fregs[f1].d = float64_add(float64_mul(env->fregs[f2].d,
1605 env->fregs[f3].d,
1606 &env->fpu_status),
1607 env->fregs[f1].d, &env->fpu_status);
1608 }
1609
1610 /* 64-bit FP multiply and subtract RR */
1611 void HELPER(msdbr)(uint32_t f1, uint32_t f3, uint32_t f2)
1612 {
1613 HELPER_LOG("%s: f1 %d f2 %d f3 %d\n", __FUNCTION__, f1, f2, f3);
1614 env->fregs[f1].d = float64_sub(float64_mul(env->fregs[f2].d,
1615 env->fregs[f3].d,
1616 &env->fpu_status),
1617 env->fregs[f1].d, &env->fpu_status);
1618 }
1619
1620 /* 32-bit FP multiply and add RR */
1621 void HELPER(maebr)(uint32_t f1, uint32_t f3, uint32_t f2)
1622 {
1623 env->fregs[f1].l.upper = float32_add(env->fregs[f1].l.upper,
1624 float32_mul(env->fregs[f2].l.upper,
1625 env->fregs[f3].l.upper,
1626 &env->fpu_status),
1627 &env->fpu_status);
1628 }
1629
1630 /* convert 64-bit float to 128-bit float */
1631 void HELPER(lxdb)(uint32_t f1, uint64_t a2)
1632 {
1633 CPU_DoubleU v2;
1634 v2.ll = ldq(a2);
1635 CPU_QuadU v1;
1636 v1.q = float64_to_float128(v2.d, &env->fpu_status);
1637 env->fregs[f1].ll = v1.ll.upper;
1638 env->fregs[f1 + 2].ll = v1.ll.lower;
1639 }
1640
1641 /* test data class 32-bit */
1642 uint32_t HELPER(tceb)(uint32_t f1, uint64_t m2)
1643 {
1644 float32 v1 = env->fregs[f1].l.upper;
1645 int neg = float32_is_neg(v1);
1646 uint32_t cc = 0;
1647
1648 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, (long)v1, m2, neg);
1649 if ((float32_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1650 (float32_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1651 (float32_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1652 (float32_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1653 cc = 1;
1654 } else if (m2 & (1 << (9-neg))) {
1655 /* assume normalized number */
1656 cc = 1;
1657 }
1658
1659 /* FIXME: denormalized? */
1660 return cc;
1661 }
1662
1663 /* test data class 64-bit */
1664 uint32_t HELPER(tcdb)(uint32_t f1, uint64_t m2)
1665 {
1666 float64 v1 = env->fregs[f1].d;
1667 int neg = float64_is_neg(v1);
1668 uint32_t cc = 0;
1669
1670 HELPER_LOG("%s: v1 0x%lx m2 0x%lx neg %d\n", __FUNCTION__, v1, m2, neg);
1671 if ((float64_is_zero(v1) && (m2 & (1 << (11-neg)))) ||
1672 (float64_is_infinity(v1) && (m2 & (1 << (5-neg)))) ||
1673 (float64_is_any_nan(v1) && (m2 & (1 << (3-neg)))) ||
1674 (float64_is_signaling_nan(v1) && (m2 & (1 << (1-neg))))) {
1675 cc = 1;
1676 } else if (m2 & (1 << (9-neg))) {
1677 /* assume normalized number */
1678 cc = 1;
1679 }
1680 /* FIXME: denormalized? */
1681 return cc;
1682 }
1683
1684 /* test data class 128-bit */
1685 uint32_t HELPER(tcxb)(uint32_t f1, uint64_t m2)
1686 {
1687 CPU_QuadU v1;
1688 uint32_t cc = 0;
1689 v1.ll.upper = env->fregs[f1].ll;
1690 v1.ll.lower = env->fregs[f1 + 2].ll;
1691
1692 int neg = float128_is_neg(v1.q);
1693 if ((float128_is_zero(v1.q) && (m2 & (1 << (11-neg)))) ||
1694 (float128_is_infinity(v1.q) && (m2 & (1 << (5-neg)))) ||
1695 (float128_is_any_nan(v1.q) && (m2 & (1 << (3-neg)))) ||
1696 (float128_is_signaling_nan(v1.q) && (m2 & (1 << (1-neg))))) {
1697 cc = 1;
1698 } else if (m2 & (1 << (9-neg))) {
1699 /* assume normalized number */
1700 cc = 1;
1701 }
1702 /* FIXME: denormalized? */
1703 return cc;
1704 }
1705
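/* FIND LEFTMOST ONE: r1 receives the number of leading zero bits of the
   operand and r1 + 1 the operand with that leftmost one bit cleared.
   For example, v2 = 0x0000800000000000 yields r1 = 16, r1 + 1 = 0 and
   cc 2; a zero operand yields r1 = 64, r1 + 1 = 0 and cc 0. */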
1706 /* find leftmost one */
1707 uint32_t HELPER(flogr)(uint32_t r1, uint64_t v2)
1708 {
1709 uint64_t res = 0;
1710 uint64_t ov2 = v2;
1711
1712 while (!(v2 & 0x8000000000000000ULL) && v2) {
1713 v2 <<= 1;
1714 res++;
1715 }
1716
1717 if (!v2) {
1718 env->regs[r1] = 64;
1719 env->regs[r1 + 1] = 0;
1720 return 0;
1721 } else {
1722 env->regs[r1] = res;
1723 env->regs[r1 + 1] = ov2 & ~(0x8000000000000000ULL >> res);
1724 return 2;
1725 }
1726 }
1727
1728 /* square root 64-bit RR */
1729 void HELPER(sqdbr)(uint32_t f1, uint32_t f2)
1730 {
1731 env->fregs[f1].d = float64_sqrt(env->fregs[f2].d, &env->fpu_status);
1732 }
1733
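/* CHECKSUM accumulates 4-byte words into a 32-bit sum with end-around
   carry: whenever the running sum overflows 32 bits, cksm_overflow()
   folds the carry back into the low word.  E.g. 0xffffffff + 2 first
   gives 0x100000001, which is folded to 0x00000002.  On completion the
   length register r2 + 1 is cleared and the 32-bit result is stored in
   the low half of r1. */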
1734 static inline uint64_t cksm_overflow(uint64_t cksm)
1735 {
1736 if (cksm > 0xffffffffULL) {
1737 cksm &= 0xffffffffULL;
1738 cksm++;
1739 }
1740 return cksm;
1741 }
1742
1743 /* checksum */
1744 void HELPER(cksm)(uint32_t r1, uint32_t r2)
1745 {
1746 uint64_t src = get_address_31fix(r2);
1747 uint64_t src_len = env->regs[(r2 + 1) & 15];
1748 uint64_t cksm = 0;
1749
1750 while (src_len >= 4) {
1751 cksm += ldl(src);
1752 cksm = cksm_overflow(cksm);
1753
1754 /* move to next word */
1755 src_len -= 4;
1756 src += 4;
1757 }
1758
1759 switch (src_len) {
1760 case 0:
1761 break;
1762 case 1:
1763 cksm += ldub(src);
1764 cksm = cksm_overflow(cksm);
1765 break;
1766 case 2:
1767 cksm += lduw(src);
1768 cksm = cksm_overflow(cksm);
1769 break;
1770 case 3:
1771 /* XXX check if this really is correct */
1772 cksm += lduw(src) << 8;
1773 cksm += ldub(src + 2);
1774 cksm = cksm_overflow(cksm);
1775 break;
1776 }
1777
1778 /* indicate we've processed everything */
1779 env->regs[(r2 + 1) & 15] = 0;
1780
1781 /* store result */
1782 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (uint32_t)cksm;
1783 }
1784
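/* The cc_calc_* functions below compute the condition code lazily from a
   saved cc_op plus the operands (src, dst, vr) when the translator could
   not fold it at translation time.  As a rough rule the encoding is
   0 = zero/equal, 1 = low/negative, 2 = high/positive and 3 = overflow or
   carry, though the exact meaning depends on the operation. */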
1785 static inline uint32_t cc_calc_ltgt_32(CPUState *env, int32_t src,
1786 int32_t dst)
1787 {
1788 if (src == dst) {
1789 return 0;
1790 } else if (src < dst) {
1791 return 1;
1792 } else {
1793 return 2;
1794 }
1795 }
1796
1797 static inline uint32_t cc_calc_ltgt0_32(CPUState *env, int32_t dst)
1798 {
1799 return cc_calc_ltgt_32(env, dst, 0);
1800 }
1801
1802 static inline uint32_t cc_calc_ltgt_64(CPUState *env, int64_t src,
1803 int64_t dst)
1804 {
1805 if (src == dst) {
1806 return 0;
1807 } else if (src < dst) {
1808 return 1;
1809 } else {
1810 return 2;
1811 }
1812 }
1813
1814 static inline uint32_t cc_calc_ltgt0_64(CPUState *env, int64_t dst)
1815 {
1816 return cc_calc_ltgt_64(env, dst, 0);
1817 }
1818
1819 static inline uint32_t cc_calc_ltugtu_32(CPUState *env, uint32_t src,
1820 uint32_t dst)
1821 {
1822 if (src == dst) {
1823 return 0;
1824 } else if (src < dst) {
1825 return 1;
1826 } else {
1827 return 2;
1828 }
1829 }
1830
1831 static inline uint32_t cc_calc_ltugtu_64(CPUState *env, uint64_t src,
1832 uint64_t dst)
1833 {
1834 if (src == dst) {
1835 return 0;
1836 } else if (src < dst) {
1837 return 1;
1838 } else {
1839 return 2;
1840 }
1841 }
1842
1843 static inline uint32_t cc_calc_tm_32(CPUState *env, uint32_t val, uint32_t mask)
1844 {
1845 HELPER_LOG("%s: val 0x%x mask 0x%x\n", __FUNCTION__, val, mask);
1846 uint16_t r = val & mask;
1847 if (r == 0 || mask == 0) {
1848 return 0;
1849 } else if (r == mask) {
1850 return 3;
1851 } else {
1852 return 1;
1853 }
1854 }
1855
1856 /* set condition code for test under mask */
1857 static inline uint32_t cc_calc_tm_64(CPUState *env, uint64_t val, uint32_t mask)
1858 {
1859 uint16_t r = val & mask;
1860 HELPER_LOG("%s: val 0x%lx mask 0x%x r 0x%x\n", __FUNCTION__, val, mask, r);
1861 if (r == 0 || mask == 0) {
1862 return 0;
1863 } else if (r == mask) {
1864 return 3;
1865 } else {
1866 while (!(mask & 0x8000)) {
1867 mask <<= 1;
1868 val <<= 1;
1869 }
1870 if (val & 0x8000) {
1871 return 2;
1872 } else {
1873 return 1;
1874 }
1875 }
1876 }
1877
1878 static inline uint32_t cc_calc_nz(CPUState *env, uint64_t dst)
1879 {
1880 return !!dst;
1881 }
1882
1883 static inline uint32_t cc_calc_add_64(CPUState *env, int64_t a1, int64_t a2,
1884 int64_t ar)
1885 {
1886 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1887 return 3; /* overflow */
1888 } else {
1889 if (ar < 0) {
1890 return 1;
1891 } else if (ar > 0) {
1892 return 2;
1893 } else {
1894 return 0;
1895 }
1896 }
1897 }
1898
1899 static inline uint32_t cc_calc_addu_64(CPUState *env, uint64_t a1, uint64_t a2,
1900 uint64_t ar)
1901 {
1902 if (ar == 0) {
1903 if (a1) {
1904 return 2;
1905 } else {
1906 return 0;
1907 }
1908 } else {
1909 if (ar < a1 || ar < a2) {
1910 return 3;
1911 } else {
1912 return 1;
1913 }
1914 }
1915 }
1916
1917 static inline uint32_t cc_calc_sub_64(CPUState *env, int64_t a1, int64_t a2,
1918 int64_t ar)
1919 {
1920 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
1921 return 3; /* overflow */
1922 } else {
1923 if (ar < 0) {
1924 return 1;
1925 } else if (ar > 0) {
1926 return 2;
1927 } else {
1928 return 0;
1929 }
1930 }
1931 }
1932
1933 static inline uint32_t cc_calc_subu_64(CPUState *env, uint64_t a1, uint64_t a2,
1934 uint64_t ar)
1935 {
1936 if (ar == 0) {
1937 return 2;
1938 } else {
1939 if (a2 > a1) {
1940 return 1;
1941 } else {
1942 return 3;
1943 }
1944 }
1945 }
1946
1947 static inline uint32_t cc_calc_abs_64(CPUState *env, int64_t dst)
1948 {
1949 if ((uint64_t)dst == 0x8000000000000000ULL) {
1950 return 3;
1951 } else if (dst) {
1952 return 1;
1953 } else {
1954 return 0;
1955 }
1956 }
1957
1958 static inline uint32_t cc_calc_nabs_64(CPUState *env, int64_t dst)
1959 {
1960 return !!dst;
1961 }
1962
1963 static inline uint32_t cc_calc_comp_64(CPUState *env, int64_t dst)
1964 {
1965 if ((uint64_t)dst == 0x8000000000000000ULL) {
1966 return 3;
1967 } else if (dst < 0) {
1968 return 1;
1969 } else if (dst > 0) {
1970 return 2;
1971 } else {
1972 return 0;
1973 }
1974 }
1975
1976
1977 static inline uint32_t cc_calc_add_32(CPUState *env, int32_t a1, int32_t a2,
1978 int32_t ar)
1979 {
1980 if ((a1 > 0 && a2 > 0 && ar < 0) || (a1 < 0 && a2 < 0 && ar > 0)) {
1981 return 3; /* overflow */
1982 } else {
1983 if (ar < 0) {
1984 return 1;
1985 } else if (ar > 0) {
1986 return 2;
1987 } else {
1988 return 0;
1989 }
1990 }
1991 }
1992
1993 static inline uint32_t cc_calc_addu_32(CPUState *env, uint32_t a1, uint32_t a2,
1994 uint32_t ar)
1995 {
1996 if (ar == 0) {
1997 if (a1) {
1998 return 2;
1999 } else {
2000 return 0;
2001 }
2002 } else {
2003 if (ar < a1 || ar < a2) {
2004 return 3;
2005 } else {
2006 return 1;
2007 }
2008 }
2009 }
2010
2011 static inline uint32_t cc_calc_sub_32(CPUState *env, int32_t a1, int32_t a2,
2012 int32_t ar)
2013 {
2014 if ((a1 > 0 && a2 < 0 && ar < 0) || (a1 < 0 && a2 > 0 && ar > 0)) {
2015 return 3; /* overflow */
2016 } else {
2017 if (ar < 0) {
2018 return 1;
2019 } else if (ar > 0) {
2020 return 2;
2021 } else {
2022 return 0;
2023 }
2024 }
2025 }
2026
2027 static inline uint32_t cc_calc_subu_32(CPUState *env, uint32_t a1, uint32_t a2,
2028 uint32_t ar)
2029 {
2030 if (ar == 0) {
2031 return 2;
2032 } else {
2033 if (a2 > a1) {
2034 return 1;
2035 } else {
2036 return 3;
2037 }
2038 }
2039 }
2040
2041 static inline uint32_t cc_calc_abs_32(CPUState *env, int32_t dst)
2042 {
2043 if ((uint32_t)dst == 0x80000000UL) {
2044 return 3;
2045 } else if (dst) {
2046 return 1;
2047 } else {
2048 return 0;
2049 }
2050 }
2051
2052 static inline uint32_t cc_calc_nabs_32(CPUState *env, int32_t dst)
2053 {
2054 return !!dst;
2055 }
2056
2057 static inline uint32_t cc_calc_comp_32(CPUState *env, int32_t dst)
2058 {
2059 if ((uint32_t)dst == 0x80000000UL) {
2060 return 3;
2061 } else if (dst < 0) {
2062 return 1;
2063 } else if (dst > 0) {
2064 return 2;
2065 } else {
2066 return 0;
2067 }
2068 }
2069
2070 /* calculate condition code for insert character under mask insn */
2071 static inline uint32_t cc_calc_icm_32(CPUState *env, uint32_t mask, uint32_t val)
2072 {
2073 HELPER_LOG("%s: mask 0x%x val %d\n", __FUNCTION__, mask, val);
2074 uint32_t cc;
2075
2076 if (mask == 0xf) {
2077 if (!val) {
2078 return 0;
2079 } else if (val & 0x80000000) {
2080 return 1;
2081 } else {
2082 return 2;
2083 }
2084 }
2085
2086 if (!val || !mask) {
2087 cc = 0;
2088 } else {
2089 while (mask != 1) {
2090 mask >>= 1;
2091 val >>= 8;
2092 }
2093 if (val & 0x80) {
2094 cc = 1;
2095 } else {
2096 cc = 2;
2097 }
2098 }
2099 return cc;
2100 }
2101
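/* Condition code for SHIFT LEFT SINGLE (64-bit arithmetic shift): the
   sign bit is kept in place, and cc 3 (overflow) is returned when the
   top "shift" bits of the source are not all copies of the sign bit;
   otherwise the cc reflects whether the shifted result is zero, negative
   or positive. */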
2102 static inline uint32_t cc_calc_slag(CPUState *env, uint64_t src, uint64_t shift)
2103 {
2104 uint64_t mask = ((1ULL << shift) - 1ULL) << (64 - shift);
2105 uint64_t match, r;
2106
2107 /* check if the sign bit stays the same */
2108 if (src & (1ULL << 63)) {
2109 match = mask;
2110 } else {
2111 match = 0;
2112 }
2113
2114 if ((src & mask) != match) {
2115 /* overflow */
2116 return 3;
2117 }
2118
2119 r = ((src << shift) & ((1ULL << 63) - 1)) | (src & (1ULL << 63));
2120
2121 if ((int64_t)r == 0) {
2122 return 0;
2123 } else if ((int64_t)r < 0) {
2124 return 1;
2125 }
2126
2127 return 2;
2128 }
2129
2130
2131 static inline uint32_t do_calc_cc(CPUState *env, uint32_t cc_op, uint64_t src,
2132 uint64_t dst, uint64_t vr)
2133 {
2134 uint32_t r = 0;
2135
2136 switch (cc_op) {
2137 case CC_OP_CONST0:
2138 case CC_OP_CONST1:
2139 case CC_OP_CONST2:
2140 case CC_OP_CONST3:
2141 /* cc_op value _is_ cc */
2142 r = cc_op;
2143 break;
2144 case CC_OP_LTGT0_32:
2145 r = cc_calc_ltgt0_32(env, dst);
2146 break;
2147 case CC_OP_LTGT0_64:
2148 r = cc_calc_ltgt0_64(env, dst);
2149 break;
2150 case CC_OP_LTGT_32:
2151 r = cc_calc_ltgt_32(env, src, dst);
2152 break;
2153 case CC_OP_LTGT_64:
2154 r = cc_calc_ltgt_64(env, src, dst);
2155 break;
2156 case CC_OP_LTUGTU_32:
2157 r = cc_calc_ltugtu_32(env, src, dst);
2158 break;
2159 case CC_OP_LTUGTU_64:
2160 r = cc_calc_ltugtu_64(env, src, dst);
2161 break;
2162 case CC_OP_TM_32:
2163 r = cc_calc_tm_32(env, src, dst);
2164 break;
2165 case CC_OP_TM_64:
2166 r = cc_calc_tm_64(env, src, dst);
2167 break;
2168 case CC_OP_NZ:
2169 r = cc_calc_nz(env, dst);
2170 break;
2171 case CC_OP_ADD_64:
2172 r = cc_calc_add_64(env, src, dst, vr);
2173 break;
2174 case CC_OP_ADDU_64:
2175 r = cc_calc_addu_64(env, src, dst, vr);
2176 break;
2177 case CC_OP_SUB_64:
2178 r = cc_calc_sub_64(env, src, dst, vr);
2179 break;
2180 case CC_OP_SUBU_64:
2181 r = cc_calc_subu_64(env, src, dst, vr);
2182 break;
2183 case CC_OP_ABS_64:
2184 r = cc_calc_abs_64(env, dst);
2185 break;
2186 case CC_OP_NABS_64:
2187 r = cc_calc_nabs_64(env, dst);
2188 break;
2189 case CC_OP_COMP_64:
2190 r = cc_calc_comp_64(env, dst);
2191 break;
2192
2193 case CC_OP_ADD_32:
2194 r = cc_calc_add_32(env, src, dst, vr);
2195 break;
2196 case CC_OP_ADDU_32:
2197 r = cc_calc_addu_32(env, src, dst, vr);
2198 break;
2199 case CC_OP_SUB_32:
2200 r = cc_calc_sub_32(env, src, dst, vr);
2201 break;
2202 case CC_OP_SUBU_32:
2203 r = cc_calc_subu_32(env, src, dst, vr);
2204 break;
2205 case CC_OP_ABS_32:
  2206         r = cc_calc_abs_32(env, dst);
2207 break;
2208 case CC_OP_NABS_32:
  2209         r = cc_calc_nabs_32(env, dst);
2210 break;
2211 case CC_OP_COMP_32:
2212 r = cc_calc_comp_32(env, dst);
2213 break;
2214
2215 case CC_OP_ICM:
2216 r = cc_calc_icm_32(env, src, dst);
2217 break;
2218 case CC_OP_SLAG:
2219 r = cc_calc_slag(env, src, dst);
2220 break;
2221
2222 case CC_OP_LTGT_F32:
2223 r = set_cc_f32(src, dst);
2224 break;
2225 case CC_OP_LTGT_F64:
2226 r = set_cc_f64(src, dst);
2227 break;
2228 case CC_OP_NZ_F32:
2229 r = set_cc_nz_f32(dst);
2230 break;
2231 case CC_OP_NZ_F64:
2232 r = set_cc_nz_f64(dst);
2233 break;
2234
2235 default:
2236 cpu_abort(env, "Unknown CC operation: %s\n", cc_name(cc_op));
2237 }
2238
2239 HELPER_LOG("%s: %15s 0x%016" PRIx64 " 0x%016" PRIx64 " 0x%016" PRIx64
2240 " = %d\n", __FUNCTION__, cc_name(cc_op), src, dst, vr, r);
2241 return r;
2242 }
2243
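/* calc_cc() is the entry point for calls from C code; the TCG helper below
   performs the same computation on the global env. */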
2244 uint32_t calc_cc(CPUState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
2245 uint64_t vr)
2246 {
2247 return do_calc_cc(env, cc_op, src, dst, vr);
2248 }
2249
2250 uint32_t HELPER(calc_cc)(uint32_t cc_op, uint64_t src, uint64_t dst,
2251 uint64_t vr)
2252 {
2253 return do_calc_cc(env, cc_op, src, dst, vr);
2254 }
2255
2256 uint64_t HELPER(cvd)(int32_t bin)
2257 {
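/* Convert the signed 32-bit value to packed decimal: the sign nibble goes in
   the low nibble, then one decimal digit per nibble from right to left.
   For example, -123 yields 0x000000000000123d. */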
2258 /* positive 0 */
2259 uint64_t dec = 0x0c;
2260 int shift = 4;
2261
2262 if (bin < 0) {
2263 bin = -bin;
2264 dec = 0x0d;
2265 }
2266
2267 for (shift = 4; (shift < 64) && bin; shift += 4) {
2268 int current_number = bin % 10;
2269
2270 dec |= (uint64_t)current_number << shift; /* 64-bit shift; shift can exceed 31 */
2271 bin /= 10;
2272 }
2273
2274 return dec;
2275 }
2276
2277 void HELPER(unpk)(uint32_t len, uint64_t dest, uint64_t src)
2278 {
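/* len packs the two SS-format length codes: the upper nibble is the zoned
   destination length minus one, the lower nibble the packed source length
   minus one. Both operands are processed from right to left. */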
2279 int len_dest = len >> 4;
2280 int len_src = len & 0xf;
2281 uint8_t b;
2282 int second_nibble = 0;
2283
2284 dest += len_dest;
2285 src += len_src;
2286
2287 /* last byte is special, it only flips the nibbles */
2288 b = ldub(src);
2289 stb(dest, (b << 4) | (b >> 4));
2290 src--;
2291 len_src--;
2292
2293 /* now pad every nibble with 0xf0 */
2294
2295 while (len_dest > 0) {
2296 uint8_t cur_byte = 0;
2297
2298 if (len_src >= 0) { /* the byte at src still holds digits when len_src is 0 */
2299 cur_byte = ldub(src);
2300 }
2301
2302 len_dest--;
2303 dest--;
2304
2305 /* only advance one nibble at a time */
2306 if (second_nibble) {
2307 cur_byte >>= 4;
2308 len_src--;
2309 src--;
2310 }
2311 second_nibble = !second_nibble;
2312
2313 /* digit */
2314 cur_byte = (cur_byte & 0xf);
2315 /* zone bits */
2316 cur_byte |= 0xf0;
2317
2318 stb(dest, cur_byte);
2319 }
2320 }
2321
2322 void HELPER(tr)(uint32_t len, uint64_t array, uint64_t trans)
2323 {
2324 int i;
2325
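/* The length field of TR encodes the operand length minus one, hence the
   inclusive loop bound. */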
2326 for (i = 0; i <= len; i++) {
2327 uint8_t byte = ldub(array + i);
2328 uint8_t new_byte = ldub(trans + byte);
2329 stb(array + i, new_byte);
2330 }
2331 }
2332
2333 #ifndef CONFIG_USER_ONLY
2334
2335 void HELPER(load_psw)(uint64_t mask, uint64_t addr)
2336 {
2337 load_psw(env, mask, addr);
2338 cpu_loop_exit();
2339 }
2340
2341 static void program_interrupt(CPUState *env, uint32_t code, int ilc)
2342 {
2343 qemu_log("program interrupt at %#" PRIx64 "\n", env->psw.addr);
2344
2345 if (kvm_enabled()) {
2346 kvm_s390_interrupt(env, KVM_S390_PROGRAM_INT, code);
2347 } else {
2348 env->int_pgm_code = code;
2349 env->int_pgm_ilc = ilc;
2350 env->exception_index = EXCP_PGM;
2351 cpu_loop_exit();
2352 }
2353 }
2354
2355 static void ext_interrupt(CPUState *env, int type, uint32_t param,
2356 uint64_t param64)
2357 {
2358 cpu_inject_ext(env, type, param, param64);
2359 }
2360
2361 int sclp_service_call(CPUState *env, uint32_t sccb, uint64_t code)
2362 {
2363 int r = 0;
2364
2365 #ifdef DEBUG_HELPER
2366 printf("sclp(0x%x, 0x%" PRIx64 ")\n", sccb, code);
2367 #endif
2368
2369 if (sccb & ~0x7ffffff8ul) {
2370 fprintf(stderr, "KVM: invalid sccb address 0x%x\n", sccb);
2371 r = -1;
2372 goto out;
2373 }
2374
2375 switch (code) {
2376 case SCLP_CMDW_READ_SCP_INFO:
2377 case SCLP_CMDW_READ_SCP_INFO_FORCED:
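/* Fill in the SCCB: memory size in 1 MB units, increment size of one, and
   the response code. */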
2378 stw_phys(sccb + SCP_MEM_CODE, ram_size >> 20);
2379 stb_phys(sccb + SCP_INCREMENT, 1);
2380 stw_phys(sccb + SCP_RESPONSE_CODE, 0x10);
2381
2382 if (kvm_enabled()) {
2383 #ifdef CONFIG_KVM
2384 kvm_s390_interrupt_internal(env, KVM_S390_INT_SERVICE,
2385 sccb & ~3, 0, 1);
2386 #endif
2387 } else {
2388 env->psw.addr += 4;
2389 ext_interrupt(env, EXT_SERVICE, sccb & ~3, 0);
2390 }
2391 break;
2392 default:
2393 #ifdef DEBUG_HELPER
2394 printf("KVM: invalid sclp call 0x%x / 0x%" PRIx64 "\n", sccb, code);
2395 #endif
2396 r = -1;
2397 break;
2398 }
2399
2400 out:
2401 return r;
2402 }
2403
2404 /* SCLP service call */
2405 uint32_t HELPER(servc)(uint32_t r1, uint64_t r2)
2406 {
2407 if (sclp_service_call(env, r1, r2)) {
2408 return 3;
2409 }
2410
2411 return 0;
2412 }
2413
2414 /* DIAG */
2415 uint64_t HELPER(diag)(uint32_t num, uint64_t mem, uint64_t code)
2416 {
2417 uint64_t r;
2418
2419 switch (num) {
2420 case 0x500:
2421 /* KVM hypercall */
2422 r = s390_virtio_hypercall(env, mem, code);
2423 break;
2424 case 0x44:
2425 /* yield */
2426 r = 0;
2427 break;
2428 case 0x308:
2429 /* ipl */
2430 r = 0;
2431 break;
2432 default:
2433 r = -1;
2434 break;
2435 }
2436
2437 if (r) {
2438 program_interrupt(env, PGM_OPERATION, ILC_LATER_INC);
2439 }
2440
2441 return r;
2442 }
2443
2444 /* Store CPU ID */
2445 void HELPER(stidp)(uint64_t a1)
2446 {
2447 stq(a1, env->cpu_num);
2448 }
2449
2450 /* Set Prefix */
2451 void HELPER(spx)(uint64_t a1)
2452 {
2453 uint32_t prefix;
2454
2455 prefix = ldl(a1);
2456 env->psa = prefix & 0xfffff000;
2457 qemu_log("prefix: %#x\n", prefix);
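/* The first two pages are remapped through the prefix register, so drop any
   stale translations for them. */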
2458 tlb_flush_page(env, 0);
2459 tlb_flush_page(env, TARGET_PAGE_SIZE);
2460 }
2461
2462 /* Set Clock */
2463 uint32_t HELPER(sck)(uint64_t a1)
2464 {
2465 /* XXX not implemented - is it necessary? */
2466
2467 return 0;
2468 }
2469
2470 static inline uint64_t clock_value(CPUState *env)
2471 {
2472 uint64_t time;
2473
2474 time = env->tod_offset +
2475 time2tod(qemu_get_clock_ns(vm_clock) - env->tod_basetime);
2476
2477 return time;
2478 }
2479
2480 /* Store Clock */
2481 uint32_t HELPER(stck)(uint64_t a1)
2482 {
2483 stq(a1, clock_value(env));
2484
2485 return 0;
2486 }
2487
2488 /* Store Clock Extended */
2489 uint32_t HELPER(stcke)(uint64_t a1)
2490 {
2491 stb(a1, 0);
2492 /* basically the same value as stck */
2493 stq(a1 + 1, clock_value(env) | env->cpu_num);
2494 /* more fine grained than stck */
2495 stq(a1 + 9, 0);
2496 /* XXX programmable fields */
2497 stw(a1 + 17, 0);
2498
2499
2500 return 0;
2501 }
2502
2503 /* Set Clock Comparator */
2504 void HELPER(sckc)(uint64_t a1)
2505 {
2506 uint64_t time = ldq(a1);
2507
2508 if (time == -1ULL) {
2509 return;
2510 }
2511
2512 /* difference between now and then */
2513 time -= clock_value(env);
2514 /* convert TOD clock units to nanoseconds (one TOD unit is 1000/4096 ns) */
2515 time = (time * 125) >> 9;
2516
2517 qemu_mod_timer(env->tod_timer, qemu_get_clock_ns(vm_clock) + time);
2518 }
2519
2520 /* Store Clock Comparator */
2521 void HELPER(stckc)(uint64_t a1)
2522 {
2523 /* XXX implement */
2524 stq(a1, 0);
2525 }
2526
2527 /* Set CPU Timer */
2528 void HELPER(spt)(uint64_t a1)
2529 {
2530 uint64_t time = ldq(a1);
2531
2532 if (time == -1ULL) {
2533 return;
2534 }
2535
2536 /* convert CPU timer units to nanoseconds (same resolution as the TOD clock) */
2537 time = (time * 125) >> 9;
2538
2539 qemu_mod_timer(env->cpu_timer, qemu_get_clock_ns(vm_clock) + time);
2540 }
2541
2542 /* Store CPU Timer */
2543 void HELPER(stpt)(uint64_t a1)
2544 {
2545 /* XXX implement */
2546 stq(a1, 0);
2547 }
2548
2549 /* Store System Information */
2550 uint32_t HELPER(stsi)(uint64_t a0, uint32_t r0, uint32_t r1)
2551 {
2552 int cc = 0;
2553 int sel1, sel2;
2554
2555 if ((r0 & STSI_LEVEL_MASK) <= STSI_LEVEL_3 &&
2556 ((r0 & STSI_R0_RESERVED_MASK) || (r1 & STSI_R1_RESERVED_MASK))) {
2557 /* valid function code, invalid reserved bits */
2558 program_interrupt(env, PGM_SPECIFICATION, 2);
2559 }
2560
2561 sel1 = r0 & STSI_R0_SEL1_MASK;
2562 sel2 = r1 & STSI_R1_SEL2_MASK;
2563
2564 /* XXX: spec exception if sysib is not 4k-aligned */
2565
2566 switch (r0 & STSI_LEVEL_MASK) {
2567 case STSI_LEVEL_1:
2568 if ((sel1 == 1) && (sel2 == 1)) {
2569 /* Basic Machine Configuration */
2570 struct sysib_111 sysib;
2571
2572 memset(&sysib, 0, sizeof(sysib));
2573 ebcdic_put(sysib.manuf, "QEMU ", 16);
2574 /* same as machine type number in STORE CPU ID */
2575 ebcdic_put(sysib.type, "QEMU", 4);
2576 /* same as model number in STORE CPU ID */
2577 ebcdic_put(sysib.model, "QEMU ", 16);
2578 ebcdic_put(sysib.sequence, "QEMU ", 16);
2579 ebcdic_put(sysib.plant, "QEMU", 4);
2580 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2581 } else if ((sel1 == 2) && (sel2 == 1)) {
2582 /* Basic Machine CPU */
2583 struct sysib_121 sysib;
2584
2585 memset(&sysib, 0, sizeof(sysib));
2586 /* XXX make different for different CPUs? */
2587 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2588 ebcdic_put(sysib.plant, "QEMU", 4);
2589 stw_p(&sysib.cpu_addr, env->cpu_num);
2590 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2591 } else if ((sel1 == 2) && (sel2 == 2)) {
2592 /* Basic Machine CPUs */
2593 struct sysib_122 sysib;
2594
2595 memset(&sysib, 0, sizeof(sysib));
2596 stl_p(&sysib.capability, 0x443afc29);
2597 /* XXX change when SMP comes */
2598 stw_p(&sysib.total_cpus, 1);
2599 stw_p(&sysib.active_cpus, 1);
2600 stw_p(&sysib.standby_cpus, 0);
2601 stw_p(&sysib.reserved_cpus, 0);
2602 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2603 } else {
2604 cc = 3;
2605 }
2606 break;
2607 case STSI_LEVEL_2:
2608 {
2609 if ((sel1 == 2) && (sel2 == 1)) {
2610 /* LPAR CPU */
2611 struct sysib_221 sysib;
2612
2613 memset(&sysib, 0, sizeof(sysib));
2614 /* XXX make different for different CPUs? */
2615 ebcdic_put(sysib.sequence, "QEMUQEMUQEMUQEMU", 16);
2616 ebcdic_put(sysib.plant, "QEMU", 4);
2617 stw_p(&sysib.cpu_addr, env->cpu_num);
2618 stw_p(&sysib.cpu_id, 0);
2619 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2620 } else if ((sel1 == 2) && (sel2 == 2)) {
2621 /* LPAR CPUs */
2622 struct sysib_222 sysib;
2623
2624 memset(&sysib, 0, sizeof(sysib));
2625 stw_p(&sysib.lpar_num, 0);
2626 sysib.lcpuc = 0;
2627 /* XXX change when SMP comes */
2628 stw_p(&sysib.total_cpus, 1);
2629 stw_p(&sysib.conf_cpus, 1);
2630 stw_p(&sysib.standby_cpus, 0);
2631 stw_p(&sysib.reserved_cpus, 0);
2632 ebcdic_put(sysib.name, "QEMU ", 8);
2633 stl_p(&sysib.caf, 1000);
2634 stw_p(&sysib.dedicated_cpus, 0);
2635 stw_p(&sysib.shared_cpus, 0);
2636 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2637 } else {
2638 cc = 3;
2639 }
2640 break;
2641 }
2642 case STSI_LEVEL_3:
2643 {
2644 if ((sel1 == 2) && (sel2 == 2)) {
2645 /* VM CPUs */
2646 struct sysib_322 sysib;
2647
2648 memset(&sysib, 0, sizeof(sysib));
2649 sysib.count = 1;
2650 /* XXX change when SMP comes */
2651 stw_p(&sysib.vm[0].total_cpus, 1);
2652 stw_p(&sysib.vm[0].conf_cpus, 1);
2653 stw_p(&sysib.vm[0].standby_cpus, 0);
2654 stw_p(&sysib.vm[0].reserved_cpus, 0);
2655 ebcdic_put(sysib.vm[0].name, "KVMguest", 8);
2656 stl_p(&sysib.vm[0].caf, 1000);
2657 ebcdic_put(sysib.vm[0].cpi, "KVM/Linux ", 16);
2658 cpu_physical_memory_rw(a0, (uint8_t*)&sysib, sizeof(sysib), 1);
2659 } else {
2660 cc = 3;
2661 }
2662 break;
2663 }
2664 case STSI_LEVEL_CURRENT:
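/* Report the current configuration level in general register 0. */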
2665 env->regs[0] = STSI_LEVEL_3;
2666 break;
2667 default:
2668 cc = 3;
2669 break;
2670 }
2671
2672 return cc;
2673 }
2674
2675 void HELPER(lctlg)(uint32_t r1, uint64_t a2, uint32_t r3)
2676 {
2677 int i;
2678 uint64_t src = a2;
2679
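/* Load control registers r1 through r3, wrapping from 15 back to 0. */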
2680 for (i = r1;; i = (i + 1) % 16) {
2681 env->cregs[i] = ldq(src);
2682 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
2683 i, src, env->cregs[i]);
2684 src += sizeof(uint64_t);
2685
2686 if (i == r3) {
2687 break;
2688 }
2689 }
2690
2691 tlb_flush(env, 1);
2692 }
2693
2694 void HELPER(lctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2695 {
2696 int i;
2697 uint64_t src = a2;
2698
2699 for (i = r1;; i = (i + 1) % 16) {
2700 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | ldl(src);
2701 src += sizeof(uint32_t);
2702
2703 if (i == r3) {
2704 break;
2705 }
2706 }
2707
2708 tlb_flush(env, 1);
2709 }
2710
2711 void HELPER(stctg)(uint32_t r1, uint64_t a2, uint32_t r3)
2712 {
2713 int i;
2714 uint64_t dest = a2;
2715
2716 for (i = r1;; i = (i + 1) % 16) {
2717 stq(dest, env->cregs[i]);
2718 dest += sizeof(uint64_t);
2719
2720 if (i == r3) {
2721 break;
2722 }
2723 }
2724 }
2725
2726 void HELPER(stctl)(uint32_t r1, uint64_t a2, uint32_t r3)
2727 {
2728 int i;
2729 uint64_t dest = a2;
2730
2731 for (i = r1;; i = (i + 1) % 16) {
2732 stl(dest, env->cregs[i]);
2733 dest += sizeof(uint32_t);
2734
2735 if (i == r3) {
2736 break;
2737 }
2738 }
2739 }
2740
2741 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
2742 {
2743 /* XXX implement */
2744
2745 return 0;
2746 }
2747
2748 /* insert storage key extended */
2749 uint64_t HELPER(iske)(uint64_t r2)
2750 {
2751 uint64_t addr = get_address(0, 0, r2);
2752
2753 if (addr > ram_size) {
2754 return 0;
2755 }
2756
2757 /* XXX maybe use qemu's internal keys? */
2758 return env->storage_keys[addr / TARGET_PAGE_SIZE];
2759 }
2760
2761 /* set storage key extended */
2762 void HELPER(sske)(uint32_t r1, uint64_t r2)
2763 {
2764 uint64_t addr = get_address(0, 0, r2);
2765
2766 if (addr > ram_size) {
2767 return;
2768 }
2769
2770 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
2771 }
2772
2773 /* reset reference bit extended */
2774 uint32_t HELPER(rrbe)(uint32_t r1, uint64_t r2)
2775 {
2776 if (r2 > ram_size) {
2777 return 0;
2778 }
2779
2780 /* XXX implement */
2781 #if 0
2782 env->storage_keys[r2 / TARGET_PAGE_SIZE] &= ~SK_REFERENCED;
2783 #endif
2784
2785 /*
2786 * cc
2787 *
2788 * 0 Reference bit zero; change bit zero
2789 * 1 Reference bit zero; change bit one
2790 * 2 Reference bit one; change bit zero
2791 * 3 Reference bit one; change bit one
2792 */
2793 return 0;
2794 }
2795
2796 /* compare and swap and purge */
2797 uint32_t HELPER(csp)(uint32_t r1, uint32_t r2)
2798 {
2799 uint32_t cc;
2800 uint32_t o1 = env->regs[r1];
2801 uint64_t a2 = get_address_31fix(r2) & ~3ULL;
2802 uint32_t o2 = ldl(a2);
2803
2804 if (o1 == o2) {
2805 stl(a2, env->regs[(r1 + 1) & 15]);
2806 if (env->regs[r2] & 0x3) {
2807 /* flush TLB / ALB */
2808 tlb_flush(env, 1);
2809 }
2810 cc = 0;
2811 } else {
2812 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
2813 cc = 1;
2814 }
2815
2816 return cc;
2817 }
2818
2819 static uint32_t mvc_asc(int64_t l, uint64_t a1, uint64_t mode1, uint64_t a2,
2820 uint64_t mode2)
2821 {
2822 target_ulong src, dest;
2823 int flags, cc = 0, i;
2824
2825 if (!l) {
2826 return 0;
2827 } else if (l > 256) {
2828 /* max 256 */
2829 l = 256;
2830 cc = 3;
2831 }
2832
2833 if (mmu_translate(env, a1 & TARGET_PAGE_MASK, 1, mode1, &dest, &flags)) {
2834 cpu_loop_exit();
2835 }
2836 dest |= a1 & ~TARGET_PAGE_MASK;
2837
2838 if (mmu_translate(env, a2 & TARGET_PAGE_MASK, 0, mode2, &src, &flags)) {
2839 cpu_loop_exit();
2840 }
2841 src |= a2 & ~TARGET_PAGE_MASK;
2842
2843 /* XXX replace w/ memcpy */
2844 for (i = 0; i < l; i++) {
2845 /* XXX be more clever */
2846 if ((((dest + i) & TARGET_PAGE_MASK) != (dest & TARGET_PAGE_MASK)) ||
2847 (((src + i) & TARGET_PAGE_MASK) != (src & TARGET_PAGE_MASK))) {
2848 mvc_asc(l - i, a1 + i, mode1, a2 + i, mode2);
2849 break;
2850 }
2851 stb_phys(dest + i, ldub_phys(src + i));
2852 }
2853
2854 return cc;
2855 }
2856
2857 uint32_t HELPER(mvcs)(uint64_t l, uint64_t a1, uint64_t a2)
2858 {
2859 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2860 __FUNCTION__, l, a1, a2);
2861
2862 return mvc_asc(l, a1, PSW_ASC_SECONDARY, a2, PSW_ASC_PRIMARY);
2863 }
2864
2865 uint32_t HELPER(mvcp)(uint64_t l, uint64_t a1, uint64_t a2)
2866 {
2867 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
2868 __FUNCTION__, l, a1, a2);
2869
2870 return mvc_asc(l, a1, PSW_ASC_PRIMARY, a2, PSW_ASC_SECONDARY);
2871 }
2872
2873 uint32_t HELPER(sigp)(uint64_t order_code, uint32_t r1, uint64_t cpu_addr)
2874 {
2875 int cc = 0;
2876
2877 HELPER_LOG("%s: %016" PRIx64 " %08x %016" PRIx64 "\n",
2878 __FUNCTION__, order_code, r1, cpu_addr);
2879
2880 /* Remember: Use "R1 or R1+1, whichever is the odd-numbered register"
2881 as parameter (input). Status (output) is always R1. */
2882
2883 switch (order_code) {
2884 case SIGP_SET_ARCH:
2885 /* switch arch */
2886 break;
2887 case SIGP_SENSE:
2888 /* enumerate CPU status */
2889 if (cpu_addr) {
2890 /* XXX implement when SMP comes */
2891 return 3;
2892 }
2893 env->regs[r1] &= 0xffffffff00000000ULL;
2894 cc = 1;
2895 break;
2896 default:
2897 /* unknown sigp */
2898 fprintf(stderr, "XXX unknown sigp: 0x%" PRIx64 "\n", order_code);
2899 cc = 3;
2900 }
2901
2902 return cc;
2903 }
2904
2905 void HELPER(sacf)(uint64_t a1)
2906 {
2907 HELPER_LOG("%s: %16" PRIx64 "\n", __FUNCTION__, a1);
2908
2909 switch (a1 & 0xf00) {
2910 case 0x000:
2911 env->psw.mask &= ~PSW_MASK_ASC;
2912 env->psw.mask |= PSW_ASC_PRIMARY;
2913 break;
2914 case 0x100:
2915 env->psw.mask &= ~PSW_MASK_ASC;
2916 env->psw.mask |= PSW_ASC_SECONDARY;
2917 break;
2918 case 0x300:
2919 env->psw.mask &= ~PSW_MASK_ASC;
2920 env->psw.mask |= PSW_ASC_HOME;
2921 break;
2922 default:
2923 qemu_log("unknown sacf mode: %" PRIx64 "\n", a1);
2924 program_interrupt(env, PGM_SPECIFICATION, 2);
2925 break;
2926 }
2927 }
2928
2929 /* invalidate pte */
2930 void HELPER(ipte)(uint64_t pte_addr, uint64_t vaddr)
2931 {
2932 uint64_t page = vaddr & TARGET_PAGE_MASK;
2933 uint64_t pte = 0;
2934
2935 /* XXX broadcast to other CPUs */
2936
2937 /* XXX Linux is nice enough to give us the exact pte address.
2938 According to spec we'd have to find it out ourselves */
2939 /* XXX Linux is fine with overwriting the pte, the spec requires
2940 us to only set the invalid bit */
2941 stq_phys(pte_addr, pte | _PAGE_INVALID);
2942
2943 /* XXX we exploit the fact that Linux passes the exact virtual
2944 address here - it's not obliged to! */
2945 tlb_flush_page(env, page);
2946 }
2947
2948 /* flush local tlb */
2949 void HELPER(ptlb)(void)
2950 {
2951 tlb_flush(env, 1);
2952 }
2953
2954 /* store using real address */
2955 void HELPER(stura)(uint64_t addr, uint32_t v1)
2956 {
2957 stl_phys(get_address(0, 0, addr), v1); /* STURA stores a 32-bit word */
2958 }
2959
2960 /* load real address */
2961 uint32_t HELPER(lra)(uint64_t addr, uint32_t r1)
2962 {
2963 uint32_t cc = 0;
2964 int old_exc = env->exception_index;
2965 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
2966 uint64_t ret;
2967 int flags;
2968
2969 /* XXX incomplete - has more corner cases */
2970 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
2971 program_interrupt(env, PGM_SPECIAL_OP, 2);
2972 }
2973
2974 env->exception_index = old_exc;
2975 if (mmu_translate(env, addr, 0, asc, &ret, &flags)) {
2976 cc = 3;
2977 }
2978 if (env->exception_index == EXCP_PGM) {
2979 ret = env->int_pgm_code | 0x80000000;
2980 } else {
2981 ret |= addr & ~TARGET_PAGE_MASK;
2982 }
2983 env->exception_index = old_exc;
2984
2985 if (!(env->psw.mask & PSW_MASK_64)) {
2986 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | (ret & 0xffffffffULL);
2987 } else {
2988 env->regs[r1] = ret;
2989 }
2990
2991 return cc;
2992 }
2993
2994 #endif