target-s390x/mem_helper.c
1 /*
2 * S/390 memory access helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/cpu_ldst.h"
24
25 /*****************************************************************************/
26 /* Softmmu support */
27 #if !defined(CONFIG_USER_ONLY)
28
29 /* Try to fill the TLB and raise an exception on error. If retaddr is
30 NULL, it means that the function was called in C code (i.e. not
31 from generated code or from helper.c). */
32 /* XXX: fix it to restore all registers */
33 void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
34 uintptr_t retaddr)
35 {
36 int ret;
37
38 ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
39 if (unlikely(ret != 0)) {
40 if (likely(retaddr)) {
41 /* now we have a real cpu fault */
42 cpu_restore_state(cs, retaddr);
43 }
44 cpu_loop_exit(cs);
45 }
46 }
47
48 #endif
49
50 /* #define DEBUG_HELPER */
51 #ifdef DEBUG_HELPER
52 #define HELPER_LOG(x...) qemu_log(x)
53 #else
54 #define HELPER_LOG(x...)
55 #endif
56
57 /* Reduce the length so that addr + len doesn't cross a page boundary. */
58 static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
59 {
60 #ifndef CONFIG_USER_ONLY
61 if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
62 return -addr & ~TARGET_PAGE_MASK;
63 }
64 #endif
65 return len;
66 }
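/* Worked example (assuming a 4 KiB TARGET_PAGE_SIZE): for an addr whose page
offset is 0xffe and len = 16, the check above triggers and the function
returns -addr & ~TARGET_PAGE_MASK = 2, so only the two bytes left on the
current page are handled in this round; the callers below loop for the rest. */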
67
68 static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
69 uint32_t l)
70 {
71 int mmu_idx = cpu_mmu_index(env);
72
73 while (l > 0) {
74 void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
75 if (p) {
76 /* Access to the whole page in write mode granted. */
77 int l_adj = adj_len_to_page(l, dest);
78 memset(p, byte, l_adj);
79 dest += l_adj;
80 l -= l_adj;
81 } else {
82 /* We failed to get access to the whole page. The next write
83 access will likely fill the QEMU TLB for the next iteration. */
84 cpu_stb_data(env, dest, byte);
85 dest++;
86 l--;
87 }
88 }
89 }
90
91 static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
92 uint32_t l)
93 {
94 int mmu_idx = cpu_mmu_index(env);
95
96 while (l > 0) {
97 void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
98 void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
99 if (src_p && dest_p) {
100 /* Access to both whole pages granted. */
101 int l_adj = adj_len_to_page(l, src);
102 l_adj = adj_len_to_page(l_adj, dest);
103 memmove(dest_p, src_p, l_adj);
104 src += l_adj;
105 dest += l_adj;
106 l -= l_adj;
107 } else {
108 /* We failed to get access to one or both whole pages. The next
109 read or write access will likely fill the QEMU TLB for the
110 next iteration. */
111 cpu_stb_data(env, dest, cpu_ldub_data(env, src));
112 src++;
113 dest++;
114 l--;
115 }
116 }
117 }
118
119 /* bitwise AND on array */
120 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
121 uint64_t src)
122 {
123 int i;
124 unsigned char x;
125 uint32_t cc = 0;
126
127 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
128 __func__, l, dest, src);
129 for (i = 0; i <= l; i++) {
130 x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
131 if (x) {
132 cc = 1;
133 }
134 cpu_stb_data(env, dest + i, x);
135 }
136 return cc;
137 }
138
139 /* bitwise XOR on array */
140 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
141 uint64_t src)
142 {
143 int i;
144 unsigned char x;
145 uint32_t cc = 0;
146
147 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
148 __func__, l, dest, src);
149
150 /* xor with itself is the same as memset(0) */
151 if (src == dest) {
152 fast_memset(env, dest, 0, l + 1);
153 return 0;
154 }
155
156 for (i = 0; i <= l; i++) {
157 x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
158 if (x) {
159 cc = 1;
160 }
161 cpu_stb_data(env, dest + i, x);
162 }
163 return cc;
164 }
165
166 /* bitwise OR on array */
167 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
168 uint64_t src)
169 {
170 int i;
171 unsigned char x;
172 uint32_t cc = 0;
173
174 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
175 __func__, l, dest, src);
176 for (i = 0; i <= l; i++) {
177 x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
178 if (x) {
179 cc = 1;
180 }
181 cpu_stb_data(env, dest + i, x);
182 }
183 return cc;
184 }
185
186 /* memmove */
187 void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
188 {
189 int i = 0;
190
191 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
192 __func__, l, dest, src);
193
194 /* mvc with the destination pointing to the byte after the source is the
195 same as memset with the first source byte */
196 if (dest == (src + 1)) {
197 fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
198 return;
199 }
200
201 /* mvc and memmove do not behave the same when areas overlap! */
202 if ((dest < src) || (src + l < dest)) {
203 fast_memmove(env, dest, src, l + 1);
204 return;
205 }
206
207 /* slow version with byte accesses which always work */
208 for (i = 0; i <= l; i++) {
209 cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
210 }
211 }
212
213 /* compare unsigned byte arrays */
214 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
215 {
216 int i;
217 unsigned char x, y;
218 uint32_t cc;
219
220 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
221 __func__, l, s1, s2);
222 for (i = 0; i <= l; i++) {
223 x = cpu_ldub_data(env, s1 + i);
224 y = cpu_ldub_data(env, s2 + i);
225 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
226 if (x < y) {
227 cc = 1;
228 goto done;
229 } else if (x > y) {
230 cc = 2;
231 goto done;
232 }
233 }
234 cc = 0;
235 done:
236 HELPER_LOG("\n");
237 return cc;
238 }
239
240 /* compare logical under mask */
241 uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
242 uint64_t addr)
243 {
244 uint8_t r, d;
245 uint32_t cc;
246
247 HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
248 mask, addr);
249 cc = 0;
250 while (mask) {
251 if (mask & 8) {
252 d = cpu_ldub_data(env, addr);
253 r = (r1 & 0xff000000UL) >> 24;
254 HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
255 addr);
256 if (r < d) {
257 cc = 1;
258 break;
259 } else if (r > d) {
260 cc = 2;
261 break;
262 }
263 addr++;
264 }
265 mask = (mask << 1) & 0xf;
266 r1 <<= 8;
267 }
268 HELPER_LOG("\n");
269 return cc;
270 }
271
272 static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
273 {
274 /* 31-Bit mode */
275 if (!(env->psw.mask & PSW_MASK_64)) {
276 a &= 0x7fffffff;
277 }
278 return a;
279 }
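/* Example: in 31-bit addressing mode (PSW_MASK_64 clear), an address such as
0x80001234 is masked down to 0x00001234 before it is used. */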
280
281 static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
282 {
283 uint64_t r = d2;
284 if (x2) {
285 r += env->regs[x2];
286 }
287 if (b2) {
288 r += env->regs[b2];
289 }
290 return fix_address(env, r);
291 }
292
293 static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
294 {
295 return fix_address(env, env->regs[reg]);
296 }
297
298 /* search string (c is the byte to search for, r2 is the string, r1 the end of the string) */
299 uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
300 uint64_t str)
301 {
302 uint32_t len;
303 uint8_t v, c = r0;
304
305 str = fix_address(env, str);
306 end = fix_address(env, end);
307
308 /* Assume for now that R2 is unmodified. */
309 env->retxl = str;
310
311 /* Lest we fail to service interrupts in a timely manner, limit the
312 amount of work we're willing to do. For now, let's cap at 8k. */
313 for (len = 0; len < 0x2000; ++len) {
314 if (str + len == end) {
315 /* Character not found. R1 & R2 are unmodified. */
316 env->cc_op = 2;
317 return end;
318 }
319 v = cpu_ldub_data(env, str + len);
320 if (v == c) {
321 /* Character found. Set R1 to the location; R2 is unmodified. */
322 env->cc_op = 1;
323 return str + len;
324 }
325 }
326
327 /* CPU-determined bytes processed. Advance R2 to next byte to process. */
328 env->retxl = str + len;
329 env->cc_op = 3;
330 return end;
331 }
332
333 /* unsigned string compare (c is string terminator) */
334 uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
335 {
336 uint32_t len;
337
338 c = c & 0xff;
339 s1 = fix_address(env, s1);
340 s2 = fix_address(env, s2);
341
342 /* Lest we fail to service interrupts in a timely manner, limit the
343 amount of work we're willing to do. For now, let's cap at 8k. */
344 for (len = 0; len < 0x2000; ++len) {
345 uint8_t v1 = cpu_ldub_data(env, s1 + len);
346 uint8_t v2 = cpu_ldub_data(env, s2 + len);
347 if (v1 == v2) {
348 if (v1 == c) {
349 /* Equal. CC=0, and don't advance the registers. */
350 env->cc_op = 0;
351 env->retxl = s2;
352 return s1;
353 }
354 } else {
355 /* Unequal. CC={1,2}, and advance the registers. Note that
356 the terminator need not be zero, but the string that contains
357 the terminator is by definition "low". */
358 env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
359 env->retxl = s2 + len;
360 return s1 + len;
361 }
362 }
363
364 /* CPU-determined bytes equal; advance the registers. */
365 env->cc_op = 3;
366 env->retxl = s2 + len;
367 return s1 + len;
368 }
369
370 /* move page */
371 void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
372 {
373 /* XXX missing r0 handling */
374 env->cc_op = 0;
375 fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
376 }
377
378 /* string copy (c is string terminator) */
379 uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
380 {
381 uint32_t len;
382
383 c = c & 0xff;
384 d = fix_address(env, d);
385 s = fix_address(env, s);
386
387 /* Lest we fail to service interrupts in a timely manner, limit the
388 amount of work we're willing to do. For now, let's cap at 8k. */
389 for (len = 0; len < 0x2000; ++len) {
390 uint8_t v = cpu_ldub_data(env, s + len);
391 cpu_stb_data(env, d + len, v);
392 if (v == c) {
393 /* Complete. Set CC=1 and advance R1. */
394 env->cc_op = 1;
395 env->retxl = s;
396 return d + len;
397 }
398 }
399
400 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
401 env->cc_op = 3;
402 env->retxl = s + len;
403 return d + len;
404 }
405
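/* Insert characters under mask (ICM), used by the EXECUTE handler below: for
each 1-bit in the 4-bit mask, one byte is loaded from memory and inserted into
the corresponding byte of the low word of r1. The condition code is 0 if all
inserted bits are zero (or the mask is zero), 1 if the first inserted byte has
its leftmost bit set, and 2 otherwise. */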
406 static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
407 uint32_t mask)
408 {
409 int pos = 24; /* top of the lower half of r1 */
410 uint64_t rmask = 0xff000000ULL;
411 uint8_t val = 0;
412 int ccd = 0;
413 uint32_t cc = 0;
414
415 while (mask) {
416 if (mask & 8) {
417 env->regs[r1] &= ~rmask;
418 val = cpu_ldub_data(env, address);
419 if ((val & 0x80) && !ccd) {
420 cc = 1;
421 }
422 ccd = 1;
423 if (val && cc == 0) {
424 cc = 2;
425 }
426 env->regs[r1] |= (uint64_t)val << pos;
427 address++;
428 }
429 mask = (mask << 1) & 0xf;
430 pos -= 8;
431 rmask >>= 8;
432 }
433
434 return cc;
435 }
436
437 /* execute instruction
438 this instruction executes an insn modified with the contents of r1
439 it does not change the executed instruction in memory
440 it does not change the program counter
441 in other words: tricky...
442 currently implemented by interpreting the cases it is most commonly used in
443 */
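/* Typical use, for illustration: code emits "EX r1,target" where target is an
SS-format instruction such as MVC whose own length byte is zero; EX ORs the
low byte of r1 into that length byte, which is why the 0xd0xx cases below take
the length from "v1 & 0xff". */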
444 uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
445 uint64_t addr, uint64_t ret)
446 {
447 S390CPU *cpu = s390_env_get_cpu(env);
448 uint16_t insn = cpu_lduw_code(env, addr);
449
450 HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
451 insn);
452 if ((insn & 0xf0ff) == 0xd000) {
453 uint32_t l, insn2, b1, b2, d1, d2;
454
455 l = v1 & 0xff;
456 insn2 = cpu_ldl_code(env, addr + 2);
457 b1 = (insn2 >> 28) & 0xf;
458 b2 = (insn2 >> 12) & 0xf;
459 d1 = (insn2 >> 16) & 0xfff;
460 d2 = insn2 & 0xfff;
461 switch (insn & 0xf00) {
462 case 0x200:
463 helper_mvc(env, l, get_address(env, 0, b1, d1),
464 get_address(env, 0, b2, d2));
465 break;
466 case 0x400:
467 cc = helper_nc(env, l, get_address(env, 0, b1, d1),
468 get_address(env, 0, b2, d2));
469 break;
470 case 0x500:
471 cc = helper_clc(env, l, get_address(env, 0, b1, d1),
472 get_address(env, 0, b2, d2));
473 break;
474 case 0x600:
475 cc = helper_oc(env, l, get_address(env, 0, b1, d1),
476 get_address(env, 0, b2, d2));
477 break;
478 case 0x700:
479 cc = helper_xc(env, l, get_address(env, 0, b1, d1),
480 get_address(env, 0, b2, d2));
481 break;
482 case 0xc00:
483 helper_tr(env, l, get_address(env, 0, b1, d1),
484 get_address(env, 0, b2, d2));
485 break;
486 case 0xd00:
487 cc = helper_trt(env, l, get_address(env, 0, b1, d1),
488 get_address(env, 0, b2, d2));
489 break;
490 default:
491 goto abort;
492 }
493 } else if ((insn & 0xff00) == 0x0a00) {
494 /* supervisor call */
495 HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
496 env->psw.addr = ret - 4;
497 env->int_svc_code = (insn | v1) & 0xff;
498 env->int_svc_ilen = 4;
499 helper_exception(env, EXCP_SVC);
500 } else if ((insn & 0xff00) == 0xbf00) {
501 uint32_t insn2, r1, r3, b2, d2;
502
503 insn2 = cpu_ldl_code(env, addr + 2);
504 r1 = (insn2 >> 20) & 0xf;
505 r3 = (insn2 >> 16) & 0xf;
506 b2 = (insn2 >> 12) & 0xf;
507 d2 = insn2 & 0xfff;
508 cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
509 } else {
510 abort:
511 cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
512 insn);
513 }
514 return cc;
515 }
516
517 /* load access registers r1 to r3 from memory at a2 */
518 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
519 {
520 int i;
521
522 for (i = r1;; i = (i + 1) % 16) {
523 env->aregs[i] = cpu_ldl_data(env, a2);
524 a2 += 4;
525
526 if (i == r3) {
527 break;
528 }
529 }
530 }
531
532 /* store access registers r1 to r3 in memory at a2 */
533 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
534 {
535 int i;
536
537 for (i = r1;; i = (i + 1) % 16) {
538 cpu_stl_data(env, a2, env->aregs[i]);
539 a2 += 4;
540
541 if (i == r3) {
542 break;
543 }
544 }
545 }
546
547 /* move long */
548 uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
549 {
550 uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
551 uint64_t dest = get_address_31fix(env, r1);
552 uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
553 uint64_t src = get_address_31fix(env, r2);
554 uint8_t pad = env->regs[r2 + 1] >> 24;
555 uint8_t v;
556 uint32_t cc;
557
558 if (destlen == srclen) {
559 cc = 0;
560 } else if (destlen < srclen) {
561 cc = 1;
562 } else {
563 cc = 2;
564 }
565
566 if (srclen > destlen) {
567 srclen = destlen;
568 }
569
570 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
571 v = cpu_ldub_data(env, src);
572 cpu_stb_data(env, dest, v);
573 }
574
575 for (; destlen; dest++, destlen--) {
576 cpu_stb_data(env, dest, pad);
577 }
578
579 env->regs[r1 + 1] = destlen;
580 /* can't use srclen here, we trunc'ed it */
581 env->regs[r2 + 1] -= src - env->regs[r2];
582 env->regs[r1] = dest;
583 env->regs[r2] = src;
584
585 return cc;
586 }
587
588 /* move long extended - another memcopy insn with more bells and whistles */
589 uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
590 uint32_t r3)
591 {
592 uint64_t destlen = env->regs[r1 + 1];
593 uint64_t dest = env->regs[r1];
594 uint64_t srclen = env->regs[r3 + 1];
595 uint64_t src = env->regs[r3];
596 uint8_t pad = a2 & 0xff;
597 uint8_t v;
598 uint32_t cc;
599
600 if (!(env->psw.mask & PSW_MASK_64)) {
601 destlen = (uint32_t)destlen;
602 srclen = (uint32_t)srclen;
603 dest &= 0x7fffffff;
604 src &= 0x7fffffff;
605 }
606
607 if (destlen == srclen) {
608 cc = 0;
609 } else if (destlen < srclen) {
610 cc = 1;
611 } else {
612 cc = 2;
613 }
614
615 if (srclen > destlen) {
616 srclen = destlen;
617 }
618
619 for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
620 v = cpu_ldub_data(env, src);
621 cpu_stb_data(env, dest, v);
622 }
623
624 for (; destlen; dest++, destlen--) {
625 cpu_stb_data(env, dest, pad);
626 }
627
628 env->regs[r1 + 1] = destlen;
629 /* can't use srclen here, we trunc'ed it */
630 /* FIXME: 31-bit mode! */
631 env->regs[r3 + 1] -= src - env->regs[r3];
632 env->regs[r1] = dest;
633 env->regs[r3] = src;
634
635 return cc;
636 }
637
638 /* compare logical long extended - memcompare insn with padding */
639 uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
640 uint32_t r3)
641 {
642 uint64_t destlen = env->regs[r1 + 1];
643 uint64_t dest = get_address_31fix(env, r1);
644 uint64_t srclen = env->regs[r3 + 1];
645 uint64_t src = get_address_31fix(env, r3);
646 uint8_t pad = a2 & 0xff;
647 uint8_t v1 = 0, v2 = 0;
648 uint32_t cc = 0;
649
650 if (!(destlen || srclen)) {
651 return cc;
652 }
653
654 if (srclen > destlen) {
655 srclen = destlen;
656 }
657
658 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
659 v1 = srclen ? cpu_ldub_data(env, src) : pad;
660 v2 = destlen ? cpu_ldub_data(env, dest) : pad;
661 if (v1 != v2) {
662 cc = (v1 < v2) ? 1 : 2;
663 break;
664 }
665 }
666
667 env->regs[r1 + 1] = destlen;
668 /* can't use srclen here, we trunc'ed it */
669 env->regs[r3 + 1] -= src - env->regs[r3];
670 env->regs[r1] = dest;
671 env->regs[r3] = src;
672
673 return cc;
674 }
675
676 /* checksum */
677 uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
678 uint64_t src, uint64_t src_len)
679 {
680 uint64_t max_len, len;
681 uint64_t cksm = (uint32_t)r1;
682
683 /* Lest we fail to service interrupts in a timely manner, limit the
684 amount of work we're willing to do. For now, let's cap at 8k. */
685 max_len = (src_len > 0x2000 ? 0x2000 : src_len);
686
687 /* Process full words as available. */
688 for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
689 cksm += (uint32_t)cpu_ldl_data(env, src);
690 }
691
692 switch (max_len - len) {
693 case 1:
694 cksm += cpu_ldub_data(env, src) << 24;
695 len += 1;
696 break;
697 case 2:
698 cksm += cpu_lduw_data(env, src) << 16;
699 len += 2;
700 break;
701 case 3:
702 cksm += cpu_lduw_data(env, src) << 16;
703 cksm += cpu_ldub_data(env, src + 2) << 8;
704 len += 3;
705 break;
706 }
707
708 /* Fold the carry from the checksum. Note that we can see carry-out
709 during folding more than once (but probably not more than twice). */
710 while (cksm > 0xffffffffull) {
711 cksm = (uint32_t)cksm + (cksm >> 32);
712 }
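/* Fold example: a running sum of 0x123456789 becomes 0x23456789 + 1 =
0x2345678a after one pass, which already fits in 32 bits. */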
713
714 /* Indicate whether or not we've processed everything. */
715 env->cc_op = (len == src_len ? 0 : 3);
716
717 /* Return both cksm and processed length. */
718 env->retxl = cksm;
719 return len;
720 }
721
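/* Unpack (UNPK): convert the packed-decimal operand at src into zoned format
at dest. The rightmost byte only has its nibbles swapped (sign code and last
digit); every other digit nibble is stored with a 0xf0 zone, as described in
the comments below. */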
722 void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
723 uint64_t src)
724 {
725 int len_dest = len >> 4;
726 int len_src = len & 0xf;
727 uint8_t b;
728 int second_nibble = 0;
729
730 dest += len_dest;
731 src += len_src;
732
733 /* last byte is special, it only flips the nibbles */
734 b = cpu_ldub_data(env, src);
735 cpu_stb_data(env, dest, (b << 4) | (b >> 4));
736 src--;
737 len_src--;
738
739 /* now expand each remaining digit nibble into a byte with a 0xf0 zone */
740
741 while (len_dest > 0) {
742 uint8_t cur_byte = 0;
743
744 if (len_src > 0) {
745 cur_byte = cpu_ldub_data(env, src);
746 }
747
748 len_dest--;
749 dest--;
750
751 /* only advance one nibble at a time */
752 if (second_nibble) {
753 cur_byte >>= 4;
754 len_src--;
755 src--;
756 }
757 second_nibble = !second_nibble;
758
759 /* digit */
760 cur_byte = (cur_byte & 0xf);
761 /* zone bits */
762 cur_byte |= 0xf0;
763
764 cpu_stb_data(env, dest, cur_byte);
765 }
766 }
767
768 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
769 uint64_t trans)
770 {
771 int i;
772
773 for (i = 0; i <= len; i++) {
774 uint8_t byte = cpu_ldub_data(env, array + i);
775 uint8_t new_byte = cpu_ldub_data(env, trans + byte);
776
777 cpu_stb_data(env, array + i, new_byte);
778 }
779 }
780
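/* Translate extended (TRE): translate the operand bytes through the 256-byte
table at trans, stopping early when the test byte held in the low byte of r0
is found (cc 1) or after a CPU-determined amount of work (cc 3); cc 0 means
the whole operand was processed. */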
781 uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
782 uint64_t len, uint64_t trans)
783 {
784 uint8_t end = env->regs[0] & 0xff;
785 uint64_t l = len;
786 uint64_t i;
787
788 if (!(env->psw.mask & PSW_MASK_64)) {
789 array &= 0x7fffffff;
790 l = (uint32_t)l;
791 }
792
793 /* Lest we fail to service interrupts in a timely manner, limit the
794 amount of work we're willing to do. For now, let's cap at 8k. */
795 if (l > 0x2000) {
796 l = 0x2000;
797 env->cc_op = 3;
798 } else {
799 env->cc_op = 0;
800 }
801
802 for (i = 0; i < l; i++) {
803 uint8_t byte, new_byte;
804
805 byte = cpu_ldub_data(env, array + i);
806
807 if (byte == end) {
808 env->cc_op = 1;
809 break;
810 }
811
812 new_byte = cpu_ldub_data(env, trans + byte);
813 cpu_stb_data(env, array + i, new_byte);
814 }
815
816 env->retxl = len - i;
817 return array + i;
818 }
819
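/* Translate and test (TRT): scan the operand using trans as a table of
function bytes. At the first nonzero function byte its address is placed in
r1, the function byte in the low byte of r2, and cc is set to 1 (or 2 if it
was the last byte scanned); cc 0 means every function byte was zero. */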
820 uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
821 uint64_t trans)
822 {
823 uint32_t cc = 0;
824 int i;
825
826 for (i = 0; i <= len; i++) {
827 uint8_t byte = cpu_ldub_data(env, array + i);
828 uint8_t sbyte = cpu_ldub_data(env, trans + byte);
829
830 if (sbyte != 0) {
831 env->regs[1] = array + i;
832 env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
833 cc = (i == len) ? 2 : 1;
834 break;
835 }
836 }
837
838 return cc;
839 }
840
841 #if !defined(CONFIG_USER_ONLY)
842 void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
843 {
844 S390CPU *cpu = s390_env_get_cpu(env);
845 bool PERchanged = false;
846 int i;
847 uint64_t src = a2;
848 uint64_t val;
849
850 for (i = r1;; i = (i + 1) % 16) {
851 val = cpu_ldq_data(env, src);
852 if (env->cregs[i] != val && i >= 9 && i <= 11) {
853 PERchanged = true;
854 }
855 env->cregs[i] = val;
856 HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
857 i, src, env->cregs[i]);
858 src += sizeof(uint64_t);
859
860 if (i == r3) {
861 break;
862 }
863 }
864
865 if (PERchanged && env->psw.mask & PSW_MASK_PER) {
866 s390_cpu_recompute_watchpoints(CPU(cpu));
867 }
868
869 tlb_flush(CPU(cpu), 1);
870 }
871
872 void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
873 {
874 S390CPU *cpu = s390_env_get_cpu(env);
875 bool PERchanged = false;
876 int i;
877 uint64_t src = a2;
878 uint32_t val;
879
880 for (i = r1;; i = (i + 1) % 16) {
881 val = cpu_ldl_data(env, src);
882 if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
883 PERchanged = true;
884 }
885 env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
886 src += sizeof(uint32_t);
887
888 if (i == r3) {
889 break;
890 }
891 }
892
893 if (PERchanged && env->psw.mask & PSW_MASK_PER) {
894 s390_cpu_recompute_watchpoints(CPU(cpu));
895 }
896
897 tlb_flush(CPU(cpu), 1);
898 }
899
900 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
901 {
902 int i;
903 uint64_t dest = a2;
904
905 for (i = r1;; i = (i + 1) % 16) {
906 cpu_stq_data(env, dest, env->cregs[i]);
907 dest += sizeof(uint64_t);
908
909 if (i == r3) {
910 break;
911 }
912 }
913 }
914
915 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
916 {
917 int i;
918 uint64_t dest = a2;
919
920 for (i = r1;; i = (i + 1) % 16) {
921 cpu_stl_data(env, dest, env->cregs[i]);
922 dest += sizeof(uint32_t);
923
924 if (i == r3) {
925 break;
926 }
927 }
928 }
929
930 uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
931 {
932 /* XXX implement */
933
934 return 0;
935 }
936
937 /* insert storage key extended */
938 uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
939 {
940 uint64_t addr = get_address(env, 0, 0, r2);
941
942 if (addr > ram_size) {
943 return 0;
944 }
945
946 return env->storage_keys[addr / TARGET_PAGE_SIZE];
947 }
948
949 /* set storage key extended */
950 void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
951 {
952 uint64_t addr = get_address(env, 0, 0, r2);
953
954 if (addr > ram_size) {
955 return;
956 }
957
958 env->storage_keys[addr / TARGET_PAGE_SIZE] = r1;
959 }
960
961 /* reset reference bit extended */
962 uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
963 {
964 uint8_t re;
965 uint8_t key;
966
967 if (r2 > ram_size) {
968 return 0;
969 }
970
971 key = env->storage_keys[r2 / TARGET_PAGE_SIZE];
972 re = key & (SK_R | SK_C);
973 env->storage_keys[r2 / TARGET_PAGE_SIZE] = (key & ~SK_R);
974
975 /*
976 * cc
977 *
978 * 0 Reference bit zero; change bit zero
979 * 1 Reference bit zero; change bit one
980 * 2 Reference bit one; change bit zero
981 * 3 Reference bit one; change bit one
982 */
983
984 return re >> 1;
985 }
986
987 /* compare and swap and purge */
988 uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
989 {
990 S390CPU *cpu = s390_env_get_cpu(env);
991 uint32_t cc;
992 uint32_t o1 = env->regs[r1];
993 uint64_t a2 = r2 & ~3ULL;
994 uint32_t o2 = cpu_ldl_data(env, a2);
995
996 if (o1 == o2) {
997 cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
998 if (r2 & 0x3) {
999 /* flush TLB / ALB */
1000 tlb_flush(CPU(cpu), 1);
1001 }
1002 cc = 0;
1003 } else {
1004 env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
1005 cc = 1;
1006 }
1007
1008 return cc;
1009 }
1010
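/* Move to secondary (MVCS): copy up to 256 bytes from the primary address
space (a2) to the secondary address space (a1); cc 3 indicates that the
requested length was larger than 256 and was truncated. */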
1011 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1012 {
1013 int cc = 0, i;
1014
1015 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1016 __func__, l, a1, a2);
1017
1018 if (l > 256) {
1019 /* max 256 */
1020 l = 256;
1021 cc = 3;
1022 }
1023
1024 /* XXX replace w/ memcpy */
1025 for (i = 0; i < l; i++) {
1026 cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
1027 }
1028
1029 return cc;
1030 }
1031
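/* Move to primary (MVCP): the converse of MVCS above - copy up to 256 bytes
from the secondary address space (a2) into the primary address space (a1),
again with cc 3 indicating truncation of an over-long request. */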
1032 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1033 {
1034 int cc = 0, i;
1035
1036 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1037 __func__, l, a1, a2);
1038
1039 if (l > 256) {
1040 /* max 256 */
1041 l = 256;
1042 cc = 3;
1043 }
1044
1045 /* XXX replace w/ memcpy */
1046 for (i = 0; i < l; i++) {
1047 cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
1048 }
1049
1050 return cc;
1051 }
1052
1053 /* invalidate pte */
1054 void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
1055 {
1056 CPUState *cs = CPU(s390_env_get_cpu(env));
1057 uint64_t page = vaddr & TARGET_PAGE_MASK;
1058 uint64_t pte = 0;
1059
1060 /* XXX broadcast to other CPUs */
1061
1062 /* XXX Linux is nice enough to give us the exact pte address.
1063 According to spec we'd have to find it out ourselves */
1064 /* XXX Linux is fine with overwriting the pte, the spec requires
1065 us to only set the invalid bit */
1066 stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);
1067
1068 /* XXX we exploit the fact that Linux passes the exact virtual
1069 address here - it's not obliged to! */
1070 tlb_flush_page(cs, page);
1071
1072 /* XXX 31-bit hack */
1073 if (page & 0x80000000) {
1074 tlb_flush_page(cs, page & ~0x80000000);
1075 } else {
1076 tlb_flush_page(cs, page | 0x80000000);
1077 }
1078 }
1079
1080 /* flush local tlb */
1081 void HELPER(ptlb)(CPUS390XState *env)
1082 {
1083 S390CPU *cpu = s390_env_get_cpu(env);
1084
1085 tlb_flush(CPU(cpu), 1);
1086 }
1087
1088 /* load using real address */
1089 uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
1090 {
1091 CPUState *cs = CPU(s390_env_get_cpu(env));
1092
1093 return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
1094 }
1095
1096 uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
1097 {
1098 CPUState *cs = CPU(s390_env_get_cpu(env));
1099
1100 return ldq_phys(cs->as, get_address(env, 0, 0, addr));
1101 }
1102
1103 /* store using real address */
1104 void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
1105 {
1106 CPUState *cs = CPU(s390_env_get_cpu(env));
1107
1108 stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);
1109
1110 if ((env->psw.mask & PSW_MASK_PER) &&
1111 (env->cregs[9] & PER_CR9_EVENT_STORE) &&
1112 (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
1113 /* PSW is saved just before calling the helper. */
1114 env->per_address = env->psw.addr;
1115 env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
1116 }
1117 }
1118
1119 void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
1120 {
1121 CPUState *cs = CPU(s390_env_get_cpu(env));
1122
1123 stq_phys(cs->as, get_address(env, 0, 0, addr), v1);
1124
1125 if ((env->psw.mask & PSW_MASK_PER) &&
1126 (env->cregs[9] & PER_CR9_EVENT_STORE) &&
1127 (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
1128 /* PSW is saved just before calling the helper. */
1129 env->per_address = env->psw.addr;
1130 env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
1131 }
1132 }
1133
1134 /* load real address */
1135 uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
1136 {
1137 CPUState *cs = CPU(s390_env_get_cpu(env));
1138 uint32_t cc = 0;
1139 int old_exc = cs->exception_index;
1140 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
1141 uint64_t ret;
1142 int flags;
1143
1144 /* XXX incomplete - has more corner cases */
1145 if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
1146 program_interrupt(env, PGM_SPECIAL_OP, 2);
1147 }
1148
1149 cs->exception_index = old_exc;
1150 if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
1151 cc = 3;
1152 }
1153 if (cs->exception_index == EXCP_PGM) {
1154 ret = env->int_pgm_code | 0x80000000;
1155 } else {
1156 ret |= addr & ~TARGET_PAGE_MASK;
1157 }
1158 cs->exception_index = old_exc;
1159
1160 env->cc_op = cc;
1161 return ret;
1162 }
1163 #endif