]> git.proxmox.com Git - mirror_qemu.git/blob - target-s390x/mem_helper.c
Merge remote-tracking branch 'remotes/rth/tags/pull-tcg-20150911' into staging
[mirror_qemu.git] / target-s390x / mem_helper.c
1 /*
2 * S/390 memory access helper routines
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 * Copyright (c) 2009 Alexander Graf
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
20
21 #include "cpu.h"
22 #include "exec/helper-proto.h"
23 #include "exec/cpu_ldst.h"
24 #include "hw/s390x/storage-keys.h"
25
26 /*****************************************************************************/
27 /* Softmmu support */
28 #if !defined(CONFIG_USER_ONLY)
29
30 /* try to fill the TLB and return an exception if error. If retaddr is
31 NULL, it means that the function was called in C code (i.e. not
32 from generated code or from helper.c) */
33 /* XXX: fix it to restore all registers */
34 void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
35 uintptr_t retaddr)
36 {
37 int ret;
38
39 ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
40 if (unlikely(ret != 0)) {
41 if (likely(retaddr)) {
42 /* now we have a real cpu fault */
43 cpu_restore_state(cs, retaddr);
44 }
45 cpu_loop_exit(cs);
46 }
47 }
48
49 #endif
50
51 /* #define DEBUG_HELPER */
52 #ifdef DEBUG_HELPER
53 #define HELPER_LOG(x...) qemu_log(x)
54 #else
55 #define HELPER_LOG(x...)
56 #endif
57
58 /* Reduce the length so that addr + len doesn't cross a page boundary. */
59 static inline uint64_t adj_len_to_page(uint64_t len, uint64_t addr)
60 {
61 #ifndef CONFIG_USER_ONLY
62 if ((addr & ~TARGET_PAGE_MASK) + len - 1 >= TARGET_PAGE_SIZE) {
63 return -addr & ~TARGET_PAGE_MASK;
64 }
65 #endif
66 return len;
67 }
68
69 static void fast_memset(CPUS390XState *env, uint64_t dest, uint8_t byte,
70 uint32_t l)
71 {
72 int mmu_idx = cpu_mmu_index(env, false);
73
74 while (l > 0) {
75 void *p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
76 if (p) {
77 /* Access to the whole page in write mode granted. */
78 int l_adj = adj_len_to_page(l, dest);
79 memset(p, byte, l_adj);
80 dest += l_adj;
81 l -= l_adj;
82 } else {
83 /* We failed to get access to the whole page. The next write
84 access will likely fill the QEMU TLB for the next iteration. */
85 cpu_stb_data(env, dest, byte);
86 dest++;
87 l--;
88 }
89 }
90 }
91
92 static void fast_memmove(CPUS390XState *env, uint64_t dest, uint64_t src,
93 uint32_t l)
94 {
95 int mmu_idx = cpu_mmu_index(env, false);
96
97 while (l > 0) {
98 void *src_p = tlb_vaddr_to_host(env, src, MMU_DATA_LOAD, mmu_idx);
99 void *dest_p = tlb_vaddr_to_host(env, dest, MMU_DATA_STORE, mmu_idx);
100 if (src_p && dest_p) {
101 /* Access to both whole pages granted. */
102 int l_adj = adj_len_to_page(l, src);
103 l_adj = adj_len_to_page(l_adj, dest);
104 memmove(dest_p, src_p, l_adj);
105 src += l_adj;
106 dest += l_adj;
107 l -= l_adj;
108 } else {
109 /* We failed to get access to one or both whole pages. The next
110 read or write access will likely fill the QEMU TLB for the
111 next iteration. */
112 cpu_stb_data(env, dest, cpu_ldub_data(env, src));
113 src++;
114 dest++;
115 l--;
116 }
117 }
118 }
119
120 /* and on array */
121 uint32_t HELPER(nc)(CPUS390XState *env, uint32_t l, uint64_t dest,
122 uint64_t src)
123 {
124 int i;
125 unsigned char x;
126 uint32_t cc = 0;
127
128 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
129 __func__, l, dest, src);
130 for (i = 0; i <= l; i++) {
131 x = cpu_ldub_data(env, dest + i) & cpu_ldub_data(env, src + i);
132 if (x) {
133 cc = 1;
134 }
135 cpu_stb_data(env, dest + i, x);
136 }
137 return cc;
138 }
139
140 /* xor on array */
141 uint32_t HELPER(xc)(CPUS390XState *env, uint32_t l, uint64_t dest,
142 uint64_t src)
143 {
144 int i;
145 unsigned char x;
146 uint32_t cc = 0;
147
148 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
149 __func__, l, dest, src);
150
151 /* xor with itself is the same as memset(0) */
152 if (src == dest) {
153 fast_memset(env, dest, 0, l + 1);
154 return 0;
155 }
156
157 for (i = 0; i <= l; i++) {
158 x = cpu_ldub_data(env, dest + i) ^ cpu_ldub_data(env, src + i);
159 if (x) {
160 cc = 1;
161 }
162 cpu_stb_data(env, dest + i, x);
163 }
164 return cc;
165 }
166
167 /* or on array */
168 uint32_t HELPER(oc)(CPUS390XState *env, uint32_t l, uint64_t dest,
169 uint64_t src)
170 {
171 int i;
172 unsigned char x;
173 uint32_t cc = 0;
174
175 HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
176 __func__, l, dest, src);
177 for (i = 0; i <= l; i++) {
178 x = cpu_ldub_data(env, dest + i) | cpu_ldub_data(env, src + i);
179 if (x) {
180 cc = 1;
181 }
182 cpu_stb_data(env, dest + i, x);
183 }
184 return cc;
185 }
186
/* MVC: copy l+1 bytes from src to dest.  MVC is architecturally a
   left-to-right byte-by-byte move, which differs from memmove() when
   the operands overlap destructively (dest inside [src, src+l]). */
void HELPER(mvc)(CPUS390XState *env, uint32_t l, uint64_t dest, uint64_t src)
{
    int i = 0;

    HELPER_LOG("%s l %d dest %" PRIx64 " src %" PRIx64 "\n",
               __func__, l, dest, src);

    /* mvc with source pointing to the byte after the destination is the
       same as memset with the first source byte (the classic one-byte
       overlap idiom used to propagate a fill byte). */
    if (dest == (src + 1)) {
        fast_memset(env, dest, cpu_ldub_data(env, src), l + 1);
        return;
    }

    /* mvc and memmove do not behave the same when areas overlap!
       Only take the fast path when the ranges cannot overlap in the
       destructive direction. */
    if ((dest < src) || (src + l < dest)) {
        fast_memmove(env, dest, src, l + 1);
        return;
    }

    /* slow version with byte accesses which always work */
    for (i = 0; i <= l; i++) {
        cpu_stb_data(env, dest + i, cpu_ldub_data(env, src + i));
    }
}
213
214 /* compare unsigned byte arrays */
215 uint32_t HELPER(clc)(CPUS390XState *env, uint32_t l, uint64_t s1, uint64_t s2)
216 {
217 int i;
218 unsigned char x, y;
219 uint32_t cc;
220
221 HELPER_LOG("%s l %d s1 %" PRIx64 " s2 %" PRIx64 "\n",
222 __func__, l, s1, s2);
223 for (i = 0; i <= l; i++) {
224 x = cpu_ldub_data(env, s1 + i);
225 y = cpu_ldub_data(env, s2 + i);
226 HELPER_LOG("%02x (%c)/%02x (%c) ", x, x, y, y);
227 if (x < y) {
228 cc = 1;
229 goto done;
230 } else if (x > y) {
231 cc = 2;
232 goto done;
233 }
234 }
235 cc = 0;
236 done:
237 HELPER_LOG("\n");
238 return cc;
239 }
240
/* CLM: compare logical under mask.  The bytes of r1's low word that are
   selected by MASK (bit 0x8 selects the most significant byte) are
   compared left-to-right against successive bytes at ADDR.
   Returns cc 0 (all equal or empty mask), 1 (r1 byte low), 2 (high). */
uint32_t HELPER(clm)(CPUS390XState *env, uint32_t r1, uint32_t mask,
                     uint64_t addr)
{
    uint8_t r, d;
    uint32_t cc;

    HELPER_LOG("%s: r1 0x%x mask 0x%x addr 0x%" PRIx64 "\n", __func__, r1,
               mask, addr);
    cc = 0;
    while (mask) {
        if (mask & 8) {
            d = cpu_ldub_data(env, addr);
            /* Current register byte: r1 is shifted up each round so the
               byte under test is always in bits 24-31. */
            r = (r1 & 0xff000000UL) >> 24;
            HELPER_LOG("mask 0x%x %02x/%02x (0x%" PRIx64 ") ", mask, r, d,
                       addr);
            if (r < d) {
                cc = 1;
                break;
            } else if (r > d) {
                cc = 2;
                break;
            }
            /* Only consume a storage byte for selected register bytes. */
            addr++;
        }
        mask = (mask << 1) & 0xf;
        r1 <<= 8;
    }
    HELPER_LOG("\n");
    return cc;
}
272
273 static inline uint64_t fix_address(CPUS390XState *env, uint64_t a)
274 {
275 /* 31-Bit mode */
276 if (!(env->psw.mask & PSW_MASK_64)) {
277 a &= 0x7fffffff;
278 }
279 return a;
280 }
281
282 static inline uint64_t get_address(CPUS390XState *env, int x2, int b2, int d2)
283 {
284 uint64_t r = d2;
285 if (x2) {
286 r += env->regs[x2];
287 }
288 if (b2) {
289 r += env->regs[b2];
290 }
291 return fix_address(env, r);
292 }
293
294 static inline uint64_t get_address_31fix(CPUS390XState *env, int reg)
295 {
296 return fix_address(env, env->regs[reg]);
297 }
298
/* SRST: search string (c is byte to search, r2 is string, r1 end of string).
   Returns the new R1 value; the new R2 value is passed back in env->retxl
   and the condition code in env->cc_op. */
uint64_t HELPER(srst)(CPUS390XState *env, uint64_t r0, uint64_t end,
                      uint64_t str)
{
    uint32_t len;
    uint8_t v, c = r0;  /* only the low byte of r0 is the search character */

    str = fix_address(env, str);
    end = fix_address(env, end);

    /* Assume for now that R2 is unmodified. */
    env->retxl = str;

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        if (str + len == end) {
            /* Character not found.  R1 & R2 are unmodified. */
            env->cc_op = 2;
            return end;
        }
        v = cpu_ldub_data(env, str + len);
        if (v == c) {
            /* Character found.  Set R1 to the location; R2 is unmodified. */
            env->cc_op = 1;
            return str + len;
        }
    }

    /* CPU-determined bytes processed.  Advance R2 to next byte to process. */
    env->retxl = str + len;
    env->cc_op = 3;
    return end;
}
333
/* CLST: unsigned string compare (c is string terminator).
   Returns the new R1 value; the new R2 value is passed back in
   env->retxl and the condition code in env->cc_op. */
uint64_t HELPER(clst)(CPUS390XState *env, uint64_t c, uint64_t s1, uint64_t s2)
{
    uint32_t len;

    c = c & 0xff;  /* only the low byte of r0 is the terminator */
    s1 = fix_address(env, s1);
    s2 = fix_address(env, s2);

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    for (len = 0; len < 0x2000; ++len) {
        uint8_t v1 = cpu_ldub_data(env, s1 + len);
        uint8_t v2 = cpu_ldub_data(env, s2 + len);
        if (v1 == v2) {
            if (v1 == c) {
                /* Equal.  CC=0, and don't advance the registers. */
                env->cc_op = 0;
                env->retxl = s2;
                return s1;
            }
        } else {
            /* Unequal.  CC={1,2}, and advance the registers.  Note that
               the terminator need not be zero, but the string that contains
               the terminator is by definition "low". */
            env->cc_op = (v1 == c ? 1 : v2 == c ? 2 : v1 < v2 ? 1 : 2);
            env->retxl = s2 + len;
            return s1 + len;
        }
    }

    /* CPU-determined bytes equal; advance the registers. */
    env->cc_op = 3;
    env->retxl = s2 + len;
    return s1 + len;
}
370
/* MVPG: move one target page from the r2 address to the r1 address and
   set cc 0.  The r0 condition/key controls are not implemented. */
void HELPER(mvpg)(CPUS390XState *env, uint64_t r0, uint64_t r1, uint64_t r2)
{
    /* XXX missing r0 handling */
    env->cc_op = 0;
    fast_memmove(env, r1, r2, TARGET_PAGE_SIZE);
}
378
379 /* string copy (c is string terminator) */
380 uint64_t HELPER(mvst)(CPUS390XState *env, uint64_t c, uint64_t d, uint64_t s)
381 {
382 uint32_t len;
383
384 c = c & 0xff;
385 d = fix_address(env, d);
386 s = fix_address(env, s);
387
388 /* Lest we fail to service interrupts in a timely manner, limit the
389 amount of work we're willing to do. For now, let's cap at 8k. */
390 for (len = 0; len < 0x2000; ++len) {
391 uint8_t v = cpu_ldub_data(env, s + len);
392 cpu_stb_data(env, d + len, v);
393 if (v == c) {
394 /* Complete. Set CC=1 and advance R1. */
395 env->cc_op = 1;
396 env->retxl = s;
397 return d + len;
398 }
399 }
400
401 /* Incomplete. Set CC=3 and signal to advance R1 and R2. */
402 env->cc_op = 3;
403 env->retxl = s + len;
404 return d + len;
405 }
406
/* ICM: insert characters under mask.  Successive bytes from ADDRESS are
   inserted into the byte positions of r1's low word selected by MASK
   (bit 0x8 = most significant byte).  cc 1 if the first inserted byte
   has its top bit set, cc 2 if any inserted byte is nonzero otherwise,
   cc 0 if all inserted bytes are zero (or the mask is empty). */
static uint32_t helper_icm(CPUS390XState *env, uint32_t r1, uint64_t address,
                           uint32_t mask)
{
    int pos = 24; /* top of the lower half of r1 */
    uint64_t rmask = 0xff000000ULL;
    uint8_t val = 0;
    int ccd = 0;  /* set once the first selected byte has been examined */
    uint32_t cc = 0;

    while (mask) {
        if (mask & 8) {
            env->regs[r1] &= ~rmask;
            val = cpu_ldub_data(env, address);
            /* cc 1: leftmost inserted bit is one */
            if ((val & 0x80) && !ccd) {
                cc = 1;
            }
            ccd = 1;
            /* cc 2: some inserted bit one, but leftmost bit zero */
            if (val && cc == 0) {
                cc = 2;
            }
            env->regs[r1] |= (uint64_t)val << pos;
            address++;
        }
        mask = (mask << 1) & 0xf;
        pos -= 8;
        rmask >>= 8;
    }

    return cc;
}
437
/* execute instruction
   this instruction executes an insn modified with the contents of r1
   it does not change the executed instruction in memory
   it does not change the program counter
   in other words: tricky...
   currently implemented by interpreting the cases it is most commonly used in
*/
uint32_t HELPER(ex)(CPUS390XState *env, uint32_t cc, uint64_t v1,
                    uint64_t addr, uint64_t ret)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint16_t insn = cpu_lduw_code(env, addr);

    HELPER_LOG("%s: v1 0x%lx addr 0x%lx insn 0x%x\n", __func__, v1, addr,
               insn);
    if ((insn & 0xf0ff) == 0xd000) {
        /* SS-format storage-to-storage insns (MVC/NC/CLC/OC/XC/TR/TRT).
           NOTE(review): the length code is taken straight from the low
           byte of r1; architecturally EXECUTE ORs it with the target
           insn's own length byte -- confirm callers pass it pre-ORed. */
        uint32_t l, insn2, b1, b2, d1, d2;

        l = v1 & 0xff;
        insn2 = cpu_ldl_code(env, addr + 2);
        b1 = (insn2 >> 28) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d1 = (insn2 >> 16) & 0xfff;
        d2 = insn2 & 0xfff;
        switch (insn & 0xf00) {
        case 0x200:
            helper_mvc(env, l, get_address(env, 0, b1, d1),
                       get_address(env, 0, b2, d2));
            break;
        case 0x400:
            cc = helper_nc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x500:
            cc = helper_clc(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        case 0x600:
            cc = helper_oc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0x700:
            cc = helper_xc(env, l, get_address(env, 0, b1, d1),
                           get_address(env, 0, b2, d2));
            break;
        case 0xc00:
            helper_tr(env, l, get_address(env, 0, b1, d1),
                      get_address(env, 0, b2, d2));
            break;
        case 0xd00:
            cc = helper_trt(env, l, get_address(env, 0, b1, d1),
                            get_address(env, 0, b2, d2));
            break;
        default:
            goto abort;
        }
    } else if ((insn & 0xff00) == 0x0a00) {
        /* supervisor call: raise the SVC exception with the service
           number ORed in from r1, as if the SVC executed at RET - 4 */
        HELPER_LOG("%s: svc %ld via execute\n", __func__, (insn | v1) & 0xff);
        env->psw.addr = ret - 4;
        env->int_svc_code = (insn | v1) & 0xff;
        env->int_svc_ilen = 4;
        helper_exception(env, EXCP_SVC);
    } else if ((insn & 0xff00) == 0xbf00) {
        /* ICM (insert characters under mask), RS format */
        uint32_t insn2, r1, r3, b2, d2;

        insn2 = cpu_ldl_code(env, addr + 2);
        r1 = (insn2 >> 20) & 0xf;
        r3 = (insn2 >> 16) & 0xf;
        b2 = (insn2 >> 12) & 0xf;
        d2 = insn2 & 0xfff;
        cc = helper_icm(env, r1, get_address(env, 0, b2, d2), r3);
    } else {
    abort:
        cpu_abort(CPU(cpu), "EXECUTE on instruction prefix 0x%x not implemented\n",
                  insn);
    }
    return cc;
}
517
518 /* load access registers r1 to r3 from memory at a2 */
519 void HELPER(lam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
520 {
521 int i;
522
523 for (i = r1;; i = (i + 1) % 16) {
524 env->aregs[i] = cpu_ldl_data(env, a2);
525 a2 += 4;
526
527 if (i == r3) {
528 break;
529 }
530 }
531 }
532
533 /* store access registers r1 to r3 in memory at a2 */
534 void HELPER(stam)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
535 {
536 int i;
537
538 for (i = r1;; i = (i + 1) % 16) {
539 cpu_stl_data(env, a2, env->aregs[i]);
540 a2 += 4;
541
542 if (i == r3) {
543 break;
544 }
545 }
546 }
547
/* MVCL: move long.  The lengths live in the low 24 bits of R1+1/R2+1
   and the pad byte in bits of R2+1 just above the length. */
uint32_t HELPER(mvcl)(CPUS390XState *env, uint32_t r1, uint32_t r2)
{
    uint64_t destlen = env->regs[r1 + 1] & 0xffffff;
    uint64_t dest = get_address_31fix(env, r1);
    uint64_t srclen = env->regs[r2 + 1] & 0xffffff;
    uint64_t src = get_address_31fix(env, r2);
    uint8_t pad = env->regs[r2 + 1] >> 24;
    uint8_t v;
    uint32_t cc;

    /* cc reflects the comparison of the original operand lengths */
    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    /* copy the common prefix byte by byte */
    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    /* fill the remaining destination bytes with the pad byte */
    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    /* NOTE(review): destlen is always 0 here and this clears all upper
       bits of R1+1 rather than just the 24-bit length field -- confirm
       against the architecture before relying on the register image. */
    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    env->regs[r2 + 1] -= src - env->regs[r2];
    env->regs[r1] = dest;
    env->regs[r2] = src;

    return cc;
}
588
/* MVCLE: move long extended -- another memcopy insn with more bells and
   whistles.  Lengths and addresses come from the even/odd register
   pairs r1/r1+1 and r3/r3+1; the pad byte from the effective address. */
uint32_t HELPER(mvcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
                       uint32_t r3)
{
    uint64_t destlen = env->regs[r1 + 1];
    uint64_t dest = env->regs[r1];
    uint64_t srclen = env->regs[r3 + 1];
    uint64_t src = env->regs[r3];
    uint8_t pad = a2 & 0xff;
    uint8_t v;
    uint32_t cc;

    /* 24/31-bit mode: truncate lengths and mask addresses to 31 bits */
    if (!(env->psw.mask & PSW_MASK_64)) {
        destlen = (uint32_t)destlen;
        srclen = (uint32_t)srclen;
        dest &= 0x7fffffff;
        src &= 0x7fffffff;
    }

    /* cc reflects the comparison of the original lengths */
    if (destlen == srclen) {
        cc = 0;
    } else if (destlen < srclen) {
        cc = 1;
    } else {
        cc = 2;
    }

    if (srclen > destlen) {
        srclen = destlen;
    }

    /* copy the common prefix byte by byte */
    for (; destlen && srclen; src++, dest++, destlen--, srclen--) {
        v = cpu_ldub_data(env, src);
        cpu_stb_data(env, dest, v);
    }

    /* fill the remaining destination bytes with the pad byte */
    for (; destlen; dest++, destlen--) {
        cpu_stb_data(env, dest, pad);
    }

    env->regs[r1 + 1] = destlen;
    /* can't use srclen here, we trunc'ed it */
    /* FIXME: 31-bit mode! */
    env->regs[r3 + 1] -= src - env->regs[r3];
    env->regs[r1] = dest;
    env->regs[r3] = src;

    return cc;
}
638
639 /* compare logical long extended memcompare insn with padding */
640 uint32_t HELPER(clcle)(CPUS390XState *env, uint32_t r1, uint64_t a2,
641 uint32_t r3)
642 {
643 uint64_t destlen = env->regs[r1 + 1];
644 uint64_t dest = get_address_31fix(env, r1);
645 uint64_t srclen = env->regs[r3 + 1];
646 uint64_t src = get_address_31fix(env, r3);
647 uint8_t pad = a2 & 0xff;
648 uint8_t v1 = 0, v2 = 0;
649 uint32_t cc = 0;
650
651 if (!(destlen || srclen)) {
652 return cc;
653 }
654
655 if (srclen > destlen) {
656 srclen = destlen;
657 }
658
659 for (; destlen || srclen; src++, dest++, destlen--, srclen--) {
660 v1 = srclen ? cpu_ldub_data(env, src) : pad;
661 v2 = destlen ? cpu_ldub_data(env, dest) : pad;
662 if (v1 != v2) {
663 cc = (v1 < v2) ? 1 : 2;
664 break;
665 }
666 }
667
668 env->regs[r1 + 1] = destlen;
669 /* can't use srclen here, we trunc'ed it */
670 env->regs[r3 + 1] -= src - env->regs[r3];
671 env->regs[r1] = dest;
672 env->regs[r3] = src;
673
674 return cc;
675 }
676
/* CKSM: checksum.  Accumulates 32-bit words (with end-around carry) of
   the operand into r1.  Returns the number of bytes processed; the
   checksum goes back via env->retxl and the condition code via cc_op. */
uint64_t HELPER(cksm)(CPUS390XState *env, uint64_t r1,
                      uint64_t src, uint64_t src_len)
{
    uint64_t max_len, len;
    uint64_t cksm = (uint32_t)r1;  /* running sum; carries collect in bits 32+ */

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    max_len = (src_len > 0x2000 ? 0x2000 : src_len);

    /* Process full words as available. */
    for (len = 0; len + 4 <= max_len; len += 4, src += 4) {
        cksm += (uint32_t)cpu_ldl_data(env, src);
    }

    /* A 1-3 byte tail is treated as a word padded with zero bytes. */
    switch (max_len - len) {
    case 1:
        cksm += cpu_ldub_data(env, src) << 24;
        len += 1;
        break;
    case 2:
        cksm += cpu_lduw_data(env, src) << 16;
        len += 2;
        break;
    case 3:
        cksm += cpu_lduw_data(env, src) << 16;
        cksm += cpu_ldub_data(env, src + 2) << 8;
        len += 3;
        break;
    }

    /* Fold the carry from the checksum.  Note that we can see carry-out
       during folding more than once (but probably not more than twice). */
    while (cksm > 0xffffffffull) {
        cksm = (uint32_t)cksm + (cksm >> 32);
    }

    /* Indicate whether or not we've processed everything. */
    env->cc_op = (len == src_len ? 0 : 3);

    /* Return both cksm and processed length. */
    env->retxl = cksm;
    return len;
}
722
/* UNPK: unpack a packed-decimal field at SRC into a zoned-decimal field
   at DEST, working right to left.  LEN holds the two 4-bit length codes
   from the instruction: dest length in the high nibble, src length in
   the low nibble (both one less than the byte count). */
void HELPER(unpk)(CPUS390XState *env, uint32_t len, uint64_t dest,
                  uint64_t src)
{
    int len_dest = len >> 4;
    int len_src = len & 0xf;
    uint8_t b;
    int second_nibble = 0;  /* which nibble of the current src byte is next */

    /* point at the rightmost byte of each operand */
    dest += len_dest;
    src += len_src;

    /* last byte is special, it only flips the nibbles */
    b = cpu_ldub_data(env, src);
    cpu_stb_data(env, dest, (b << 4) | (b >> 4));
    src--;
    len_src--;

    /* now pad every nibble with 0xf0 */

    while (len_dest > 0) {
        uint8_t cur_byte = 0;

        /* once the source is exhausted, continue with zero digits */
        if (len_src > 0) {
            cur_byte = cpu_ldub_data(env, src);
        }

        len_dest--;
        dest--;

        /* only advance one nibble at a time */
        if (second_nibble) {
            cur_byte >>= 4;
            len_src--;
            src--;
        }
        second_nibble = !second_nibble;

        /* digit */
        cur_byte = (cur_byte & 0xf);
        /* zone bits */
        cur_byte |= 0xf0;

        cpu_stb_data(env, dest, cur_byte);
    }
}
768
769 void HELPER(tr)(CPUS390XState *env, uint32_t len, uint64_t array,
770 uint64_t trans)
771 {
772 int i;
773
774 for (i = 0; i <= len; i++) {
775 uint8_t byte = cpu_ldub_data(env, array + i);
776 uint8_t new_byte = cpu_ldub_data(env, trans + byte);
777
778 cpu_stb_data(env, array + i, new_byte);
779 }
780 }
781
/* TRE: translate extended.  Translate the bytes of ARRAY through the
   table at TRANS until the test byte (low byte of r0) is found or LEN
   bytes are done.  Returns the updated array address; the remaining
   length goes via env->retxl and the condition code via env->cc_op. */
uint64_t HELPER(tre)(CPUS390XState *env, uint64_t array,
                     uint64_t len, uint64_t trans)
{
    uint8_t end = env->regs[0] & 0xff;  /* test byte that stops translation */
    uint64_t l = len;
    uint64_t i;

    /* 24/31-bit mode: mask the address and truncate the length */
    if (!(env->psw.mask & PSW_MASK_64)) {
        array &= 0x7fffffff;
        l = (uint32_t)l;
    }

    /* Lest we fail to service interrupts in a timely manner, limit the
       amount of work we're willing to do.  For now, let's cap at 8k. */
    if (l > 0x2000) {
        l = 0x2000;
        env->cc_op = 3;  /* CPU-determined completion */
    } else {
        env->cc_op = 0;
    }

    for (i = 0; i < l; i++) {
        uint8_t byte, new_byte;

        byte = cpu_ldub_data(env, array + i);

        /* the test byte itself is left untranslated */
        if (byte == end) {
            env->cc_op = 1;
            break;
        }

        new_byte = cpu_ldub_data(env, trans + byte);
        cpu_stb_data(env, array + i, new_byte);
    }

    env->retxl = len - i;
    return array + i;
}
820
821 uint32_t HELPER(trt)(CPUS390XState *env, uint32_t len, uint64_t array,
822 uint64_t trans)
823 {
824 uint32_t cc = 0;
825 int i;
826
827 for (i = 0; i <= len; i++) {
828 uint8_t byte = cpu_ldub_data(env, array + i);
829 uint8_t sbyte = cpu_ldub_data(env, trans + byte);
830
831 if (sbyte != 0) {
832 env->regs[1] = array + i;
833 env->regs[2] = (env->regs[2] & ~0xff) | sbyte;
834 cc = (i == len) ? 2 : 1;
835 break;
836 }
837 }
838
839 return cc;
840 }
841
842 #if !defined(CONFIG_USER_ONLY)
/* LCTLG: load control registers r1 through r3 (wrapping modulo 16) from
   consecutive doublewords at a2. */
void HELPER(lctlg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint64_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldq_data(env, src);
        /* CR9-CR11 hold the PER controls; remember whether any of them
           changed so the watchpoints can be recomputed below. */
        if (env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = val;
        HELPER_LOG("load ctl %d from 0x%" PRIx64 " == 0x%" PRIx64 "\n",
                   i, src, env->cregs[i]);
        src += sizeof(uint64_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    /* control registers affect address translation: drop the TLB */
    tlb_flush(CPU(cpu), 1);
}
872
/* LCTL: load the low 32 bits of control registers r1 through r3
   (wrapping modulo 16) from consecutive words at a2; the high halves
   of the control registers are preserved. */
void HELPER(lctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    bool PERchanged = false;
    int i;
    uint64_t src = a2;
    uint32_t val;

    for (i = r1;; i = (i + 1) % 16) {
        val = cpu_ldl_data(env, src);
        /* CR9-CR11 hold the PER controls; remember whether any of them
           changed so the watchpoints can be recomputed below. */
        if ((uint32_t)env->cregs[i] != val && i >= 9 && i <= 11) {
            PERchanged = true;
        }
        env->cregs[i] = (env->cregs[i] & 0xFFFFFFFF00000000ULL) | val;
        src += sizeof(uint32_t);

        if (i == r3) {
            break;
        }
    }

    if (PERchanged && env->psw.mask & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(cpu));
    }

    /* control registers affect address translation: drop the TLB */
    tlb_flush(CPU(cpu), 1);
}
900
901 void HELPER(stctg)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
902 {
903 int i;
904 uint64_t dest = a2;
905
906 for (i = r1;; i = (i + 1) % 16) {
907 cpu_stq_data(env, dest, env->cregs[i]);
908 dest += sizeof(uint64_t);
909
910 if (i == r3) {
911 break;
912 }
913 }
914 }
915
916 void HELPER(stctl)(CPUS390XState *env, uint32_t r1, uint64_t a2, uint32_t r3)
917 {
918 int i;
919 uint64_t dest = a2;
920
921 for (i = r1;; i = (i + 1) % 16) {
922 cpu_stl_data(env, dest, env->cregs[i]);
923 dest += sizeof(uint32_t);
924
925 if (i == r3) {
926 break;
927 }
928 }
929 }
930
/* TPROT: test protection -- unimplemented stub that always reports
   cc 0 (fetch and store permitted) regardless of the operands. */
uint32_t HELPER(tprot)(uint64_t a1, uint64_t a2)
{
    /* XXX implement */

    return 0;
}
937
/* ISKE: insert storage key extended -- return the storage key of the
   page addressed by r2, or 0 if the address is outside RAM or the key
   cannot be read. */
uint64_t HELPER(iske)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    /* NOTE(review): this accepts addr == ram_size, one byte past the
       last valid address -- looks like it should be >=; confirm. */
    if (addr > ram_size) {
        return 0;
    }

    /* cache the skeys device/class lookup across invocations */
    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }
    return key;
}
960
/* SSKE: set storage key extended -- set the storage key of the page
   addressed by r2 to the low byte of r1; silently ignores addresses
   outside RAM and key-store failures. */
void HELPER(sske)(CPUS390XState *env, uint64_t r1, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint64_t addr = get_address(env, 0, 0, r2);
    uint8_t key;

    /* NOTE(review): this accepts addr == ram_size, one byte past the
       last valid address -- looks like it should be >=; confirm. */
    if (addr > ram_size) {
        return;
    }

    /* cache the skeys device/class lookup across invocations */
    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    key = (uint8_t) r1;
    skeyclass->set_skeys(ss, addr / TARGET_PAGE_SIZE, 1, &key);
}
981
/* RRBE: reset reference bit extended -- clear the reference bit in the
   storage key of the page addressed by r2 and report the previous
   reference and change bits in the condition code. */
uint32_t HELPER(rrbe)(CPUS390XState *env, uint64_t r2)
{
    static S390SKeysState *ss;
    static S390SKeysClass *skeyclass;
    uint8_t re, key;

    /* NOTE(review): r2 == ram_size is one byte past the last valid
       address -- looks like this should be >=; confirm. */
    if (r2 > ram_size) {
        return 0;
    }

    /* cache the skeys device/class lookup across invocations */
    if (unlikely(!ss)) {
        ss = s390_get_skeys_device();
        skeyclass = S390_SKEYS_GET_CLASS(ss);
    }

    if (skeyclass->get_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /* remember the previous reference/change bits, then clear R */
    re = key & (SK_R | SK_C);
    key &= ~SK_R;

    if (skeyclass->set_skeys(ss, r2 / TARGET_PAGE_SIZE, 1, &key)) {
        return 0;
    }

    /*
     * cc
     *
     * 0  Reference bit zero; change bit zero
     * 1  Reference bit zero; change bit one
     * 2  Reference bit one; change bit zero
     * 3  Reference bit one; change bit one
     */

    return re >> 1;
}
1020
/* CSP: compare and swap and purge.  Compares the low word of r1 with
   the word at the (word-aligned) r2 address; on match stores the low
   word of r1+1 there and, if requested by r2 bit 0/1, purges the TLB. */
uint32_t HELPER(csp)(CPUS390XState *env, uint32_t r1, uint64_t r2)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint32_t cc;
    uint32_t o1 = env->regs[r1];    /* compare value (low 32 bits of r1) */
    uint64_t a2 = r2 & ~3ULL;       /* operand address, forced word-aligned */
    uint32_t o2 = cpu_ldl_data(env, a2);

    if (o1 == o2) {
        /* match: store the replacement value from the odd register */
        cpu_stl_data(env, a2, env->regs[(r1 + 1) & 15]);
        if (r2 & 0x3) {
            /* flush TLB / ALB */
            tlb_flush(CPU(cpu), 1);
        }
        cc = 0;
    } else {
        /* mismatch: load the current value into the low half of r1 */
        env->regs[r1] = (env->regs[r1] & 0xffffffff00000000ULL) | o2;
        cc = 1;
    }

    return cc;
}
1044
1045 uint32_t HELPER(mvcs)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1046 {
1047 int cc = 0, i;
1048
1049 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1050 __func__, l, a1, a2);
1051
1052 if (l > 256) {
1053 /* max 256 */
1054 l = 256;
1055 cc = 3;
1056 }
1057
1058 /* XXX replace w/ memcpy */
1059 for (i = 0; i < l; i++) {
1060 cpu_stb_secondary(env, a1 + i, cpu_ldub_primary(env, a2 + i));
1061 }
1062
1063 return cc;
1064 }
1065
1066 uint32_t HELPER(mvcp)(CPUS390XState *env, uint64_t l, uint64_t a1, uint64_t a2)
1067 {
1068 int cc = 0, i;
1069
1070 HELPER_LOG("%s: %16" PRIx64 " %16" PRIx64 " %16" PRIx64 "\n",
1071 __func__, l, a1, a2);
1072
1073 if (l > 256) {
1074 /* max 256 */
1075 l = 256;
1076 cc = 3;
1077 }
1078
1079 /* XXX replace w/ memcpy */
1080 for (i = 0; i < l; i++) {
1081 cpu_stb_primary(env, a1 + i, cpu_ldub_secondary(env, a2 + i));
1082 }
1083
1084 return cc;
1085 }
1086
/* IPTE: invalidate page table entry -- mark the PTE at pte_addr invalid
   and flush the translations for vaddr from the local TLB. */
void HELPER(ipte)(CPUS390XState *env, uint64_t pte_addr, uint64_t vaddr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint64_t page = vaddr & TARGET_PAGE_MASK;
    uint64_t pte = 0;

    /* XXX broadcast to other CPUs */

    /* XXX Linux is nice enough to give us the exact pte address.
       According to spec we'd have to find it out ourselves */
    /* XXX Linux is fine with overwriting the pte, the spec requires
       us to only set the invalid bit */
    stq_phys(cs->as, pte_addr, pte | _PAGE_INVALID);

    /* XXX we exploit the fact that Linux passes the exact virtual
       address here - it's not obliged to! */
    tlb_flush_page(cs, page);

    /* XXX 31-bit hack: also flush the aliased page with the opposite
       value of address bit 31, since 31-bit guests may have created
       TLB entries under either form of the address. */
    if (page & 0x80000000) {
        tlb_flush_page(cs, page & ~0x80000000);
    } else {
        tlb_flush_page(cs, page | 0x80000000);
    }
}
1113
1114 /* flush local tlb */
1115 void HELPER(ptlb)(CPUS390XState *env)
1116 {
1117 S390CPU *cpu = s390_env_get_cpu(env);
1118
1119 tlb_flush(CPU(cpu), 1);
1120 }
1121
1122 /* load using real address */
1123 uint64_t HELPER(lura)(CPUS390XState *env, uint64_t addr)
1124 {
1125 CPUState *cs = CPU(s390_env_get_cpu(env));
1126
1127 return (uint32_t)ldl_phys(cs->as, get_address(env, 0, 0, addr));
1128 }
1129
1130 uint64_t HELPER(lurag)(CPUS390XState *env, uint64_t addr)
1131 {
1132 CPUState *cs = CPU(s390_env_get_cpu(env));
1133
1134 return ldq_phys(cs->as, get_address(env, 0, 0, addr));
1135 }
1136
/* STURA: store a 32-bit word using the real (untranslated) address. */
void HELPER(stura)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stl_phys(cs->as, get_address(env, 0, 0, addr), (uint32_t)v1);

    /* record a PER storage-alteration (real) event when enabled */
    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
1152
/* STURG: store a 64-bit doubleword using the real (untranslated) address. */
void HELPER(sturg)(CPUS390XState *env, uint64_t addr, uint64_t v1)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    stq_phys(cs->as, get_address(env, 0, 0, addr), v1);

    /* record a PER storage-alteration (real) event when enabled */
    if ((env->psw.mask & PSW_MASK_PER) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE) &&
        (env->cregs[9] & PER_CR9_EVENT_STORE_REAL)) {
        /* PSW is saved just before calling the helper */
        env->per_address = env->psw.addr;
        env->per_perc_atmid = PER_CODE_EVENT_STORE_REAL | get_per_atmid(env);
    }
}
1167
/* LRA: load real address -- translate ADDR through the current ASC
   mode; returns the real address (or a program-interrupt code with the
   high bit set on translation failure) and sets cc via env->cc_op. */
uint64_t HELPER(lra)(CPUS390XState *env, uint64_t addr)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));
    uint32_t cc = 0;
    int old_exc = cs->exception_index;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;
    uint64_t ret;
    int flags;

    /* XXX incomplete - has more corner cases */
    if (!(env->psw.mask & PSW_MASK_64) && (addr >> 32)) {
        program_interrupt(env, PGM_SPECIAL_OP, 2);
    }

    /* NOTE(review): exception_index is restored both before and after
       the translation; the first assignment looks redundant -- confirm. */
    cs->exception_index = old_exc;
    if (mmu_translate(env, addr, 0, asc, &ret, &flags, true)) {
        cc = 3;
    }
    if (cs->exception_index == EXCP_PGM) {
        /* translation raised a program interrupt: report its code */
        ret = env->int_pgm_code | 0x80000000;
    } else {
        ret |= addr & ~TARGET_PAGE_MASK;
    }
    cs->exception_index = old_exc;

    env->cc_op = cc;
    return ret;
}
1197 #endif