/* target-s390x/helper.c */

/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif


#ifndef CONFIG_USER_ONLY
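/*
 * Timer callbacks, run from the machine's TOD and CPU timers: each
 * flags the corresponding external interrupt as pending and kicks the
 * CPU with a hard interrupt so that s390_cpu_do_interrupt() eventually
 * delivers it.
 */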
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

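/*
 * cpu_model is accepted for interface compatibility but not interpreted
 * here, and errp is never set; every model string yields the single
 * TYPE_S390_CPU class.
 */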
S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    return cpu;
}

S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
{
    S390CPU *cpu;
    Error *err = NULL;

    cpu = cpu_s390x_create(cpu_model, &err);
    if (err != NULL) {
        goto out;
    }

    object_property_set_int(OBJECT(cpu), id, "id", &err);
    if (err != NULL) {
        goto out;
    }
    object_property_set_bool(OBJECT(cpu), true, "realized", &err);

out:
    if (err) {
        error_propagate(errp, err);
        object_unref(OBJECT(cpu));
        cpu = NULL;
    }
    return cpu;
}
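
/*
 * A minimal usage sketch (hypothetical caller, "host" model chosen for
 * illustration), mirroring what cpu_s390x_init() does below:
 *
 *     Error *err = NULL;
 *     S390CPU *cpu = s390x_new_cpu("host", 0, &err);
 *     if (cpu == NULL) {
 *         error_report_err(err);
 *     }
 */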

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    Error *err = NULL;
    S390CPU *cpu;
    /* Used to track the CPU ID for linux-user only */
    static int64_t next_cpu_id;

    cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
    if (err) {
        error_report_err(err);
    }
    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}
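
/*
 * Returning 1 tells the generic code that no TLB entry was installed;
 * the linux-user cpu_loop then sees EXCP_PGM together with __excp_addr
 * and converts the fault into a signal for the guest process.
 */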

#else /* !CONFIG_USER_ONLY */

/* Be sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
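
/*
 * Illustrative call pattern (not taken from this file): raise the
 * exception, then leave the translation block so the new
 * exception_index takes effect:
 *
 *     trigger_pgm_exception(env, PGM_SPECIFICATION, ILEN_LATER);
 *     cpu_loop_exit(cs);
 */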

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* Check for access beyond the end of guest RAM; valid real addresses
       run from 0 to ram_size - 1, so anything at or past ram_size is not
       backed by memory and raises an addressing exception.  */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
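
/*
 * Worked example (illustrative, 4 KiB pages): for vaddr 0x12345678 the
 * page lookup is done on 0x12345000, and the byte offset 0x678 is added
 * back onto the physical page address that comes back.
 */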

void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
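
/*
 * The condition code sits in PSW bits 18-19 (IBM bit numbering, MSB
 * first), i.e. at bit position 44 counting from the LSB; hence the
 * "(mask >> 44) & 3" above and the "<< 44" in get_psw_mask() below.
 */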

static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

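/*
 * The do_*_interrupt routines below all follow the same architected
 * pattern: store the current PSW into the class-specific "old PSW" slot
 * of the lowcore, fetch that class's "new PSW" from the lowcore, and
 * make it the current PSW via load_psw().
 */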
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
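
/*
 * ILEN_LATER and ILEN_LATER_INC (handled above) both derive the
 * instruction length from the opcode at the current PSW address; the
 * _INC variant additionally advances the PSW past the instruction, for
 * exceptions after which it should not be re-executed.
 */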

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}
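
/*
 * Only the first enabled queued I/O interrupt is delivered per call;
 * INTERRUPT_IO is only cleared once no ISC queue still holds entries
 * that could become deliverable.
 */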

static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
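
/*
 * The interruption-code words stored above (0x00400f1d 0x40330000)
 * appear to form a fixed machine-check interruption code reporting a
 * channel-report-pending condition, with the validity bits that this
 * emulation can always guarantee set.
 */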

void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            /* clear the CPU-timer interrupt that is delivered here */
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
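
/*
 * Note the priority implemented above: a pending machine check is
 * considered first, then external interrupts, then I/O interrupts;
 * program and SVC exceptions arrive with exception_index already set
 * and fall straight through to the switch.
 */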

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}
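
/*
 * Only the external-interrupt mask is tested before entering
 * s390_cpu_do_interrupt(), which then rechecks the individual PSW
 * masks for each interrupt class on its own.
 */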

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);

    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}
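
/*
 * Example (illustrative): with cregs[10] = 0xf000 and cregs[11] = 0xfff
 * the PER range wraps, so the looping case above installs two
 * watchpoints, one covering [0xf000, 2^64) and one covering [0, 0x1000).
 */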

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space
           used to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds true except when the MVCP and
           MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered; it will call load_psw, which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */