target-s390x/helper.c
/*
 *  S/390 helpers
 *
 *  Copyright (c) 2009 Ulrich Hecht
 *  Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif


#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif
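
/*
 * Illustrative sketch (compiled out): how these callbacks are typically
 * armed, assuming the timer_new_ns() API from "qemu/timer.h" and the
 * tod_timer/cpu_timer fields of CPUS390XState; the helper name is
 * hypothetical.
 */
#if 0
static void s390x_arm_cpu_timers(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;

    /* Fire s390x_tod_timer()/s390x_cpu_timer() when the deadline expires. */
    env->tod_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_tod_timer, cpu);
    env->cpu_timer = timer_new_ns(QEMU_CLOCK_VIRTUAL, s390x_cpu_timer, cpu);
}
#endif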

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}
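
/*
 * Note that cpu_model is accepted but not interpreted in this version:
 * the one TYPE_S390_CPU class is always instantiated.  Illustrative call
 * site (compiled out, the board function name is hypothetical):
 */
#if 0
static S390CPU *s390_board_create_cpu(const char *cpu_model)
{
    /* The model string is passed through but currently ignored. */
    return cpu_s390x_init(cpu_model);
}
#endif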

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}
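
/*
 * Illustrative sketch (compiled out) of the consuming side: the linux-user
 * cpu_loop() sees EXCP_PGM and reads __excp_addr back out of the CPU state.
 * queue_pgm_signal() is a hypothetical stand-in for the target's
 * signal-delivery helper.
 */
#if 0
/* in linux-user cpu_loop(), after cpu_exec() returns EXCP_PGM: */
if (env->int_pgm_code == PGM_ADDRESSING) {
    /* deliver SIGSEGV for the address saved above */
    queue_pgm_signal(env, TARGET_SIGSEGV, env->__excp_addr);
}
#endif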

#else /* !CONFIG_USER_ONLY */

/* Make sure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
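
/*
 * Illustrative sketch (compiled out) of a typical call site, assuming the
 * generic cpu_loop_exit(); the wrapper name is hypothetical.  The point is
 * that control must not fall back into the current TB.
 */
#if 0
static void QEMU_NORETURN raise_addressing_exception(CPUS390XState *env)
{
    trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
    cpu_loop_exit(CPU(s390_env_get_cpu(env)));  /* does not return */
}
#endif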

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check for out-of-RAM access */
    if (raddr > (ram_size + virtio_size)) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}
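
/*
 * Illustrative sketch (compiled out): the softmmu slow path funnels into
 * the function above via tlb_fill(), roughly as in mem_helper.c of the
 * same era; reproduced from memory, so indicative only.
 */
#if 0
void tlb_fill(CPUState *cs, target_ulong addr, int is_write,
              int mmu_idx, uintptr_t retaddr)
{
    int ret = s390_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);

    if (unlikely(ret != 0)) {
        if (likely(retaddr)) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        cpu_loop_exit(cs);
    }
}
#endif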

hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false);

    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}
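
/*
 * Worked example for the split above, assuming 4 KiB pages: for
 * vaddr 0x12345678 the page is 0x12345000; the translated page address
 * plus the retained offset 0x678 yields the final physical address.
 */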

void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
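
/*
 * The condition code sits in PSW bits 18-19 (IBM bit numbering from the
 * MSB), i.e. bits 45-44 of the 64-bit mask counting from the LSB; hence
 * the "<< 44" here and the "(mask >> 44) & 3" in load_psw().  The two
 * functions are inverses with respect to the CC field.
 */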

static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
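
/*
 * Every interrupt-delivery routine below follows the same pattern;
 * condensed sketch (compiled out) of the contract:
 */
#if 0
{
    LowCore *lowcore = cpu_map_lowcore(env);    /* aborts on failure */

    /* store the old PSW and fetch the new one, all fields big-endian */
    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);

    cpu_unmap_lowcore(lowcore);                 /* marks LowCore dirty */
}
#endif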

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
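
/*
 * For reference, get_ilen() (defined in cpu.h) derives the instruction
 * length from the two top bits of the first opcode byte, per the
 * z/Architecture encoding; sketch from memory, indicative only:
 */
#if 0
static inline int get_ilen(uint8_t opc)
{
    switch (opc >> 6) {
    case 0:
        return 2;   /* 00xxxxxx: 2-byte formats (e.g. RR) */
    case 1:
    case 2:
        return 4;   /* 01xxxxxx/10xxxxxx: 4-byte formats (e.g. RX, RS, SI) */
    default:
        return 6;   /* 11xxxxxx: 6-byte formats (e.g. SS, RIL) */
    }
}
#endif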

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
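
/*
 * The producer side of ext_queue lives in cpu_inject_ext() (interrupt.c);
 * sketch (compiled out) of its shape, from memory and indicative only:
 */
#if 0
void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
                    uint64_t param64)
{
    CPUS390XState *env = &cpu->env;

    if (env->ext_index == MAX_EXT_QUEUE - 1) {
        /* queue full: the interrupt is dropped */
        return;
    }

    env->ext_index++;
    env->ext_queue[env->ext_index].code = code;
    env->ext_queue[env->ext_index].param = param;
    env->ext_queue[env->ext_index].param64 = param64;

    env->pending_int |= INTERRUPT_EXT;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif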

static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if the storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it into two halves.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps around, so create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}
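
/*
 * Worked example for the wrap-around case above: with cregs[10] = 0xf000
 * and cregs[11] = 0x0fff the PER range wraps through the top of the
 * address space, so two watchpoints result: one at 0xf000 with length
 * -0xf000 (i.e. up to the end of the address space) and one at 0 with
 * length 0x1000.
 */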

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds except when the MVCP and MVCS
           instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered; it will call load_psw(), which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */