/*
 * s390x exception / interrupt helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include "internal.h"
#include "exec/helper-proto.h"
#include "qemu/timer.h"
#include "exec/exec-all.h"
#include "exec/cpu_ldst.h"
#include "hw/s390x/ioinst.h"
#include "exec/address-spaces.h"
#include "tcg_s390x.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#include "hw/s390x/s390_flic.h"
#include "hw/boards.h"
#endif

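/*
 * Unwind the TCG state back to the guest instruction at return address
 * @ra, then post program interrupt @code and leave the cpu loop. The
 * instruction length is recovered from the unwind info (ILEN_UNWIND).
 */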
void QEMU_NORETURN tcg_s390_program_interrupt(CPUS390XState *env,
                                              uint32_t code, uintptr_t ra)
{
    CPUState *cs = env_cpu(env);

    cpu_restore_state(cs, ra, true);
    qemu_log_mask(CPU_LOG_INT, "program interrupt at %#" PRIx64 "\n",
                  env->psw.addr);
    trigger_pgm_exception(env, code, ILEN_UNWIND);
    cpu_loop_exit(cs);
}

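/*
 * Raise a data exception: record the data-exception code (DXC) in the
 * lowcore and, if AFP is enabled in CR0, in the FPC, then deliver a
 * PGM_DATA program interrupt.
 */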
void QEMU_NORETURN tcg_s390_data_exception(CPUS390XState *env, uint32_t dxc,
                                           uintptr_t ra)
{
    g_assert(dxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Store the DXC into the lowcore */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), dxc);
#endif

    /* Store the DXC into the FPC if AFP is enabled */
    if (env->cregs[0] & CR0_AFP) {
        env->fpc = deposit32(env->fpc, 8, 8, dxc);
    }
    tcg_s390_program_interrupt(env, PGM_DATA, ra);
}

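/*
 * Raise a vector-processing exception: the vector-exception code (VXC)
 * shares the DXC field in the lowcore and FPC, and is stored into both
 * unconditionally before delivering PGM_VECTOR_PROCESSING.
 */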
void QEMU_NORETURN tcg_s390_vector_exception(CPUS390XState *env, uint32_t vxc,
                                             uintptr_t ra)
{
    g_assert(vxc <= 0xff);
#if !defined(CONFIG_USER_ONLY)
    /* Always store the VXC into the lowcore, without AFP it is undefined */
    stl_phys(env_cpu(env)->as,
             env->psa + offsetof(LowCore, data_exc_code), vxc);
#endif

    /* Always store the VXC into the FPC, without AFP it is undefined */
    env->fpc = deposit32(env->fpc, 8, 8, vxc);
    tcg_s390_program_interrupt(env, PGM_VECTOR_PROCESSING, ra);
}

void HELPER(data_exception)(CPUS390XState *env, uint32_t dxc)
{
    tcg_s390_data_exception(env, dxc, GETPC());
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

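/*
 * user-only: every fault is surfaced as an addressing exception. There
 * is no MMU to consult, so the faulting address is simply recorded for
 * cpu_loop to pick up.
 */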
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);

    trigger_pgm_exception(&cpu->env, PGM_ADDRESSING, ILEN_UNWIND);
    /*
     * On real machines this value is dropped into LowMem. Since this
     * is userland, simply put this someplace that cpu_loop can find it.
     */
    cpu->env.__excp_addr = address;
    cpu_loop_exit_restore(cs, retaddr);
}

#else /* !CONFIG_USER_ONLY */

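/* Map a translation MMU index back to its address-space-control value. */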
static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
{
    switch (mmu_idx) {
    case MMU_PRIMARY_IDX:
        return PSW_ASC_PRIMARY;
    case MMU_SECONDARY_IDX:
        return PSW_ASC_SECONDARY;
    case MMU_HOME_IDX:
        return PSW_ASC_HOME;
    default:
        abort();
    }
}

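/*
 * Fill the TLB for @address, going through dynamic address translation
 * (mmu_translate) or real-address handling depending on @mmu_idx. On
 * failure, either return false (probe) or store the translation-
 * exception code in the lowcore and raise a program interrupt.
 */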
bool s390_cpu_tlb_fill(CPUState *cs, vaddr address, int size,
                       MMUAccessType access_type, int mmu_idx,
                       bool probe, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong vaddr, raddr;
    uint64_t asc, tec;
    int prot, excp;

    qemu_log_mask(CPU_LOG_MMU, "%s: addr 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
                  __func__, address, access_type, mmu_idx);

    vaddr = address;

    if (mmu_idx < MMU_REAL_IDX) {
        asc = cpu_mmu_idx_to_asc(mmu_idx);
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate(env, vaddr, access_type, asc, &raddr, &prot, &tec);
    } else if (mmu_idx == MMU_REAL_IDX) {
        /* 31-Bit mode */
        if (!(env->psw.mask & PSW_MASK_64)) {
            vaddr &= 0x7fffffff;
        }
        excp = mmu_translate_real(env, vaddr, access_type, &raddr, &prot, &tec);
    } else {
        g_assert_not_reached();
    }

    /* check for out-of-RAM accesses */
    if (!excp &&
        !address_space_access_valid(&address_space_memory, raddr,
                                    TARGET_PAGE_SIZE, access_type,
                                    MEMTXATTRS_UNSPECIFIED)) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n",
                      __func__, (uint64_t)raddr, (uint64_t)ram_size);
        excp = PGM_ADDRESSING;
        tec = 0; /* unused */
    }

    if (!excp) {
        qemu_log_mask(CPU_LOG_MMU,
                      "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                      __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
        tlb_set_page(cs, address & TARGET_PAGE_MASK, raddr, prot,
                     mmu_idx, TARGET_PAGE_SIZE);
        return true;
    }
    if (probe) {
        return false;
    }

    if (excp != PGM_ADDRESSING) {
        stq_phys(env_cpu(env)->as,
                 env->psa + offsetof(LowCore, trans_exc_code), tec);
    }

    /*
     * For data accesses, ILEN will be filled in from the unwind info,
     * within cpu_loop_exit_restore. For code accesses, retaddr == 0,
     * and so unwinding will not occur. However, ILEN is also undefined
     * for that case -- we choose to set ILEN = 2.
     */
    trigger_pgm_exception(env, excp, 2);
    cpu_loop_exit_restore(cs, retaddr);
}

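/*
 * Deliver a program interrupt: advance the PSW past the instruction for
 * non-nullifying exceptions, store the interrupt code, ILC and old PSW
 * in the lowcore, and load the program-new PSW.
 */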
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    if (ilen == ILEN_AUTO) {
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
    }
    assert(ilen == 2 || ilen == 4 || ilen == 6);

    switch (env->int_pgm_code) {
    case PGM_PER:
        if (env->per_perc_atmid & PER_CODE_EVENT_NULLIFICATION) {
            break;
        }
        /* FALL THROUGH */
    case PGM_OPERATION:
    case PGM_PRIVILEGED:
    case PGM_EXECUTE:
    case PGM_PROTECTION:
    case PGM_ADDRESSING:
    case PGM_SPECIFICATION:
    case PGM_DATA:
    case PGM_FIXPT_OVERFLOW:
    case PGM_FIXPT_DIVIDE:
    case PGM_DEC_OVERFLOW:
    case PGM_DEC_DIVIDE:
    case PGM_HFP_EXP_OVERFLOW:
    case PGM_HFP_EXP_UNDERFLOW:
    case PGM_HFP_SIGNIFICANCE:
    case PGM_HFP_DIVIDE:
    case PGM_TRANS_SPEC:
    case PGM_SPECIAL_OP:
    case PGM_OPERAND:
    case PGM_HFP_SQRT:
    case PGM_PC_TRANS_SPEC:
    case PGM_ALET_SPEC:
    case PGM_MONITOR:
        /* advance the PSW if our exception is not nullifying */
        env->psw.addr += ilen;
        break;
    }

    qemu_log_mask(CPU_LOG_INT,
                  "%s: code=0x%x ilen=%d psw: %" PRIx64 " %" PRIx64 "\n",
                  __func__, env->int_pgm_code, ilen, env->psw.mask,
                  env->psw.addr);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

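/*
 * Deliver a SERVICE CALL interrupt: store the SVC code, ILC and old PSW
 * in the lowcore and load the svc-new PSW. A pending PER event is
 * delivered immediately afterwards.
 */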
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /*
     * When a PER event is pending, the PER exception has to happen
     * immediately after the SERVICE CALL one.
     */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

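/*
 * Deliver the highest-priority pending external interrupt that is
 * enabled by its subclass mask in CR0: emergency signal, external call,
 * clock comparator, cpu timer, then service signal.
 */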
static void do_ext_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = env_archcpu(env);
    uint64_t mask, addr;
    uint16_t cpu_addr;
    LowCore *lowcore;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    lowcore = cpu_map_lowcore(env);

    if ((env->pending_int & INTERRUPT_EMERGENCY_SIGNAL) &&
        (env->cregs[0] & CR0_EMERGENCY_SIGNAL_SC)) {
        MachineState *ms = MACHINE(qdev_get_machine());
        unsigned int max_cpus = ms->smp.max_cpus;

        lowcore->ext_int_code = cpu_to_be16(EXT_EMERGENCY);
        cpu_addr = find_first_bit(env->emergency_signals, S390_MAX_CPUS);
        g_assert(cpu_addr < S390_MAX_CPUS);
        lowcore->cpu_addr = cpu_to_be16(cpu_addr);
        clear_bit(cpu_addr, env->emergency_signals);
        if (bitmap_empty(env->emergency_signals, max_cpus)) {
            env->pending_int &= ~INTERRUPT_EMERGENCY_SIGNAL;
        }
    } else if ((env->pending_int & INTERRUPT_EXTERNAL_CALL) &&
               (env->cregs[0] & CR0_EXTERNAL_CALL_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_EXTERNAL_CALL);
        lowcore->cpu_addr = cpu_to_be16(env->external_call_addr);
        env->pending_int &= ~INTERRUPT_EXTERNAL_CALL;
    } else if ((env->pending_int & INTERRUPT_EXT_CLOCK_COMPARATOR) &&
               (env->cregs[0] & CR0_CKC_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CLOCK_COMP);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CLOCK_COMPARATOR;
    } else if ((env->pending_int & INTERRUPT_EXT_CPU_TIMER) &&
               (env->cregs[0] & CR0_CPU_TIMER_SC)) {
        lowcore->ext_int_code = cpu_to_be16(EXT_CPU_TIMER);
        lowcore->cpu_addr = 0;
        env->pending_int &= ~INTERRUPT_EXT_CPU_TIMER;
    } else if (qemu_s390_flic_has_service(flic) &&
               (env->cregs[0] & CR0_SERVICE_SC)) {
        uint32_t param;

        param = qemu_s390_flic_dequeue_service(flic);
        lowcore->ext_int_code = cpu_to_be16(EXT_SERVICE);
        lowcore->ext_params = cpu_to_be32(param);
        lowcore->cpu_addr = 0;
    } else {
        g_assert_not_reached();
    }

    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

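/*
 * Deliver one I/O interrupt from the flic, matched against the ISC mask
 * in CR6, filling in the subchannel and interruption words in the
 * lowcore.
 */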
static void do_io_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mask, addr;
    QEMUS390FlicIO *io;
    LowCore *lowcore;

    g_assert(env->psw.mask & PSW_MASK_IO);
    io = qemu_s390_flic_dequeue_io(flic, env->cregs[6]);
    g_assert(io);

    lowcore = cpu_map_lowcore(env);

    lowcore->subchannel_id = cpu_to_be16(io->id);
    lowcore->subchannel_nr = cpu_to_be16(io->nr);
    lowcore->io_int_parm = cpu_to_be32(io->parm);
    lowcore->io_int_word = cpu_to_be32(io->word);
    lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->io_new_psw.mask);
    addr = be64_to_cpu(lowcore->io_new_psw.addr);

    cpu_unmap_lowcore(lowcore);
    g_free(io);

    load_psw(env, mask, addr);
}

typedef struct MchkExtSaveArea {
    uint64_t vregs[32][2];                     /* 0x0000 */
    uint8_t pad_0x0200[0x0400 - 0x0200];       /* 0x0200 */
} MchkExtSaveArea;
QEMU_BUILD_BUG_ON(sizeof(MchkExtSaveArea) != 1024);

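/*
 * Store the 32 vector registers into the machine-check extended save
 * area at @mcesao. Returns -EFAULT if the area cannot be mapped.
 */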
static int mchk_store_vregs(CPUS390XState *env, uint64_t mcesao)
{
    hwaddr len = sizeof(MchkExtSaveArea);
    MchkExtSaveArea *sa;
    int i;

    sa = cpu_physical_memory_map(mcesao, &len, 1);
    if (!sa) {
        return -EFAULT;
    }
    if (len != sizeof(MchkExtSaveArea)) {
        cpu_physical_memory_unmap(sa, len, 1, 0);
        return -EFAULT;
    }

    for (i = 0; i < 32; i++) {
        sa->vregs[i][0] = cpu_to_be64(env->vregs[i][0]);
        sa->vregs[i][1] = cpu_to_be64(env->vregs[i][1]);
    }

    cpu_physical_memory_unmap(sa, len, 1, len);
    return 0;
}

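/*
 * Deliver a channel-report machine check: save the architectural
 * register state into the lowcore (and the vector registers into the
 * extended save area, if one is available), then load the mcck-new PSW.
 */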
static void do_mchk_interrupt(CPUS390XState *env)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    uint64_t mcic = s390_build_validity_mcic() | MCIC_SC_CP;
    uint64_t mask, addr, mcesao = 0;
    LowCore *lowcore;
    int i;

    /* for now we only support channel report machine checks (floating) */
    g_assert(env->psw.mask & PSW_MASK_MCHECK);
    g_assert(env->cregs[14] & CR14_CHANNEL_REPORT_SC);

    qemu_s390_flic_dequeue_crw_mchk(flic);

    lowcore = cpu_map_lowcore(env);

    /* extended save area */
    if (mcic & MCIC_VB_VR) {
        /* length and alignment is 1024 bytes */
        mcesao = be64_to_cpu(lowcore->mcesad) & ~0x3ffull;
    }

    /* try to store vector registers */
    if (!mcesao || mchk_store_vregs(env, mcesao)) {
        mcic &= ~MCIC_VB_VR;
    }

    /* we are always in z/Architecture mode */
    lowcore->ar_access_id = 1;

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(*get_freg(env, i));
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area = cpu_to_be64(env->cputm);
    lowcore->clock_comp_save_area = cpu_to_be64(env->ckc >> 8);

    lowcore->mcic = cpu_to_be64(mcic);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

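/*
 * Main interrupt delivery loop. Interrupt classes are tried in priority
 * order (machine check, external, I/O, restart, stop); after each
 * delivery we loop to see whether the new PSW enables further pending
 * interrupts.
 */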
void s390_cpu_do_interrupt(CPUState *cs)
{
    QEMUS390FLICState *flic = QEMU_S390_FLIC(s390_get_flic());
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    bool stopped = false;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at psw=%" PRIx64 ":%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.mask, env->psw.addr);

try_deliver:
    /* handle machine checks */
    if (cs->exception_index == -1 && s390_cpu_has_mcck_int(cpu)) {
        cs->exception_index = EXCP_MCHK;
    }
    /* handle external interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_ext_int(cpu)) {
        cs->exception_index = EXCP_EXT;
    }
    /* handle I/O interrupts */
    if (cs->exception_index == -1 && s390_cpu_has_io_int(cpu)) {
        cs->exception_index = EXCP_IO;
    }
    /* RESTART interrupt */
    if (cs->exception_index == -1 && s390_cpu_has_restart_int(cpu)) {
        cs->exception_index = EXCP_RESTART;
    }
    /* STOP interrupt has least priority */
    if (cs->exception_index == -1 && s390_cpu_has_stop_int(cpu)) {
        cs->exception_index = EXCP_STOP;
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    case EXCP_RESTART:
        do_restart_interrupt(env);
        break;
    case EXCP_STOP:
        do_stop_interrupt(env);
        stopped = true;
        break;
    }

    if (cs->exception_index != -1 && !stopped) {
        /* check if there are more pending interrupts to deliver */
        cs->exception_index = -1;
        goto try_deliver;
    }
    cs->exception_index = -1;

    /* we might still have pending interrupts, but not deliverable */
    if (!env->pending_int && !qemu_s390_flic_has_any(flic)) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }

    /* WAIT PSW during interrupt injection or STOP interrupt */
    if ((env->psw.mask & PSW_MASK_WAIT) || stopped) {
        /* don't trigger a cpu_loop_exit(), use an interrupt instead */
        cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
    } else if (cs->halted) {
        /* unhalt if we had a WAIT PSW somewhere in our injection chain */
        s390_cpu_unhalt(cpu);
    }
}

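/*
 * Called by the main loop when CPU_INTERRUPT_HARD is pending; deliver
 * one round of interrupts unless we are in the middle of an EXECUTE.
 */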
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->ex_value) {
            /*
             * Execution of the target insn is indivisible from
             * the parent EXECUTE insn.
             */
            return false;
        }
        if (s390_cpu_has_int(cpu)) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
        if (env->psw.mask & PSW_MASK_WAIT) {
            /*
             * Woken up because of a floating interrupt but it has already
             * been delivered. Go back to sleep.
             */
            cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HALT);
        }
    }
    return false;
}

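/*
 * A CPU watchpoint here implements the PER storage-alteration event.
 * Record the PER event, then remove the watchpoints and re-execute the
 * code so that the PER exception can be raised by the translated code.
 */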
void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /*
         * FIXME: When the storage-alteration-space control bit is set,
         * the exception should only be triggered if the memory access
         * is done using an address space with the storage-alteration-event
         * bit set. We have no way to detect that with the current
         * watchpoint code.
         */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /*
         * FIXME: We currently have no way to detect the address space used
         * to trigger the watchpoint. For now just assume it is the current
         * default ASC. This turns out to be true except when the MVCP and
         * MVCS instructions are used.
         */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /*
         * Remove all watchpoints to re-execute the code. A PER exception
         * will be triggered, it will call load_psw which will recompute
         * the watchpoints.
         */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_loop_exit_noexc(cs);
    }
}

/*
 * Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
 * this is only for the atomic operations, for which we want to raise a
 * specification exception.
 */
void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
                                   MMUAccessType access_type,
                                   int mmu_idx, uintptr_t retaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    tcg_s390_program_interrupt(env, PGM_SPECIFICATION, retaddr);
}

#endif /* CONFIG_USER_ONLY */