/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

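/* The timer callbacks below do not deliver anything themselves; they
   only mark the interrupt pending in env->pending_int and raise
   CPU_INTERRUPT_HARD, so that s390_cpu_do_interrupt() injects the
   corresponding external interrupt on the next interrupt check. */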
#ifndef CONFIG_USER_ONLY
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

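/* Create and realize a new S390CPU.  Note that a NULL Error pointer is
   passed for the "realized" property, so realization failures are not
   reported to the caller. */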
S390CPU *cpu_s390x_init(const char *cpu_model)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

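/* In user-only mode there is no MMU to emulate, so any fault that gets
   here is reported to cpu_loop as an addressing exception. */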
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

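/* Softmmu fault handler: translate the access, check that the real
   address lies within guest RAM, and install the mapping in the TLB.
   A return value of 1 means the translation failed and a program
   exception has been queued for delivery. */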
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* check out of RAM access */
    if (raddr > ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

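/* Debug (e.g. gdbstub) translation: return the real address for a
   virtual address, or -1 if the page cannot be translated.  Unlike the
   fault path above, no exception is raised on failure. */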
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

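/* Load a new PSW, the common tail of every interrupt delivery and of
   LOAD PSW: under TCG the condition code is resynchronized from the
   mask, PER watchpoints are recomputed when the PER bit changes, and a
   wait-state PSW halts the CPU (shutting down the machine if no CPU is
   left running). */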
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

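/* Return the PSW mask as the guest would see it: under TCG the
   condition code is computed lazily, so the current cc value has to be
   folded into bits 44/45 of the mask first. */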
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

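/* The lowcore is the architected control block at the CPU's prefix
   address (env->psa).  Interrupt delivery stores the old PSW and the
   interruption parameters there and fetches the new PSW from it. */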
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

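/* Deliver a program interrupt.  ILEN_LATER and ILEN_LATER_INC mean the
   instruction length was not known when the exception was raised; it is
   recovered here from the opcode at the current PSW address, and
   ILEN_LATER_INC additionally advances the PSW past the instruction. */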
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

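/* Deliver a supervisor-call interrupt.  The saved old PSW already
   points past the SVC instruction (psw.addr + ilen), so reloading it
   resumes execution after the call. */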
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

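/* Deliver the next queued external interrupt.  One queue entry is
   consumed per call; INTERRUPT_EXT stays pending until ext_index drops
   below zero. */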
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

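/* Deliver one I/O interrupt.  Each interruption subclass (ISC) has its
   own queue, and an entry may only be delivered while its subclass-mask
   bit in control register 6 is set.  INTERRUPT_IO is cleared only once
   every ISC queue is empty afterwards. */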
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

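/* Deliver a machine-check interrupt.  Only queue type 1 (the type used
   for channel report words) is handled, delivery requires CRW machine
   checks to be enabled in control register 14, and the full register
   state is saved to the lowcore save areas before the PSW swap. */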
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

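/* Top-level interrupt dispatcher: pending machine checks, external
   interrupts and I/O interrupts are considered in that order, each
   gated by its PSW mask bit, and the selected class is delivered
   through the matching do_*_interrupt() helper. */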
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

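/* PER storage-alteration events are emulated with QEMU watchpoints:
   drop all BP_CPU watchpoints and re-insert them to cover the address
   range currently selected by control registers 10 and 11. */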
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it in two parts.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This holds true except when the MVCP
           and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */