/*
 * S/390 helpers
 *
 * Copyright (c) 2009 Ulrich Hecht
 * Copyright (c) 2011 Alexander Graf
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "cpu.h"
#include "exec/gdbstub.h"
#include "qemu/timer.h"
#include "exec/cpu_ldst.h"
#ifndef CONFIG_USER_ONLY
#include "sysemu/sysemu.h"
#endif

//#define DEBUG_S390
//#define DEBUG_S390_STDOUT

#ifdef DEBUG_S390
#ifdef DEBUG_S390_STDOUT
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); \
         if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
#endif
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

#ifndef CONFIG_USER_ONLY
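/*
 * Timer callbacks: these only flag the respective interrupt as pending
 * and kick the CPU; actual delivery is deferred to s390_cpu_do_interrupt()
 * and depends on the external-interrupt mask in the PSW.
 */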
void s390x_tod_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_TOD;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}

void s390x_cpu_timer(void *opaque)
{
    S390CPU *cpu = opaque;
    CPUS390XState *env = &cpu->env;

    env->pending_int |= INTERRUPT_CPUTIMER;
    cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
}
#endif

S390CPU *cpu_s390x_init(const char *cpu_model)
{
    S390CPU *cpu;

    cpu = S390_CPU(object_new(TYPE_S390_CPU));

    object_property_set_bool(OBJECT(cpu), true, "realized", NULL);

    return cpu;
}

#if defined(CONFIG_USER_ONLY)

void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}

int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);

    cs->exception_index = EXCP_PGM;
    cpu->env.int_pgm_code = PGM_ADDRESSING;
    /* On real machines this value is dropped into LowMem.  Since this
       is userland, simply put this someplace that cpu_loop can find it.  */
    cpu->env.__excp_addr = address;
    return 1;
}

#else /* !CONFIG_USER_ONLY */

/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}

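/*
 * Fill the QEMU TLB for the given virtual address.  Returns 0 on success
 * (the page has been entered into the TLB) and 1 if translation raised a
 * program exception, in which case the caller must deliver it and leave
 * the current TB.
 */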
int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
                              int rw, int mmu_idx)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
    target_ulong vaddr, raddr;
    int prot;

    DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
            __func__, orig_vaddr, rw, mmu_idx);

    orig_vaddr &= TARGET_PAGE_MASK;
    vaddr = orig_vaddr;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
        /* Translation ended in exception */
        return 1;
    }

    /* Check for access out of RAM.  Note that raddr == ram_size is
       already one byte beyond the end of RAM, hence ">=".  */
    if (raddr >= ram_size) {
        DPRINTF("%s: raddr %" PRIx64 " >= ram_size %" PRIx64 "\n", __func__,
                (uint64_t)raddr, (uint64_t)ram_size);
        trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
                  __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);

    tlb_set_page(cs, orig_vaddr, raddr, prot,
                 mmu_idx, TARGET_PAGE_SIZE);

    return 0;
}

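/*
 * Debug (gdbstub/monitor) translation: walks the tables with the current
 * ASC from the PSW, but with exception generation disabled (the final
 * "false" argument to mmu_translate, in contrast to the fault handler
 * above).
 */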
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false);

    return raddr;
}

hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
{
    hwaddr phys_addr;
    target_ulong page;

    page = vaddr & TARGET_PAGE_MASK;
    phys_addr = cpu_get_phys_page_debug(cs, page);
    phys_addr += (vaddr & ~TARGET_PAGE_MASK);

    return phys_addr;
}

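/*
 * Load a new PSW.  Under TCG the condition code is tracked outside of
 * env->psw.mask, so it is re-extracted here from bits 18-19 of the new
 * mask ("(mask >> 44) & 3").  Entering the wait state halts the CPU;
 * if no other CPU remains running, the system is shut down.
 */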
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}

static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}

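/*
 * The interrupt delivery helpers below access the lowcore, the
 * architected per-CPU storage area holding the old/new PSW pairs and
 * interrupt parameters.  It sits at the CPU's prefix address (env->psa)
 * and is mapped and unmapped around each delivery.
 */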
static LowCore *cpu_map_lowcore(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    hwaddr len = sizeof(LowCore);

    lowcore = cpu_physical_memory_map(env->psa, &len, 1);

    if (len < sizeof(LowCore)) {
        cpu_abort(CPU(cpu), "Could not map lowcore\n");
    }

    return lowcore;
}

static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}

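/*
 * All delivery routines follow the same PSW-swap pattern: store the
 * current PSW as the class-specific "old" PSW in the lowcore, then fetch
 * and load the class-specific "new" PSW from the lowcore.
 */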
void do_restart_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->restart_new_psw.mask);
    addr = be64_to_cpu(lowcore->restart_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);
}

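/*
 * ILEN_LATER and ILEN_LATER_INC mean the instruction length was not yet
 * known when the exception was raised, so it is decoded here from the
 * first opcode byte.  The _INC variant additionally advances the PSW
 * past the instruction, for exceptions where the old PSW must point
 * behind the faulting instruction.
 */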
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}

static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}

#define VIRTIO_SUBCODE_64 0x0D00

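/*
 * Deliver one pending external interrupt.  The per-CPU queue drains
 * last-in-first-out: env->ext_index points at the most recently queued
 * entry, and INTERRUPT_EXT is only cleared once the queue is empty.
 */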
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

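/*
 * I/O interrupts are queued per interruption subclass (ISC).  A queued
 * interrupt is only delivered if its subclass-mask bit in control
 * register 6 is set; INTERRUPT_IO stays pending as long as any ISC still
 * has queued entries, delivered or masked off.
 */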
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }
}

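/*
 * Machine-check delivery additionally stores the architected save areas
 * (general, floating-point, access and control registers, prefix and
 * timers) into the lowcore before the PSW swap.  Only type-1
 * (CRW-pending) machine checks are handled, hence the fixed
 * interruption-code words below.
 */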
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}

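/*
 * Convert pending interruptions into an exception_index, checking the
 * respective PSW mask for each class.  The order below implements the
 * priority: machine checks first, then external interrupts, then I/O.
 */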
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_CPUTIMER;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}

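/*
 * Called by the main loop when CPU_INTERRUPT_HARD is set.  Only the
 * external-interrupt mask is checked here before dispatching; the
 * per-class masks are evaluated again in s390_cpu_do_interrupt().
 */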
bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
{
    if (interrupt_request & CPU_INTERRUPT_HARD) {
        S390CPU *cpu = S390_CPU(cs);
        CPUS390XState *env = &cpu->env;

        if (env->psw.mask & PSW_MASK_EXT) {
            s390_cpu_do_interrupt(cs);
            return true;
        }
    }
    return false;
}

void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed.  First
       remove them all.  */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled.  */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled.  */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchpoint spanning the whole memory range, so
           split it into two halves.  */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range wraps around, create two watchpoints.  */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
    } else {
        /* Default case, create a single watchpoint.  */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}

void s390x_cpu_debug_excp_handler(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    CPUWatchpoint *wp_hit = cs->watchpoint_hit;

    if (wp_hit && wp_hit->flags & BP_CPU) {
        /* FIXME: When the storage-alteration-space control bit is set,
           the exception should only be triggered if the memory access
           is done using an address space with the storage-alteration-event
           bit set.  We have no way to detect that with the current
           watchpoint code.  */
        cs->watchpoint_hit = NULL;

        env->per_address = env->psw.addr;
        env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
        /* FIXME: We currently have no way to detect the address space used
           to trigger the watchpoint.  For now just assume it is the
           current default ASC.  This turns out to be true except when the
           MVCP and MVCS instructions are used.  */
        env->per_perc_atmid |= (env->psw.mask & PSW_MASK_ASC) >> 46;

        /* Remove all watchpoints to re-execute the code.  A PER exception
           will be triggered, it will call load_psw() which will recompute
           the watchpoints.  */
        cpu_watchpoint_remove_all(cs, BP_CPU);
        cpu_resume_from_signal(cs, NULL);
    }
}
#endif /* CONFIG_USER_ONLY */