]> git.proxmox.com Git - mirror_qemu.git/blame - target-s390x/helper.c
ivshmem: Fix 64 bit memory bar configuration
[mirror_qemu.git] / target-s390x / helper.c
CommitLineData
10ec5117
AG
1/*
2 * S/390 helpers
3 *
4 * Copyright (c) 2009 Ulrich Hecht
d5a43964 5 * Copyright (c) 2011 Alexander Graf
10ec5117
AG
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
70539e18 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
10ec5117
AG
19 */
20
9615495a 21#include "qemu/osdep.h"
da34e65c 22#include "qapi/error.h"
10ec5117 23#include "cpu.h"
022c62cb 24#include "exec/gdbstub.h"
1de7afc9 25#include "qemu/timer.h"
63c91552 26#include "exec/exec-all.h"
f08b6170 27#include "exec/cpu_ldst.h"
bd3f16ac 28#include "hw/s390x/ioinst.h"
ef81522b 29#ifndef CONFIG_USER_ONLY
9c17d615 30#include "sysemu/sysemu.h"
ef81522b 31#endif
10ec5117 32
d5a43964 33//#define DEBUG_S390
d5a43964
AG
34//#define DEBUG_S390_STDOUT
35
36#ifdef DEBUG_S390
37#ifdef DEBUG_S390_STDOUT
38#define DPRINTF(fmt, ...) \
39 do { fprintf(stderr, fmt, ## __VA_ARGS__); \
013a2942 40 if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
d5a43964
AG
41#else
42#define DPRINTF(fmt, ...) \
43 do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
44#endif
45#else
46#define DPRINTF(fmt, ...) \
47 do { } while (0)
48#endif
49
d5a43964
AG
50
51#ifndef CONFIG_USER_ONLY
8f22e0df 52void s390x_tod_timer(void *opaque)
d5a43964 53{
b8ba6799
AF
54 S390CPU *cpu = opaque;
55 CPUS390XState *env = &cpu->env;
d5a43964
AG
56
57 env->pending_int |= INTERRUPT_TOD;
c3affe56 58 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
d5a43964
AG
59}
60
8f22e0df 61void s390x_cpu_timer(void *opaque)
d5a43964 62{
b8ba6799
AF
63 S390CPU *cpu = opaque;
64 CPUS390XState *env = &cpu->env;
d5a43964
AG
65
66 env->pending_int |= INTERRUPT_CPUTIMER;
c3affe56 67 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
d5a43964
AG
68}
69#endif
10c339a0 70
96b1a8bb 71S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
10ec5117 72{
9be38598 73 return S390_CPU(object_new(TYPE_S390_CPU));
96b1a8bb
MR
74}
75
76S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
77{
78 S390CPU *cpu;
79 Error *err = NULL;
80
81 cpu = cpu_s390x_create(cpu_model, &err);
82 if (err != NULL) {
83 goto out;
84 }
85
86 object_property_set_int(OBJECT(cpu), id, "id", &err);
87 if (err != NULL) {
88 goto out;
89 }
90 object_property_set_bool(OBJECT(cpu), true, "realized", &err);
91
92out:
93 if (err) {
94 error_propagate(errp, err);
95 object_unref(OBJECT(cpu));
96 cpu = NULL;
97 }
98 return cpu;
99}
100
101S390CPU *cpu_s390x_init(const char *cpu_model)
102{
103 Error *err = NULL;
104 S390CPU *cpu;
105 /* Use to track CPU ID for linux-user only */
106 static int64_t next_cpu_id;
1f136632 107
96b1a8bb
MR
108 cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
109 if (err) {
110 error_report_err(err);
111 }
564b863d 112 return cpu;
10ec5117
AG
113}
114
d5a43964
AG
115#if defined(CONFIG_USER_ONLY)
116
/* linux-user build: there is no guest interrupt delivery; simply clear
   the pending exception index. */
void s390_cpu_do_interrupt(CPUState *cs)
{
    cs->exception_index = -1;
}
121
7510454e
AF
122int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
123 int rw, int mmu_idx)
d5a43964 124{
7510454e
AF
125 S390CPU *cpu = S390_CPU(cs);
126
27103424 127 cs->exception_index = EXCP_PGM;
7510454e 128 cpu->env.int_pgm_code = PGM_ADDRESSING;
d5a103cd
RH
129 /* On real machines this value is dropped into LowMem. Since this
130 is userland, simply put this someplace that cpu_loop can find it. */
7510454e 131 cpu->env.__excp_addr = address;
d5a43964
AG
132 return 1;
133}
134
b7e516ce 135#else /* !CONFIG_USER_ONLY */
d5a43964
AG
136
137/* Ensure to exit the TB after this call! */
/* Queue a program exception with the given @code and instruction length
   @ilen in the CPU state.  This only records the exception; delivery
   happens later in s390_cpu_do_interrupt(). */
/* Ensure to exit the TB after this call! */
void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
{
    CPUState *cs = CPU(s390_env_get_cpu(env));

    cs->exception_index = EXCP_PGM;
    env->int_pgm_code = code;
    env->int_pgm_ilen = ilen;
}
146
7510454e
AF
147int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
148 int rw, int mmu_idx)
10c339a0 149{
7510454e
AF
150 S390CPU *cpu = S390_CPU(cs);
151 CPUS390XState *env = &cpu->env;
c255ac60 152 uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
d5a43964 153 target_ulong vaddr, raddr;
10c339a0
AG
154 int prot;
155
7510454e 156 DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
07cc7d12 157 __func__, orig_vaddr, rw, mmu_idx);
d5a43964 158
71e47088
BS
159 orig_vaddr &= TARGET_PAGE_MASK;
160 vaddr = orig_vaddr;
d5a43964
AG
161
162 /* 31-Bit mode */
163 if (!(env->psw.mask & PSW_MASK_64)) {
164 vaddr &= 0x7fffffff;
165 }
166
e3e09d87 167 if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
d5a43964
AG
168 /* Translation ended in exception */
169 return 1;
170 }
10c339a0 171
d5a43964 172 /* check out of RAM access */
7b3fdbd9 173 if (raddr > ram_size) {
a6f921b0
AF
174 DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
175 (uint64_t)raddr, (uint64_t)ram_size);
d5a103cd 176 trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
d5a43964
AG
177 return 1;
178 }
10c339a0 179
339aaf5b
AP
180 qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
181 __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
d5a43964 182
0c591eb0 183 tlb_set_page(cs, orig_vaddr, raddr, prot,
d4c430a8 184 mmu_idx, TARGET_PAGE_SIZE);
d5a43964 185
d4c430a8 186 return 0;
10c339a0 187}
d5a43964 188
/* Debugger (gdbstub) page translation: translate @vaddr using the
 * current ASC taken from the PSW, without raising guest exceptions
 * (exc=false passed to mmu_translate).
 * Returns the real address, or -1 if translation fails.
 */
hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    target_ulong raddr;
    int prot;
    uint64_t asc = env->psw.mask & PSW_MASK_ASC;

    /* 31-Bit mode */
    if (!(env->psw.mask & PSW_MASK_64)) {
        vaddr &= 0x7fffffff;
    }

    if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
        return -1;
    }
    return raddr;
}
207
770a6379
DH
208hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
209{
210 hwaddr phys_addr;
211 target_ulong page;
212
213 page = vaddr & TARGET_PAGE_MASK;
214 phys_addr = cpu_get_phys_page_debug(cs, page);
215 phys_addr += (vaddr & ~TARGET_PAGE_MASK);
216
217 return phys_addr;
218}
219
/* Load a new PSW (@mask, @addr) into the CPU.
 *
 * Side effects:
 *  - under TCG, the condition code is extracted from PSW mask bits 18-19
 *    (bits 44-45 counted from the low end);
 *  - if the PER bit changed, the hardware watchpoints are recomputed;
 *  - if the new PSW enables the wait state, the CPU is halted, and if it
 *    was the last running CPU the whole system is shut down.
 */
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        /* cc lives in PSW bits 44-45 of the mask */
        env->cc_op = (mask >> 44) & 3;
    }

    /* PER enable bit toggled: storage-alteration watchpoints depend on it */
    if ((old_mask ^ mask) & PSW_MASK_PER) {
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        /* s390_cpu_halt() returns the number of still-running CPUs;
           0 means nobody is left to run, so shut the machine down. */
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request();
#endif
        }
    }
}
243
/* Return the current PSW mask with an up-to-date condition code.
 *
 * Under TCG the cc is computed lazily, so it is materialized here via
 * calc_cc() and folded back into bits 44-45 of the returned mask (the
 * cached env->cc_op is updated as a side effect).
 */
static uint64_t get_psw_mask(CPUS390XState *env)
{
    uint64_t r = env->psw.mask;

    if (tcg_enabled()) {
        env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
                             env->cc_vr);

        r &= ~PSW_MASK_CC;
        assert(!(env->cc_op & ~3));
        r |= (uint64_t)env->cc_op << 44;
    }

    return r;
}
259
4782a23b
CH
260static LowCore *cpu_map_lowcore(CPUS390XState *env)
261{
a47dddd7 262 S390CPU *cpu = s390_env_get_cpu(env);
4782a23b
CH
263 LowCore *lowcore;
264 hwaddr len = sizeof(LowCore);
265
266 lowcore = cpu_physical_memory_map(env->psa, &len, 1);
267
268 if (len < sizeof(LowCore)) {
a47dddd7 269 cpu_abort(CPU(cpu), "Could not map lowcore\n");
4782a23b
CH
270 }
271
272 return lowcore;
273}
274
/* Release a lowcore mapping obtained from cpu_map_lowcore(), marking the
   whole structure as dirty (written). */
static void cpu_unmap_lowcore(LowCore *lowcore)
{
    cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
}
279
3f10341f
DH
280void do_restart_interrupt(CPUS390XState *env)
281{
282 uint64_t mask, addr;
283 LowCore *lowcore;
284
285 lowcore = cpu_map_lowcore(env);
286
287 lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
288 lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
289 mask = be64_to_cpu(lowcore->restart_new_psw.mask);
290 addr = be64_to_cpu(lowcore->restart_new_psw.addr);
291
292 cpu_unmap_lowcore(lowcore);
293
294 load_psw(env, mask, addr);
295}
296
/* Deliver the pending program interrupt recorded in env->int_pgm_code /
 * env->int_pgm_ilen through the lowcore.
 *
 * ILEN_LATER / ILEN_LATER_INC are placeholders meaning the instruction
 * length must be derived here from the opcode at the current PSW; the
 * _INC variant additionally advances the PSW past the instruction
 * (nullifying vs. completing exceptions).
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        /* derive the real length from the first opcode byte */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception. */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
344
777c98c3
AJ
345static void do_svc_interrupt(CPUS390XState *env)
346{
347 uint64_t mask, addr;
348 LowCore *lowcore;
349
350 lowcore = cpu_map_lowcore(env);
351
352 lowcore->svc_code = cpu_to_be16(env->int_svc_code);
353 lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
354 lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
355 lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
356 mask = be64_to_cpu(lowcore->svc_new_psw.mask);
357 addr = be64_to_cpu(lowcore->svc_new_psw.addr);
358
359 cpu_unmap_lowcore(lowcore);
360
361 load_psw(env, mask, addr);
362
363 /* When a PER event is pending, the PER exception has to happen
364 immediately after the SERVICE CALL one. */
365 if (env->per_perc_atmid) {
366 env->int_pgm_code = PGM_PER;
367 env->int_pgm_ilen = env->int_svc_ilen;
368 do_program_interrupt(env);
369 }
370}
371
d5a43964
AG
372#define VIRTIO_SUBCODE_64 0x0D00
373
/* Deliver the external interrupt at the top of env->ext_queue through
 * the lowcore.  Aborts if external interrupts are masked off or the
 * queue index is out of range.  When the queue drains (ext_index hits
 * -1) the INTERRUPT_EXT pending bit is cleared.
 */
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    /* cpu address field also carries the virtio subcode marker */
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
3110e292 413
5d69c547
CH
414static void do_io_interrupt(CPUS390XState *env)
415{
a47dddd7 416 S390CPU *cpu = s390_env_get_cpu(env);
5d69c547
CH
417 LowCore *lowcore;
418 IOIntQueue *q;
419 uint8_t isc;
420 int disable = 1;
421 int found = 0;
422
423 if (!(env->psw.mask & PSW_MASK_IO)) {
a47dddd7 424 cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
5d69c547
CH
425 }
426
427 for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
91b0a8f3
CH
428 uint64_t isc_bits;
429
5d69c547
CH
430 if (env->io_index[isc] < 0) {
431 continue;
432 }
1a719923 433 if (env->io_index[isc] >= MAX_IO_QUEUE) {
a47dddd7 434 cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
5d69c547
CH
435 isc, env->io_index[isc]);
436 }
437
438 q = &env->io_queue[env->io_index[isc]][isc];
91b0a8f3
CH
439 isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
440 if (!(env->cregs[6] & isc_bits)) {
5d69c547
CH
441 disable = 0;
442 continue;
443 }
bd9a8d85
CH
444 if (!found) {
445 uint64_t mask, addr;
5d69c547 446
bd9a8d85
CH
447 found = 1;
448 lowcore = cpu_map_lowcore(env);
5d69c547 449
bd9a8d85
CH
450 lowcore->subchannel_id = cpu_to_be16(q->id);
451 lowcore->subchannel_nr = cpu_to_be16(q->nr);
452 lowcore->io_int_parm = cpu_to_be32(q->parm);
453 lowcore->io_int_word = cpu_to_be32(q->word);
454 lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
455 lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
456 mask = be64_to_cpu(lowcore->io_new_psw.mask);
457 addr = be64_to_cpu(lowcore->io_new_psw.addr);
5d69c547 458
bd9a8d85
CH
459 cpu_unmap_lowcore(lowcore);
460
461 env->io_index[isc]--;
462
463 DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
464 env->psw.mask, env->psw.addr);
465 load_psw(env, mask, addr);
466 }
b22dd124 467 if (env->io_index[isc] >= 0) {
5d69c547
CH
468 disable = 0;
469 }
bd9a8d85 470 continue;
5d69c547
CH
471 }
472
473 if (disable) {
474 env->pending_int &= ~INTERRUPT_IO;
475 }
476
5d69c547
CH
477}
478
/* Deliver the machine-check interrupt at the top of env->mchk_queue.
 * Only CRW-pending machine checks (type 1) are supported; the handler
 * saves the full register/timer state into the lowcore save areas and
 * stores a fixed interruption code before loading the mcck-new PSW.
 * Returns early (leaving the mchk queued) if CRW machine checks are
 * disabled via cr14 bit 28.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    /* save fprs, gprs, access and control registers */
    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    /* cpu timer and clock comparator are stored as two 32-bit halves */
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    /* fixed machine-check interruption code (CRW pending) */
    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
541
/* Top-level interrupt dispatcher (system emulation).
 *
 * If no exception is already pending (exception_index == -1), promote a
 * pending asynchronous interrupt to an exception, honoring priority:
 * machine check first, then external, then I/O — each only if enabled by
 * the corresponding PSW mask bit.  Then dispatch to the per-class
 * delivery routine and clear exception_index.  When nothing remains
 * pending, the hard-interrupt request bit is dropped.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            /* 0x1004: clock comparator external interrupt code */
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            /* 0x1005: CPU timer external interrupt code */
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
d5a43964 607
02bb9bbf
RH
608bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
609{
610 if (interrupt_request & CPU_INTERRUPT_HARD) {
611 S390CPU *cpu = S390_CPU(cs);
612 CPUS390XState *env = &cpu->env;
613
614 if (env->psw.mask & PSW_MASK_EXT) {
615 s390_cpu_do_interrupt(cs);
616 return true;
617 }
618 }
619 return false;
620}
311918b9
AJ
621
/* Rebuild the CPU watchpoints used to emulate PER storage-alteration
 * events.  Called whenever the PER configuration (PSW PER bit, cr9-cr11)
 * may have changed.  The PER range cr10..cr11 is inclusive and may wrap,
 * which requires splitting into two watchpoints.
 */
void s390_cpu_recompute_watchpoints(CPUState *cs)
{
    const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    /* We are called when the watchpoints have changed. First
       remove them all. */
    cpu_watchpoint_remove_all(cs, BP_CPU);

    /* Return if PER is not enabled */
    if (!(env->psw.mask & PSW_MASK_PER)) {
        return;
    }

    /* Return if storage-alteration event is not enabled. */
    if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
        return;
    }

    if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
        /* We can't create a watchoint spanning the whole memory range, so
           split it in two parts. */
        cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
        cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
    } else if (env->cregs[10] > env->cregs[11]) {
        /* The address range loops, create two watchpoints. */
        cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
                              wp_flags, NULL);
        cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);

    } else {
        /* Default case, create a single watchpoint. */
        cpu_watchpoint_insert(cs, env->cregs[10],
                              env->cregs[11] - env->cregs[10] + 1,
                              wp_flags, NULL);
    }
}
660
661void s390x_cpu_debug_excp_handler(CPUState *cs)
662{
663 S390CPU *cpu = S390_CPU(cs);
664 CPUS390XState *env = &cpu->env;
665 CPUWatchpoint *wp_hit = cs->watchpoint_hit;
666
667 if (wp_hit && wp_hit->flags & BP_CPU) {
668 /* FIXME: When the storage-alteration-space control bit is set,
669 the exception should only be triggered if the memory access
670 is done using an address space with the storage-alteration-event
671 bit set. We have no way to detect that with the current
672 watchpoint code. */
673 cs->watchpoint_hit = NULL;
674
675 env->per_address = env->psw.addr;
676 env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
677 /* FIXME: We currently no way to detect the address space used
678 to trigger the watchpoint. For now just consider it is the
679 current default ASC. This turn to be true except when MVCP
680 and MVCS instrutions are not used. */
681 env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;
682
683 /* Remove all watchpoints to re-execute the code. A PER exception
684 will be triggered, it will call load_psw which will recompute
685 the watchpoints. */
686 cpu_watchpoint_remove_all(cs, BP_CPU);
6886b980 687 cpu_loop_exit_noexc(cs);
311918b9
AJ
688 }
689}
d5a43964 690#endif /* CONFIG_USER_ONLY */