]> git.proxmox.com Git - mirror_qemu.git/blame - target/s390x/helper.c
shutdown: Add source information to SHUTDOWN and RESET
[mirror_qemu.git] / target / s390x / helper.c
CommitLineData
10ec5117
AG
1/*
2 * S/390 helpers
3 *
4 * Copyright (c) 2009 Ulrich Hecht
d5a43964 5 * Copyright (c) 2011 Alexander Graf
10ec5117
AG
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
70539e18 18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
10ec5117
AG
19 */
20
9615495a 21#include "qemu/osdep.h"
da34e65c 22#include "qapi/error.h"
10ec5117 23#include "cpu.h"
022c62cb 24#include "exec/gdbstub.h"
1de7afc9 25#include "qemu/timer.h"
63c91552 26#include "exec/exec-all.h"
f08b6170 27#include "exec/cpu_ldst.h"
bd3f16ac 28#include "hw/s390x/ioinst.h"
ef81522b 29#ifndef CONFIG_USER_ONLY
9c17d615 30#include "sysemu/sysemu.h"
ef81522b 31#endif
10ec5117 32
d5a43964 33//#define DEBUG_S390
d5a43964
AG
34//#define DEBUG_S390_STDOUT
35
36#ifdef DEBUG_S390
37#ifdef DEBUG_S390_STDOUT
38#define DPRINTF(fmt, ...) \
39 do { fprintf(stderr, fmt, ## __VA_ARGS__); \
013a2942 40 if (qemu_log_separate()) qemu_log(fmt, ##__VA_ARGS__); } while (0)
d5a43964
AG
41#else
42#define DPRINTF(fmt, ...) \
43 do { qemu_log(fmt, ## __VA_ARGS__); } while (0)
44#endif
45#else
46#define DPRINTF(fmt, ...) \
47 do { } while (0)
48#endif
49
d5a43964
AG
50
51#ifndef CONFIG_USER_ONLY
8f22e0df 52void s390x_tod_timer(void *opaque)
d5a43964 53{
b8ba6799
AF
54 S390CPU *cpu = opaque;
55 CPUS390XState *env = &cpu->env;
d5a43964
AG
56
57 env->pending_int |= INTERRUPT_TOD;
c3affe56 58 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
d5a43964
AG
59}
60
8f22e0df 61void s390x_cpu_timer(void *opaque)
d5a43964 62{
b8ba6799
AF
63 S390CPU *cpu = opaque;
64 CPUS390XState *env = &cpu->env;
d5a43964
AG
65
66 env->pending_int |= INTERRUPT_CPUTIMER;
c3affe56 67 cpu_interrupt(CPU(cpu), CPU_INTERRUPT_HARD);
d5a43964
AG
68}
69#endif
10c339a0 70
96b1a8bb 71S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp)
10ec5117 72{
41868f84
DH
73 static bool features_parsed;
74 char *name, *features;
75 const char *typename;
76 ObjectClass *oc;
77 CPUClass *cc;
78
79 name = g_strdup(cpu_model);
80 features = strchr(name, ',');
81 if (features) {
82 features[0] = 0;
83 features++;
84 }
85
86 oc = cpu_class_by_name(TYPE_S390_CPU, name);
87 if (!oc) {
88 error_setg(errp, "Unknown CPU definition \'%s\'", name);
89 g_free(name);
90 return NULL;
91 }
92 typename = object_class_get_name(oc);
93
94 if (!features_parsed) {
95 features_parsed = true;
96 cc = CPU_CLASS(oc);
97 cc->parse_features(typename, features, errp);
98 }
99 g_free(name);
100
101 if (*errp) {
102 return NULL;
103 }
104 return S390_CPU(CPU(object_new(typename)));
96b1a8bb
MR
105}
106
107S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp)
108{
109 S390CPU *cpu;
110 Error *err = NULL;
111
112 cpu = cpu_s390x_create(cpu_model, &err);
113 if (err != NULL) {
114 goto out;
115 }
116
117 object_property_set_int(OBJECT(cpu), id, "id", &err);
118 if (err != NULL) {
119 goto out;
120 }
121 object_property_set_bool(OBJECT(cpu), true, "realized", &err);
122
123out:
124 if (err) {
125 error_propagate(errp, err);
126 object_unref(OBJECT(cpu));
127 cpu = NULL;
128 }
129 return cpu;
130}
131
132S390CPU *cpu_s390x_init(const char *cpu_model)
133{
134 Error *err = NULL;
135 S390CPU *cpu;
136 /* Use to track CPU ID for linux-user only */
137 static int64_t next_cpu_id;
1f136632 138
96b1a8bb
MR
139 cpu = s390x_new_cpu(cpu_model, next_cpu_id++, &err);
140 if (err) {
141 error_report_err(err);
142 }
564b863d 143 return cpu;
10ec5117
AG
144}
145
d5a43964
AG
146#if defined(CONFIG_USER_ONLY)
147
97a8ea5a 148void s390_cpu_do_interrupt(CPUState *cs)
d5a43964 149{
27103424 150 cs->exception_index = -1;
d5a43964
AG
151}
152
7510454e
AF
153int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr address,
154 int rw, int mmu_idx)
d5a43964 155{
7510454e
AF
156 S390CPU *cpu = S390_CPU(cs);
157
27103424 158 cs->exception_index = EXCP_PGM;
7510454e 159 cpu->env.int_pgm_code = PGM_ADDRESSING;
d5a103cd
RH
160 /* On real machines this value is dropped into LowMem. Since this
161 is userland, simply put this someplace that cpu_loop can find it. */
7510454e 162 cpu->env.__excp_addr = address;
d5a43964
AG
163 return 1;
164}
165
b7e516ce 166#else /* !CONFIG_USER_ONLY */
d5a43964
AG
167
168/* Ensure to exit the TB after this call! */
dfebd7a7 169void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen)
d5a43964 170{
27103424
AF
171 CPUState *cs = CPU(s390_env_get_cpu(env));
172
173 cs->exception_index = EXCP_PGM;
d5a43964 174 env->int_pgm_code = code;
d5a103cd 175 env->int_pgm_ilen = ilen;
d5a43964
AG
176}
177
7510454e
AF
178int s390_cpu_handle_mmu_fault(CPUState *cs, vaddr orig_vaddr,
179 int rw, int mmu_idx)
10c339a0 180{
7510454e
AF
181 S390CPU *cpu = S390_CPU(cs);
182 CPUS390XState *env = &cpu->env;
c255ac60 183 uint64_t asc = cpu_mmu_idx_to_asc(mmu_idx);
d5a43964 184 target_ulong vaddr, raddr;
10c339a0
AG
185 int prot;
186
7510454e 187 DPRINTF("%s: address 0x%" VADDR_PRIx " rw %d mmu_idx %d\n",
07cc7d12 188 __func__, orig_vaddr, rw, mmu_idx);
d5a43964 189
71e47088
BS
190 orig_vaddr &= TARGET_PAGE_MASK;
191 vaddr = orig_vaddr;
d5a43964
AG
192
193 /* 31-Bit mode */
194 if (!(env->psw.mask & PSW_MASK_64)) {
195 vaddr &= 0x7fffffff;
196 }
197
e3e09d87 198 if (mmu_translate(env, vaddr, rw, asc, &raddr, &prot, true)) {
d5a43964
AG
199 /* Translation ended in exception */
200 return 1;
201 }
10c339a0 202
d5a43964 203 /* check out of RAM access */
7b3fdbd9 204 if (raddr > ram_size) {
a6f921b0
AF
205 DPRINTF("%s: raddr %" PRIx64 " > ram_size %" PRIx64 "\n", __func__,
206 (uint64_t)raddr, (uint64_t)ram_size);
d5a103cd 207 trigger_pgm_exception(env, PGM_ADDRESSING, ILEN_LATER);
d5a43964
AG
208 return 1;
209 }
10c339a0 210
339aaf5b
AP
211 qemu_log_mask(CPU_LOG_MMU, "%s: set tlb %" PRIx64 " -> %" PRIx64 " (%x)\n",
212 __func__, (uint64_t)vaddr, (uint64_t)raddr, prot);
d5a43964 213
0c591eb0 214 tlb_set_page(cs, orig_vaddr, raddr, prot,
d4c430a8 215 mmu_idx, TARGET_PAGE_SIZE);
d5a43964 216
d4c430a8 217 return 0;
10c339a0 218}
d5a43964 219
00b941e5 220hwaddr s390_cpu_get_phys_page_debug(CPUState *cs, vaddr vaddr)
d5a43964 221{
00b941e5
AF
222 S390CPU *cpu = S390_CPU(cs);
223 CPUS390XState *env = &cpu->env;
d5a43964 224 target_ulong raddr;
e3e09d87 225 int prot;
d5a43964
AG
226 uint64_t asc = env->psw.mask & PSW_MASK_ASC;
227
228 /* 31-Bit mode */
229 if (!(env->psw.mask & PSW_MASK_64)) {
230 vaddr &= 0x7fffffff;
231 }
232
234779a2
DH
233 if (mmu_translate(env, vaddr, MMU_INST_FETCH, asc, &raddr, &prot, false)) {
234 return -1;
235 }
d5a43964
AG
236 return raddr;
237}
238
770a6379
DH
239hwaddr s390_cpu_get_phys_addr_debug(CPUState *cs, vaddr vaddr)
240{
241 hwaddr phys_addr;
242 target_ulong page;
243
244 page = vaddr & TARGET_PAGE_MASK;
245 phys_addr = cpu_get_phys_page_debug(cs, page);
246 phys_addr += (vaddr & ~TARGET_PAGE_MASK);
247
248 return phys_addr;
249}
250
/* Install a new PSW (mask + address) on the CPU.
 *
 * Side effects, in order:
 *  - under TCG, reload the lazily-tracked condition code from PSW bits
 *    18-19 (mask >> 44);
 *  - if the PER bit changed, recompute the CPU watchpoints;
 *  - if the new PSW enables the wait state, halt the CPU, and if no CPU
 *    remains running, request a guest-initiated shutdown (system
 *    emulation only).
 */
void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr)
{
    uint64_t old_mask = env->psw.mask;

    env->psw.addr = addr;
    env->psw.mask = mask;
    if (tcg_enabled()) {
        /* CC lives in PSW bits 18-19; keep the lazy cc_op in sync. */
        env->cc_op = (mask >> 44) & 3;
    }

    if ((old_mask ^ mask) & PSW_MASK_PER) {
        /* PER enablement changed: watchpoints must match the new state. */
        s390_cpu_recompute_watchpoints(CPU(s390_env_get_cpu(env)));
    }

    if (mask & PSW_MASK_WAIT) {
        S390CPU *cpu = s390_env_get_cpu(env);
        /* s390_cpu_halt() returns the number of still-running CPUs;
           0 means everyone is in wait state -> shut the guest down. */
        if (s390_cpu_halt(cpu) == 0) {
#ifndef CONFIG_USER_ONLY
            qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
#endif
        }
    }
}
274
a4e3ad19 275static uint64_t get_psw_mask(CPUS390XState *env)
d5a43964 276{
3f10341f 277 uint64_t r = env->psw.mask;
d5a43964 278
3f10341f
DH
279 if (tcg_enabled()) {
280 env->cc_op = calc_cc(env, env->cc_op, env->cc_src, env->cc_dst,
281 env->cc_vr);
d5a43964 282
3f10341f
DH
283 r &= ~PSW_MASK_CC;
284 assert(!(env->cc_op & ~3));
285 r |= (uint64_t)env->cc_op << 44;
286 }
d5a43964
AG
287
288 return r;
289}
290
4782a23b
CH
291static LowCore *cpu_map_lowcore(CPUS390XState *env)
292{
a47dddd7 293 S390CPU *cpu = s390_env_get_cpu(env);
4782a23b
CH
294 LowCore *lowcore;
295 hwaddr len = sizeof(LowCore);
296
297 lowcore = cpu_physical_memory_map(env->psa, &len, 1);
298
299 if (len < sizeof(LowCore)) {
a47dddd7 300 cpu_abort(CPU(cpu), "Could not map lowcore\n");
4782a23b
CH
301 }
302
303 return lowcore;
304}
305
306static void cpu_unmap_lowcore(LowCore *lowcore)
307{
308 cpu_physical_memory_unmap(lowcore, sizeof(LowCore), 1, sizeof(LowCore));
309}
310
3f10341f
DH
311void do_restart_interrupt(CPUS390XState *env)
312{
313 uint64_t mask, addr;
314 LowCore *lowcore;
315
316 lowcore = cpu_map_lowcore(env);
317
318 lowcore->restart_old_psw.mask = cpu_to_be64(get_psw_mask(env));
319 lowcore->restart_old_psw.addr = cpu_to_be64(env->psw.addr);
320 mask = be64_to_cpu(lowcore->restart_new_psw.mask);
321 addr = be64_to_cpu(lowcore->restart_new_psw.addr);
322
323 cpu_unmap_lowcore(lowcore);
324
325 load_psw(env, mask, addr);
326}
327
/* Deliver a pending program interruption.
 *
 * The instruction length (env->int_pgm_ilen) may be a real value
 * (2/4/6) or one of the ILEN_LATER* placeholders, in which case it is
 * derived here from the opcode byte at the current PSW address;
 * ILEN_LATER_INC additionally advances the PSW past the instruction
 * (nullifying vs. non-nullifying exceptions).  A pending PER event is
 * merged into the same interruption.  Ends by loading the
 * program-new PSW from lowcore.
 */
static void do_program_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;
    int ilen = env->int_pgm_ilen;

    switch (ilen) {
    case ILEN_LATER:
        /* Length not known at trigger time: decode it from the opcode. */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        break;
    case ILEN_LATER_INC:
        /* Same, but the PSW must also step past the instruction. */
        ilen = get_ilen(cpu_ldub_code(env, env->psw.addr));
        env->psw.addr += ilen;
        break;
    default:
        assert(ilen == 2 || ilen == 4 || ilen == 6);
    }

    qemu_log_mask(CPU_LOG_INT, "%s: code=0x%x ilen=%d\n",
                  __func__, env->int_pgm_code, ilen);

    lowcore = cpu_map_lowcore(env);

    /* Signal PER events with the exception.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code |= PGM_PER;
        lowcore->per_address = cpu_to_be64(env->per_address);
        lowcore->per_perc_atmid = cpu_to_be16(env->per_perc_atmid);
        env->per_perc_atmid = 0;
    }

    /* Store old PSW / interruption parameters, fetch the new PSW. */
    lowcore->pgm_ilen = cpu_to_be16(ilen);
    lowcore->pgm_code = cpu_to_be16(env->int_pgm_code);
    lowcore->program_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->program_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->program_new_psw.mask);
    addr = be64_to_cpu(lowcore->program_new_psw.addr);
    lowcore->per_breaking_event_addr = cpu_to_be64(env->gbea);

    cpu_unmap_lowcore(lowcore);

    DPRINTF("%s: %x %x %" PRIx64 " %" PRIx64 "\n", __func__,
            env->int_pgm_code, ilen, env->psw.mask,
            env->psw.addr);

    load_psw(env, mask, addr);
}
375
777c98c3
AJ
/* Deliver a supervisor-call interruption: store SVC code/ilen and the
 * old PSW (advanced past the SVC instruction) into lowcore, then load
 * the svc-new PSW.  If a PER event is pending it must be delivered
 * immediately afterwards as a separate program interruption. */
static void do_svc_interrupt(CPUS390XState *env)
{
    uint64_t mask, addr;
    LowCore *lowcore;

    lowcore = cpu_map_lowcore(env);

    lowcore->svc_code = cpu_to_be16(env->int_svc_code);
    lowcore->svc_ilen = cpu_to_be16(env->int_svc_ilen);
    lowcore->svc_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    /* The old PSW points past the SVC instruction. */
    lowcore->svc_old_psw.addr = cpu_to_be64(env->psw.addr + env->int_svc_ilen);
    mask = be64_to_cpu(lowcore->svc_new_psw.mask);
    addr = be64_to_cpu(lowcore->svc_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    load_psw(env, mask, addr);

    /* When a PER event is pending, the PER exception has to happen
       immediately after the SERVICE CALL one.  */
    if (env->per_perc_atmid) {
        env->int_pgm_code = PGM_PER;
        env->int_pgm_ilen = env->int_svc_ilen;
        do_program_interrupt(env);
    }
}
402
d5a43964
AG
#define VIRTIO_SUBCODE_64 0x0D00

/* Deliver the newest queued external interruption.
 *
 * env->ext_index is the index of the most recently queued entry
 * (-1 == queue empty); delivery pops that entry.  Aborts the CPU if
 * called with external interrupts masked or with a corrupt index.
 * The INTERRUPT_EXT pending bit is cleared only once the queue drains.
 */
static void do_ext_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    ExtQueue *q;

    if (!(env->psw.mask & PSW_MASK_EXT)) {
        cpu_abort(CPU(cpu), "Ext int w/o ext mask\n");
    }

    if (env->ext_index < 0 || env->ext_index >= MAX_EXT_QUEUE) {
        cpu_abort(CPU(cpu), "Ext queue overrun: %d\n", env->ext_index);
    }

    q = &env->ext_queue[env->ext_index];
    lowcore = cpu_map_lowcore(env);

    /* Store interruption parameters and old PSW, fetch the new PSW. */
    lowcore->ext_int_code = cpu_to_be16(q->code);
    lowcore->ext_params = cpu_to_be32(q->param);
    lowcore->ext_params2 = cpu_to_be64(q->param64);
    lowcore->external_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->external_old_psw.addr = cpu_to_be64(env->psw.addr);
    lowcore->cpu_addr = cpu_to_be16(env->cpu_num | VIRTIO_SUBCODE_64);
    mask = be64_to_cpu(lowcore->external_new_psw.mask);
    addr = be64_to_cpu(lowcore->external_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    /* Pop the entry; clear the pending bit when the queue is empty. */
    env->ext_index--;
    if (env->ext_index == -1) {
        env->pending_int &= ~INTERRUPT_EXT;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
3110e292 444
5d69c547
CH
/* Deliver at most one pending I/O interruption.
 *
 * Scans all interruption subclasses (ISCs); an ISC is eligible only if
 * its queue is non-empty and its bit is enabled in CR6.  The first
 * eligible entry is delivered (lowcore store + psw load) and popped.
 * INTERRUPT_IO is cleared only if, after the scan, no deliverable
 * entry remains anywhere ('disable' stays set); any non-empty queue
 * — enabled or not — keeps the pending bit set.
 */
static void do_io_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    LowCore *lowcore;
    IOIntQueue *q;
    uint8_t isc;
    int disable = 1;
    int found = 0;

    if (!(env->psw.mask & PSW_MASK_IO)) {
        cpu_abort(CPU(cpu), "I/O int w/o I/O mask\n");
    }

    for (isc = 0; isc < ARRAY_SIZE(env->io_index); isc++) {
        uint64_t isc_bits;

        if (env->io_index[isc] < 0) {
            /* Queue for this subclass is empty. */
            continue;
        }
        if (env->io_index[isc] >= MAX_IO_QUEUE) {
            cpu_abort(CPU(cpu), "I/O queue overrun for isc %d: %d\n",
                      isc, env->io_index[isc]);
        }

        q = &env->io_queue[env->io_index[isc]][isc];
        isc_bits = ISC_TO_ISC_BITS(IO_INT_WORD_ISC(q->word));
        if (!(env->cregs[6] & isc_bits)) {
            /* Subclass masked by CR6: keep it pending, don't deliver. */
            disable = 0;
            continue;
        }
        if (!found) {
            uint64_t mask, addr;

            /* First eligible entry: deliver it. */
            found = 1;
            lowcore = cpu_map_lowcore(env);

            lowcore->subchannel_id = cpu_to_be16(q->id);
            lowcore->subchannel_nr = cpu_to_be16(q->nr);
            lowcore->io_int_parm = cpu_to_be32(q->parm);
            lowcore->io_int_word = cpu_to_be32(q->word);
            lowcore->io_old_psw.mask = cpu_to_be64(get_psw_mask(env));
            lowcore->io_old_psw.addr = cpu_to_be64(env->psw.addr);
            mask = be64_to_cpu(lowcore->io_new_psw.mask);
            addr = be64_to_cpu(lowcore->io_new_psw.addr);

            cpu_unmap_lowcore(lowcore);

            /* Pop the delivered entry. */
            env->io_index[isc]--;

            DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
                    env->psw.mask, env->psw.addr);
            load_psw(env, mask, addr);
        }
        if (env->io_index[isc] >= 0) {
            /* Entries remain in this queue: keep INTERRUPT_IO set. */
            disable = 0;
        }
        continue;
    }

    if (disable) {
        env->pending_int &= ~INTERRUPT_IO;
    }

}
509
/* Deliver the newest queued machine-check interruption.
 *
 * Only type-1 (CRW pending) machine checks are supported; delivery is
 * suppressed entirely when CRW machine checks are disabled in CR14
 * (note: in that case the queue entry is NOT popped).  Register and
 * timer state is saved to the lowcore save areas and a fixed MCIC is
 * stored before loading the mcck-new PSW.
 */
static void do_mchk_interrupt(CPUS390XState *env)
{
    S390CPU *cpu = s390_env_get_cpu(env);
    uint64_t mask, addr;
    LowCore *lowcore;
    MchkQueue *q;
    int i;

    if (!(env->psw.mask & PSW_MASK_MCHECK)) {
        cpu_abort(CPU(cpu), "Machine check w/o mchk mask\n");
    }

    if (env->mchk_index < 0 || env->mchk_index >= MAX_MCHK_QUEUE) {
        cpu_abort(CPU(cpu), "Mchk queue overrun: %d\n", env->mchk_index);
    }

    q = &env->mchk_queue[env->mchk_index];

    if (q->type != 1) {
        /* Don't know how to handle this... */
        cpu_abort(CPU(cpu), "Unknown machine check type %d\n", q->type);
    }
    if (!(env->cregs[14] & (1 << 28))) {
        /* CRW machine checks disabled */
        return;
    }

    lowcore = cpu_map_lowcore(env);

    /* Save FP, GP, access and control registers to lowcore. */
    for (i = 0; i < 16; i++) {
        lowcore->floating_pt_save_area[i] = cpu_to_be64(get_freg(env, i)->ll);
        lowcore->gpregs_save_area[i] = cpu_to_be64(env->regs[i]);
        lowcore->access_regs_save_area[i] = cpu_to_be32(env->aregs[i]);
        lowcore->cregs_save_area[i] = cpu_to_be64(env->cregs[i]);
    }
    lowcore->prefixreg_save_area = cpu_to_be32(env->psa);
    lowcore->fpt_creg_save_area = cpu_to_be32(env->fpc);
    lowcore->tod_progreg_save_area = cpu_to_be32(env->todpr);
    /* 64-bit CPU timer / clock comparator stored as two 32-bit halves. */
    lowcore->cpu_timer_save_area[0] = cpu_to_be32(env->cputm >> 32);
    lowcore->cpu_timer_save_area[1] = cpu_to_be32((uint32_t)env->cputm);
    lowcore->clock_comp_save_area[0] = cpu_to_be32(env->ckc >> 32);
    lowcore->clock_comp_save_area[1] = cpu_to_be32((uint32_t)env->ckc);

    /* Fixed machine-check interruption code (CRW pending). */
    lowcore->mcck_interruption_code[0] = cpu_to_be32(0x00400f1d);
    lowcore->mcck_interruption_code[1] = cpu_to_be32(0x40330000);
    lowcore->mcck_old_psw.mask = cpu_to_be64(get_psw_mask(env));
    lowcore->mcck_old_psw.addr = cpu_to_be64(env->psw.addr);
    mask = be64_to_cpu(lowcore->mcck_new_psw.mask);
    addr = be64_to_cpu(lowcore->mcck_new_psw.addr);

    cpu_unmap_lowcore(lowcore);

    /* Pop the entry; clear the pending bit when the queue is empty. */
    env->mchk_index--;
    if (env->mchk_index == -1) {
        env->pending_int &= ~INTERRUPT_MCHK;
    }

    DPRINTF("%s: %" PRIx64 " %" PRIx64 "\n", __func__,
            env->psw.mask, env->psw.addr);

    load_psw(env, mask, addr);
}
572
/* Top-level interrupt dispatcher (system emulation).
 *
 * If no exception is already pending (exception_index == -1), promote
 * pending interrupts to an exception class in priority order: machine
 * check, then external (including lazily-injected TOD/CPU-timer
 * events), then I/O — each gated by its PSW mask bit.  The selected
 * class is then delivered by its do_*_interrupt() helper, and the
 * hard-interrupt request line is dropped once nothing is pending.
 */
void s390_cpu_do_interrupt(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;

    qemu_log_mask(CPU_LOG_INT, "%s: %d at pc=%" PRIx64 "\n",
                  __func__, cs->exception_index, env->psw.addr);

    s390_cpu_set_state(CPU_STATE_OPERATING, cpu);
    /* handle machine checks */
    if ((env->psw.mask & PSW_MASK_MCHECK) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_MCHK) {
            cs->exception_index = EXCP_MCHK;
        }
    }
    /* handle external interrupts */
    if ((env->psw.mask & PSW_MASK_EXT) &&
        cs->exception_index == -1) {
        if (env->pending_int & INTERRUPT_EXT) {
            /* code is already in env */
            cs->exception_index = EXCP_EXT;
        } else if (env->pending_int & INTERRUPT_TOD) {
            /* TOD comparator: inject ext code 0x1004 now. */
            cpu_inject_ext(cpu, 0x1004, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        } else if (env->pending_int & INTERRUPT_CPUTIMER) {
            /* CPU timer: inject ext code 0x1005 now. */
            cpu_inject_ext(cpu, 0x1005, 0, 0);
            cs->exception_index = EXCP_EXT;
            env->pending_int &= ~INTERRUPT_EXT;
            env->pending_int &= ~INTERRUPT_TOD;
        }
    }
    /* handle I/O interrupts */
    if ((env->psw.mask & PSW_MASK_IO) &&
        (cs->exception_index == -1)) {
        if (env->pending_int & INTERRUPT_IO) {
            cs->exception_index = EXCP_IO;
        }
    }

    switch (cs->exception_index) {
    case EXCP_PGM:
        do_program_interrupt(env);
        break;
    case EXCP_SVC:
        do_svc_interrupt(env);
        break;
    case EXCP_EXT:
        do_ext_interrupt(env);
        break;
    case EXCP_IO:
        do_io_interrupt(env);
        break;
    case EXCP_MCHK:
        do_mchk_interrupt(env);
        break;
    }
    cs->exception_index = -1;

    if (!env->pending_int) {
        cs->interrupt_request &= ~CPU_INTERRUPT_HARD;
    }
}
d5a43964 638
02bb9bbf
RH
639bool s390_cpu_exec_interrupt(CPUState *cs, int interrupt_request)
640{
641 if (interrupt_request & CPU_INTERRUPT_HARD) {
642 S390CPU *cpu = S390_CPU(cs);
643 CPUS390XState *env = &cpu->env;
644
645 if (env->psw.mask & PSW_MASK_EXT) {
646 s390_cpu_do_interrupt(cs);
647 return true;
648 }
649 }
650 return false;
651}
311918b9
AJ
652
653void s390_cpu_recompute_watchpoints(CPUState *cs)
654{
655 const int wp_flags = BP_CPU | BP_MEM_WRITE | BP_STOP_BEFORE_ACCESS;
656 S390CPU *cpu = S390_CPU(cs);
657 CPUS390XState *env = &cpu->env;
658
659 /* We are called when the watchpoints have changed. First
660 remove them all. */
661 cpu_watchpoint_remove_all(cs, BP_CPU);
662
663 /* Return if PER is not enabled */
664 if (!(env->psw.mask & PSW_MASK_PER)) {
665 return;
666 }
667
668 /* Return if storage-alteration event is not enabled. */
669 if (!(env->cregs[9] & PER_CR9_EVENT_STORE)) {
670 return;
671 }
672
673 if (env->cregs[10] == 0 && env->cregs[11] == -1LL) {
674 /* We can't create a watchoint spanning the whole memory range, so
675 split it in two parts. */
676 cpu_watchpoint_insert(cs, 0, 1ULL << 63, wp_flags, NULL);
677 cpu_watchpoint_insert(cs, 1ULL << 63, 1ULL << 63, wp_flags, NULL);
678 } else if (env->cregs[10] > env->cregs[11]) {
679 /* The address range loops, create two watchpoints. */
680 cpu_watchpoint_insert(cs, env->cregs[10], -env->cregs[10],
681 wp_flags, NULL);
682 cpu_watchpoint_insert(cs, 0, env->cregs[11] + 1, wp_flags, NULL);
683
684 } else {
685 /* Default case, create a single watchpoint. */
686 cpu_watchpoint_insert(cs, env->cregs[10],
687 env->cregs[11] - env->cregs[10] + 1,
688 wp_flags, NULL);
689 }
690}
691
692void s390x_cpu_debug_excp_handler(CPUState *cs)
693{
694 S390CPU *cpu = S390_CPU(cs);
695 CPUS390XState *env = &cpu->env;
696 CPUWatchpoint *wp_hit = cs->watchpoint_hit;
697
698 if (wp_hit && wp_hit->flags & BP_CPU) {
699 /* FIXME: When the storage-alteration-space control bit is set,
700 the exception should only be triggered if the memory access
701 is done using an address space with the storage-alteration-event
702 bit set. We have no way to detect that with the current
703 watchpoint code. */
704 cs->watchpoint_hit = NULL;
705
706 env->per_address = env->psw.addr;
707 env->per_perc_atmid |= PER_CODE_EVENT_STORE | get_per_atmid(env);
708 /* FIXME: We currently no way to detect the address space used
709 to trigger the watchpoint. For now just consider it is the
710 current default ASC. This turn to be true except when MVCP
711 and MVCS instrutions are not used. */
712 env->per_perc_atmid |= env->psw.mask & (PSW_MASK_ASC) >> 46;
713
714 /* Remove all watchpoints to re-execute the code. A PER exception
715 will be triggered, it will call load_psw which will recompute
716 the watchpoints. */
717 cpu_watchpoint_remove_all(cs, BP_CPU);
6886b980 718 cpu_loop_exit_noexc(cs);
311918b9
AJ
719 }
720}
44977a8f
RH
721
722/* Unaligned accesses are only diagnosed with MO_ALIGN. At the moment,
723 this is only for the atomic operations, for which we want to raise a
724 specification exception. */
725void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
726 MMUAccessType access_type,
727 int mmu_idx, uintptr_t retaddr)
728{
729 S390CPU *cpu = S390_CPU(cs);
730 CPUS390XState *env = &cpu->env;
731
732 if (retaddr) {
733 cpu_restore_state(cs, retaddr);
734 }
735 program_interrupt(env, PGM_SPECIFICATION, ILEN_LATER);
736}
d5a43964 737#endif /* CONFIG_USER_ONLY */