1 /*
2 * S/390 virtual CPU header
3 *
4 * Copyright (c) 2009 Ulrich Hecht
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * Contributions after 2012-10-29 are licensed under the terms of the
17 * GNU GPL, version 2 or (at your option) any later version.
18 *
19 * You should have received a copy of the GNU (Lesser) General Public
20 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
21 */
22
23 #ifndef S390X_CPU_H
24 #define S390X_CPU_H
25
26 #include "qemu-common.h"
27 #include "cpu-qom.h"
28
29 #define TARGET_LONG_BITS 64
30
31 #define ELF_MACHINE_UNAME "S390X"
32
33 #define CPUArchState struct CPUS390XState
34
35 #include "exec/cpu-defs.h"
36 #define TARGET_PAGE_BITS 12
37
38 #define TARGET_PHYS_ADDR_SPACE_BITS 64
39 #define TARGET_VIRT_ADDR_SPACE_BITS 64
40
41 #include "exec/cpu-all.h"
42
43 #include "fpu/softfloat.h"
44
45 #define NB_MMU_MODES 3
46 #define TARGET_INSN_START_EXTRA_WORDS 1
47
48 #define MMU_MODE0_SUFFIX _primary
49 #define MMU_MODE1_SUFFIX _secondary
50 #define MMU_MODE2_SUFFIX _home
51
52 #define MMU_USER_IDX 0
53
54 #define MAX_EXT_QUEUE 16
55 #define MAX_IO_QUEUE 16
56 #define MAX_MCHK_QUEUE 16
57
58 #define PSW_MCHK_MASK 0x0004000000000000
59 #define PSW_IO_MASK 0x0200000000000000
60
61 typedef struct PSW {
62 uint64_t mask;
63 uint64_t addr;
64 } PSW;
65
66 typedef struct ExtQueue {
67 uint32_t code;
68 uint32_t param;
69 uint32_t param64;
70 } ExtQueue;
71
72 typedef struct IOIntQueue {
73 uint16_t id;
74 uint16_t nr;
75 uint32_t parm;
76 uint32_t word;
77 } IOIntQueue;
78
79 typedef struct MchkQueue {
80 uint16_t type;
81 } MchkQueue;
82
83 typedef struct CPUS390XState {
84 uint64_t regs[16]; /* GP registers */
85 /*
86 * The floating point registers are part of the vector registers.
87 * vregs[0][0] -> vregs[15][0] are 16 floating point registers
88 */
89 CPU_DoubleU vregs[32][2]; /* vector registers */
90 uint32_t aregs[16]; /* access registers */
91 uint8_t riccb[64]; /* runtime instrumentation control */
92
93 /* Fields up to this point are not cleared by initial CPU reset */
94 struct {} start_initial_reset_fields;
95
96 uint32_t fpc; /* floating-point control register */
97 uint32_t cc_op;
98
99 float_status fpu_status; /* passed to softfloat lib */
100
101 /* The low part of a 128-bit return, or remainder of a divide. */
102 uint64_t retxl;
103
104 PSW psw;
105
106 uint64_t cc_src;
107 uint64_t cc_dst;
108 uint64_t cc_vr;
109
110 uint64_t __excp_addr;
111 uint64_t psa;
112
113 uint32_t int_pgm_code;
114 uint32_t int_pgm_ilen;
115
116 uint32_t int_svc_code;
117 uint32_t int_svc_ilen;
118
119 uint64_t per_address;
120 uint16_t per_perc_atmid;
121
122 uint64_t cregs[16]; /* control registers */
123
124 ExtQueue ext_queue[MAX_EXT_QUEUE];
125 IOIntQueue io_queue[MAX_IO_QUEUE][8];
126 MchkQueue mchk_queue[MAX_MCHK_QUEUE];
127
128 int pending_int;
129 int ext_index;
130 int io_index[8];
131 int mchk_index;
132
133 uint64_t ckc;
134 uint64_t cputm;
135 uint32_t todpr;
136
137 uint64_t pfault_token;
138 uint64_t pfault_compare;
139 uint64_t pfault_select;
140
141 uint64_t gbea;
142 uint64_t pp;
143
144 /* Fields up to this point are cleared by a CPU reset */
145 struct {} end_reset_fields;
146
147 CPU_COMMON
148
149 uint32_t cpu_num;
150 uint32_t machine_type;
151
152 uint64_t tod_offset;
153 uint64_t tod_basetime;
154 QEMUTimer *tod_timer;
155
156 QEMUTimer *cpu_timer;
157
158 /*
159 * The cpu state represents the logical state of a cpu. In contrast to other
160 * architectures, there is a difference between a halt and a stop on s390.
161 * If all cpus are either stopped (including check stop) or in the disabled
162 * wait state, the vm can be shut down.
163 */
164 #define CPU_STATE_UNINITIALIZED 0x00
165 #define CPU_STATE_STOPPED 0x01
166 #define CPU_STATE_CHECK_STOP 0x02
167 #define CPU_STATE_OPERATING 0x03
168 #define CPU_STATE_LOAD 0x04
169 uint8_t cpu_state;
170
171 /* currently processed sigp order */
172 uint8_t sigp_order;
173
174 } CPUS390XState;
175
176 static inline CPU_DoubleU *get_freg(CPUS390XState *cs, int nr)
177 {
178 return &cs->vregs[nr][0];
179 }
180
181 /**
182 * S390CPU:
183 * @env: #CPUS390XState.
184 *
185 * An S/390 CPU.
186 */
187 struct S390CPU {
188 /*< private >*/
189 CPUState parent_obj;
190 /*< public >*/
191
192 CPUS390XState env;
193 int64_t id;
194 S390CPUModel *model;
195 /* needed for live migration */
196 void *irqstate;
197 uint32_t irqstate_saved_size;
198 };
199
200 static inline S390CPU *s390_env_get_cpu(CPUS390XState *env)
201 {
202 return container_of(env, S390CPU, env);
203 }
204
205 #define ENV_GET_CPU(e) CPU(s390_env_get_cpu(e))
206
207 #define ENV_OFFSET offsetof(S390CPU, env)
208
209 #ifndef CONFIG_USER_ONLY
210 extern const struct VMStateDescription vmstate_s390_cpu;
211 #endif
212
213 void s390_cpu_do_interrupt(CPUState *cpu);
214 bool s390_cpu_exec_interrupt(CPUState *cpu, int int_req);
215 void s390_cpu_dump_state(CPUState *cpu, FILE *f, fprintf_function cpu_fprintf,
216 int flags);
217 int s390_cpu_write_elf64_note(WriteCoreDumpFunction f, CPUState *cs,
218 int cpuid, void *opaque);
219
220 hwaddr s390_cpu_get_phys_page_debug(CPUState *cpu, vaddr addr);
221 hwaddr s390_cpu_get_phys_addr_debug(CPUState *cpu, vaddr addr);
222 int s390_cpu_gdb_read_register(CPUState *cpu, uint8_t *buf, int reg);
223 int s390_cpu_gdb_write_register(CPUState *cpu, uint8_t *buf, int reg);
224 void s390_cpu_gdb_init(CPUState *cs);
225 void s390x_cpu_debug_excp_handler(CPUState *cs);
226
227 #include "sysemu/kvm.h"
228
229 /* distinguish between 24 bit and 31 bit addressing */
230 #define HIGH_ORDER_BIT 0x80000000
231
232 /* Interrupt Codes */
233 /* Program Interrupts */
234 #define PGM_OPERATION 0x0001
235 #define PGM_PRIVILEGED 0x0002
236 #define PGM_EXECUTE 0x0003
237 #define PGM_PROTECTION 0x0004
238 #define PGM_ADDRESSING 0x0005
239 #define PGM_SPECIFICATION 0x0006
240 #define PGM_DATA 0x0007
241 #define PGM_FIXPT_OVERFLOW 0x0008
242 #define PGM_FIXPT_DIVIDE 0x0009
243 #define PGM_DEC_OVERFLOW 0x000a
244 #define PGM_DEC_DIVIDE 0x000b
245 #define PGM_HFP_EXP_OVERFLOW 0x000c
246 #define PGM_HFP_EXP_UNDERFLOW 0x000d
247 #define PGM_HFP_SIGNIFICANCE 0x000e
248 #define PGM_HFP_DIVIDE 0x000f
249 #define PGM_SEGMENT_TRANS 0x0010
250 #define PGM_PAGE_TRANS 0x0011
251 #define PGM_TRANS_SPEC 0x0012
252 #define PGM_SPECIAL_OP 0x0013
253 #define PGM_OPERAND 0x0015
254 #define PGM_TRACE_TABLE 0x0016
255 #define PGM_SPACE_SWITCH 0x001c
256 #define PGM_HFP_SQRT 0x001d
257 #define PGM_PC_TRANS_SPEC 0x001f
258 #define PGM_AFX_TRANS 0x0020
259 #define PGM_ASX_TRANS 0x0021
260 #define PGM_LX_TRANS 0x0022
261 #define PGM_EX_TRANS 0x0023
262 #define PGM_PRIM_AUTH 0x0024
263 #define PGM_SEC_AUTH 0x0025
264 #define PGM_ALET_SPEC 0x0028
265 #define PGM_ALEN_SPEC 0x0029
266 #define PGM_ALE_SEQ 0x002a
267 #define PGM_ASTE_VALID 0x002b
268 #define PGM_ASTE_SEQ 0x002c
269 #define PGM_EXT_AUTH 0x002d
270 #define PGM_STACK_FULL 0x0030
271 #define PGM_STACK_EMPTY 0x0031
272 #define PGM_STACK_SPEC 0x0032
273 #define PGM_STACK_TYPE 0x0033
274 #define PGM_STACK_OP 0x0034
275 #define PGM_ASCE_TYPE 0x0038
276 #define PGM_REG_FIRST_TRANS 0x0039
277 #define PGM_REG_SEC_TRANS 0x003a
278 #define PGM_REG_THIRD_TRANS 0x003b
279 #define PGM_MONITOR 0x0040
280 #define PGM_PER 0x0080
281 #define PGM_CRYPTO 0x0119
282
283 /* External Interrupts */
284 #define EXT_INTERRUPT_KEY 0x0040
285 #define EXT_CLOCK_COMP 0x1004
286 #define EXT_CPU_TIMER 0x1005
287 #define EXT_MALFUNCTION 0x1200
288 #define EXT_EMERGENCY 0x1201
289 #define EXT_EXTERNAL_CALL 0x1202
290 #define EXT_ETR 0x1406
291 #define EXT_SERVICE 0x2401
292 #define EXT_VIRTIO 0x2603
293
294 /* PSW defines */
295 #undef PSW_MASK_PER
296 #undef PSW_MASK_DAT
297 #undef PSW_MASK_IO
298 #undef PSW_MASK_EXT
299 #undef PSW_MASK_KEY
300 #undef PSW_SHIFT_KEY
301 #undef PSW_MASK_MCHECK
302 #undef PSW_MASK_WAIT
303 #undef PSW_MASK_PSTATE
304 #undef PSW_MASK_ASC
305 #undef PSW_MASK_CC
306 #undef PSW_MASK_PM
307 #undef PSW_MASK_64
308 #undef PSW_MASK_32
309 #undef PSW_MASK_ESA_ADDR
310
311 #define PSW_MASK_PER 0x4000000000000000ULL
312 #define PSW_MASK_DAT 0x0400000000000000ULL
313 #define PSW_MASK_IO 0x0200000000000000ULL
314 #define PSW_MASK_EXT 0x0100000000000000ULL
315 #define PSW_MASK_KEY 0x00F0000000000000ULL
316 #define PSW_SHIFT_KEY 56
317 #define PSW_MASK_MCHECK 0x0004000000000000ULL
318 #define PSW_MASK_WAIT 0x0002000000000000ULL
319 #define PSW_MASK_PSTATE 0x0001000000000000ULL
320 #define PSW_MASK_ASC 0x0000C00000000000ULL
321 #define PSW_MASK_CC 0x0000300000000000ULL
322 #define PSW_MASK_PM 0x00000F0000000000ULL
323 #define PSW_MASK_64 0x0000000100000000ULL
324 #define PSW_MASK_32 0x0000000080000000ULL
325 #define PSW_MASK_ESA_ADDR 0x000000007fffffffULL
326
327 #undef PSW_ASC_PRIMARY
328 #undef PSW_ASC_ACCREG
329 #undef PSW_ASC_SECONDARY
330 #undef PSW_ASC_HOME
331
332 #define PSW_ASC_PRIMARY 0x0000000000000000ULL
333 #define PSW_ASC_ACCREG 0x0000400000000000ULL
334 #define PSW_ASC_SECONDARY 0x0000800000000000ULL
335 #define PSW_ASC_HOME 0x0000C00000000000ULL
336
337 /* tb flags */
338
339 #define FLAG_MASK_PER (PSW_MASK_PER >> 32)
340 #define FLAG_MASK_DAT (PSW_MASK_DAT >> 32)
341 #define FLAG_MASK_IO (PSW_MASK_IO >> 32)
342 #define FLAG_MASK_EXT (PSW_MASK_EXT >> 32)
343 #define FLAG_MASK_KEY (PSW_MASK_KEY >> 32)
344 #define FLAG_MASK_MCHECK (PSW_MASK_MCHECK >> 32)
345 #define FLAG_MASK_WAIT (PSW_MASK_WAIT >> 32)
346 #define FLAG_MASK_PSTATE (PSW_MASK_PSTATE >> 32)
347 #define FLAG_MASK_ASC (PSW_MASK_ASC >> 32)
348 #define FLAG_MASK_CC (PSW_MASK_CC >> 32)
349 #define FLAG_MASK_PM (PSW_MASK_PM >> 32)
350 #define FLAG_MASK_64 (PSW_MASK_64 >> 32)
351 #define FLAG_MASK_32 0x00001000
352
353 /* Control register 0 bits */
354 #define CR0_LOWPROT 0x0000000010000000ULL
355 #define CR0_EDAT 0x0000000000800000ULL
356
357 /* MMU */
358 #define MMU_PRIMARY_IDX 0
359 #define MMU_SECONDARY_IDX 1
360 #define MMU_HOME_IDX 2
361
362 static inline int cpu_mmu_index(CPUS390XState *env, bool ifetch)
363 {
364 switch (env->psw.mask & PSW_MASK_ASC) {
365 case PSW_ASC_PRIMARY:
366 return MMU_PRIMARY_IDX;
367 case PSW_ASC_SECONDARY:
368 return MMU_SECONDARY_IDX;
369 case PSW_ASC_HOME:
370 return MMU_HOME_IDX;
371 case PSW_ASC_ACCREG:
372 /* Fallthrough: access register mode is not yet supported */
373 default:
374 abort();
375 }
376 }
377
378 static inline uint64_t cpu_mmu_idx_to_asc(int mmu_idx)
379 {
380 switch (mmu_idx) {
381 case MMU_PRIMARY_IDX:
382 return PSW_ASC_PRIMARY;
383 case MMU_SECONDARY_IDX:
384 return PSW_ASC_SECONDARY;
385 case MMU_HOME_IDX:
386 return PSW_ASC_HOME;
387 default:
388 abort();
389 }
390 }
391
392 static inline void cpu_get_tb_cpu_state(CPUS390XState *env, target_ulong *pc,
393 target_ulong *cs_base, uint32_t *flags)
394 {
395 *pc = env->psw.addr;
396 *cs_base = 0;
397 *flags = ((env->psw.mask >> 32) & ~FLAG_MASK_CC) |
398 ((env->psw.mask & PSW_MASK_32) ? FLAG_MASK_32 : 0);
399 }
400
401 #define MAX_ILEN 6
402
403 /* While the PoO talks about ILC (a number between 1 and 3), what is actually
404 stored in LowCore is shifted left one bit (an even number between 2 and 6). As
405 this is the actual length of the insn and therefore more useful, that
406 is what we want to pass around and manipulate. To make sure that we
407 have applied this distinction universally, rename the "ILC" to "ILEN". */
408 static inline int get_ilen(uint8_t opc)
409 {
410 switch (opc >> 6) {
411 case 0:
412 return 2;
413 case 1:
414 case 2:
415 return 4;
416 default:
417 return 6;
418 }
419 }
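/* For example: the length is encoded in the two most-significant bits of the
   first opcode byte, so LR (0x18, top bits 00) yields get_ilen() == 2,
   L (0x58, top bits 01) and BRC (0xa7, top bits 10) yield 4, and
   LG (0xe3, top bits 11) yields 6. */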
420
421 /* PER bits from control register 9 */
422 #define PER_CR9_EVENT_BRANCH 0x80000000
423 #define PER_CR9_EVENT_IFETCH 0x40000000
424 #define PER_CR9_EVENT_STORE 0x20000000
425 #define PER_CR9_EVENT_STORE_REAL 0x08000000
426 #define PER_CR9_EVENT_NULLIFICATION 0x01000000
427 #define PER_CR9_CONTROL_BRANCH_ADDRESS 0x00800000
428 #define PER_CR9_CONTROL_ALTERATION 0x00200000
429
430 /* PER bits from the PER CODE/ATMID/AI in lowcore */
431 #define PER_CODE_EVENT_BRANCH 0x8000
432 #define PER_CODE_EVENT_IFETCH 0x4000
433 #define PER_CODE_EVENT_STORE 0x2000
434 #define PER_CODE_EVENT_STORE_REAL 0x0800
435 #define PER_CODE_EVENT_NULLIFICATION 0x0100
436
437 /* Compute the ATMID field that is stored in the per_perc_atmid lowcore
438 entry when a PER exception is triggered. */
439 static inline uint8_t get_per_atmid(CPUS390XState *env)
440 {
441 return ((env->psw.mask & PSW_MASK_64) ? (1 << 7) : 0) |
442 (1 << 6) |
443 ((env->psw.mask & PSW_MASK_32) ? (1 << 5) : 0) |
444 ((env->psw.mask & PSW_MASK_DAT) ? (1 << 4) : 0) |
445 ((env->psw.mask & PSW_ASC_SECONDARY) ? (1 << 3) : 0) |
446 ((env->psw.mask & PSW_ASC_ACCREG) ? (1 << 2) : 0);
447 }
448
449 /* Check if an address is within the PER starting address and the PER
450 ending address. The address range might wrap around. */
451 static inline bool get_per_in_range(CPUS390XState *env, uint64_t addr)
452 {
453 if (env->cregs[10] <= env->cregs[11]) {
454 return env->cregs[10] <= addr && addr <= env->cregs[11];
455 } else {
456 return env->cregs[10] <= addr || addr <= env->cregs[11];
457 }
458 }
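/* For example: cregs[10] = 0x2000, cregs[11] = 0x3000 covers addresses in
   [0x2000, 0x3000]; cregs[10] = 0xf000, cregs[11] = 0x1000 wraps around and
   covers addr >= 0xf000 as well as addr <= 0x1000. */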
459
460 #ifndef CONFIG_USER_ONLY
461 /* In several cases of runtime exceptions, we haven't recorded the true
462 instruction length. Use these codes when raising exceptions in order
463 to re-compute the length by examining the insn in memory. */
464 #define ILEN_LATER 0x20
465 #define ILEN_LATER_INC 0x21
466 void trigger_pgm_exception(CPUS390XState *env, uint32_t code, uint32_t ilen);
467 #endif
468
469 S390CPU *cpu_s390x_init(const char *cpu_model);
470 S390CPU *s390x_new_cpu(const char *cpu_model, int64_t id, Error **errp);
471 S390CPU *cpu_s390x_create(const char *cpu_model, Error **errp);
472 void s390x_translate_init(void);
473
474 /* you can call this signal handler from your SIGBUS and SIGSEGV
475 signal handlers to inform the virtual CPU of exceptions. Non-zero
476 is returned if the signal was handled by the virtual CPU. */
477 int cpu_s390x_signal_handler(int host_signum, void *pinfo,
478 void *puc);
479 int s390_cpu_handle_mmu_fault(CPUState *cpu, vaddr address, int rw,
480 int mmu_idx);
481
482
483 #ifndef CONFIG_USER_ONLY
484 void do_restart_interrupt(CPUS390XState *env);
485 void s390x_cpu_do_unaligned_access(CPUState *cs, vaddr addr,
486 MMUAccessType access_type,
487 int mmu_idx, uintptr_t retaddr);
488
489 static inline hwaddr decode_basedisp_s(CPUS390XState *env, uint32_t ipb,
490 uint8_t *ar)
491 {
492 hwaddr addr = 0;
493 uint8_t reg;
494
495 reg = ipb >> 28;
496 if (reg > 0) {
497 addr = env->regs[reg];
498 }
499 addr += (ipb >> 16) & 0xfff;
500 if (ar) {
501 *ar = reg;
502 }
503
504 return addr;
505 }
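/* For example: ipb = 0x34560000 decodes to base register 3 and displacement
   0x456, so the returned address is env->regs[3] + 0x456 (and *ar is set to 3
   when requested).  A base field of 0 means no base register is added. */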
506
507 /* Base/displacement are at the same locations. */
508 #define decode_basedisp_rs decode_basedisp_s
509
510 /* helper functions for run_on_cpu() */
511 static inline void s390_do_cpu_reset(CPUState *cs, run_on_cpu_data arg)
512 {
513 S390CPUClass *scc = S390_CPU_GET_CLASS(cs);
514
515 scc->cpu_reset(cs);
516 }
517 static inline void s390_do_cpu_full_reset(CPUState *cs, run_on_cpu_data arg)
518 {
519 cpu_reset(cs);
520 }
521
522 void s390x_tod_timer(void *opaque);
523 void s390x_cpu_timer(void *opaque);
524
525 int s390_virtio_hypercall(CPUS390XState *env);
526
527 #ifdef CONFIG_KVM
528 void kvm_s390_service_interrupt(uint32_t parm);
529 void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq);
530 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq);
531 int kvm_s390_inject_flic(struct kvm_s390_irq *irq);
532 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code);
533 int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
534 int len, bool is_write);
535 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_clock);
536 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_clock);
537 #else
538 static inline void kvm_s390_service_interrupt(uint32_t parm)
539 {
540 }
541 static inline int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
542 {
543 return -ENOSYS;
544 }
545 static inline int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
546 {
547 return -ENOSYS;
548 }
549 static inline int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar,
550 void *hostbuf, int len, bool is_write)
551 {
552 return -ENOSYS;
553 }
554 static inline void kvm_s390_access_exception(S390CPU *cpu, uint16_t code,
555 uint64_t te_code)
556 {
557 }
558 #endif
559
560 static inline int s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
561 {
562 if (kvm_enabled()) {
563 return kvm_s390_get_clock(tod_high, tod_low);
564 }
565 /* Fixme TCG */
566 *tod_high = 0;
567 *tod_low = 0;
568 return 0;
569 }
570
571 static inline int s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
572 {
573 if (kvm_enabled()) {
574 return kvm_s390_set_clock(tod_high, tod_low);
575 }
576 /* Fixme TCG */
577 return 0;
578 }
579
580 S390CPU *s390_cpu_addr2state(uint16_t cpu_addr);
581 unsigned int s390_cpu_halt(S390CPU *cpu);
582 void s390_cpu_unhalt(S390CPU *cpu);
583 unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu);
584 static inline uint8_t s390_cpu_get_state(S390CPU *cpu)
585 {
586 return cpu->env.cpu_state;
587 }
588
589 void gtod_save(QEMUFile *f, void *opaque);
590 int gtod_load(QEMUFile *f, void *opaque, int version_id);
591
592 void cpu_inject_ext(S390CPU *cpu, uint32_t code, uint32_t param,
593 uint64_t param64);
594
595 /* ioinst.c */
596 void ioinst_handle_xsch(S390CPU *cpu, uint64_t reg1);
597 void ioinst_handle_csch(S390CPU *cpu, uint64_t reg1);
598 void ioinst_handle_hsch(S390CPU *cpu, uint64_t reg1);
599 void ioinst_handle_msch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
600 void ioinst_handle_ssch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
601 void ioinst_handle_stcrw(S390CPU *cpu, uint32_t ipb);
602 void ioinst_handle_stsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
603 int ioinst_handle_tsch(S390CPU *cpu, uint64_t reg1, uint32_t ipb);
604 void ioinst_handle_chsc(S390CPU *cpu, uint32_t ipb);
605 int ioinst_handle_tpi(S390CPU *cpu, uint32_t ipb);
606 void ioinst_handle_schm(S390CPU *cpu, uint64_t reg1, uint64_t reg2,
607 uint32_t ipb);
608 void ioinst_handle_rsch(S390CPU *cpu, uint64_t reg1);
609 void ioinst_handle_rchp(S390CPU *cpu, uint64_t reg1);
610 void ioinst_handle_sal(S390CPU *cpu, uint64_t reg1);
611
612 /* service interrupts are floating, therefore we must not pass a cpustate */
613 void s390_sclp_extint(uint32_t parm);
614
615 #else
616 static inline unsigned int s390_cpu_halt(S390CPU *cpu)
617 {
618 return 0;
619 }
620
621 static inline void s390_cpu_unhalt(S390CPU *cpu)
622 {
623 }
624
625 static inline unsigned int s390_cpu_set_state(uint8_t cpu_state, S390CPU *cpu)
626 {
627 return 0;
628 }
629 #endif
630
631 extern void subsystem_reset(void);
632
633 #define cpu_init(model) CPU(cpu_s390x_init(model))
634 #define cpu_signal_handler cpu_s390x_signal_handler
635
636 void s390_cpu_list(FILE *f, fprintf_function cpu_fprintf);
637 #define cpu_list s390_cpu_list
638 void s390_cpu_model_register_props(Object *obj);
639 void s390_cpu_model_class_register_props(ObjectClass *oc);
640 void s390_realize_cpu_model(CPUState *cs, Error **errp);
641 ObjectClass *s390_cpu_class_by_name(const char *name);
642
643 #define EXCP_EXT 1 /* external interrupt */
644 #define EXCP_SVC 2 /* supervisor call (syscall) */
645 #define EXCP_PGM 3 /* program interruption */
646 #define EXCP_IO 7 /* I/O interrupt */
647 #define EXCP_MCHK 8 /* machine check */
648
649 #define INTERRUPT_EXT (1 << 0)
650 #define INTERRUPT_TOD (1 << 1)
651 #define INTERRUPT_CPUTIMER (1 << 2)
652 #define INTERRUPT_IO (1 << 3)
653 #define INTERRUPT_MCHK (1 << 4)
654
655 /* Program Status Word. */
656 #define S390_PSWM_REGNUM 0
657 #define S390_PSWA_REGNUM 1
658 /* General Purpose Registers. */
659 #define S390_R0_REGNUM 2
660 #define S390_R1_REGNUM 3
661 #define S390_R2_REGNUM 4
662 #define S390_R3_REGNUM 5
663 #define S390_R4_REGNUM 6
664 #define S390_R5_REGNUM 7
665 #define S390_R6_REGNUM 8
666 #define S390_R7_REGNUM 9
667 #define S390_R8_REGNUM 10
668 #define S390_R9_REGNUM 11
669 #define S390_R10_REGNUM 12
670 #define S390_R11_REGNUM 13
671 #define S390_R12_REGNUM 14
672 #define S390_R13_REGNUM 15
673 #define S390_R14_REGNUM 16
674 #define S390_R15_REGNUM 17
675 /* Total Core Registers. */
676 #define S390_NUM_CORE_REGS 18
677
678 /* CC optimization */
679
680 /* Instead of computing the condition codes after each instruction,
681 * QEMU just stores the result (called CC_DST), the type of operation
682 * (called CC_OP) and whatever operands are needed (CC_SRC and possibly
683 * CC_VR). When the condition codes are needed, the condition codes can
684 * be calculated using this information. Condition codes are not generated
685 * if they are only needed for conditional branches.
686 */
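/* For example: a 32-bit signed comparison can record its operands in cc_src
   and cc_dst and set cc_op to CC_OP_LTGT_32; the condition code (0 equal,
   1 first operand low, 2 first operand high) is only derived later, via
   calc_cc(), when an instruction actually consumes it. */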
687 enum cc_op {
688 CC_OP_CONST0 = 0, /* CC is 0 */
689 CC_OP_CONST1, /* CC is 1 */
690 CC_OP_CONST2, /* CC is 2 */
691 CC_OP_CONST3, /* CC is 3 */
692
693 CC_OP_DYNAMIC, /* CC calculation defined by env->cc_op */
694 CC_OP_STATIC, /* CC value is env->cc_op */
695
696 CC_OP_NZ, /* env->cc_dst != 0 */
697 CC_OP_LTGT_32, /* signed less/greater than (32bit) */
698 CC_OP_LTGT_64, /* signed less/greater than (64bit) */
699 CC_OP_LTUGTU_32, /* unsigned less/greater than (32bit) */
700 CC_OP_LTUGTU_64, /* unsigned less/greater than (64bit) */
701 CC_OP_LTGT0_32, /* signed less/greater than 0 (32bit) */
702 CC_OP_LTGT0_64, /* signed less/greater than 0 (64bit) */
703
704 CC_OP_ADD_64, /* overflow on add (64bit) */
705 CC_OP_ADDU_64, /* overflow on unsigned add (64bit) */
706 CC_OP_ADDC_64, /* overflow on unsigned add-carry (64bit) */
707 CC_OP_SUB_64, /* overflow on subtraction (64bit) */
708 CC_OP_SUBU_64, /* overflow on unsigned subtraction (64bit) */
709 CC_OP_SUBB_64, /* overflow on unsigned sub-borrow (64bit) */
710 CC_OP_ABS_64, /* sign eval on abs (64bit) */
711 CC_OP_NABS_64, /* sign eval on nabs (64bit) */
712
713 CC_OP_ADD_32, /* overflow on add (32bit) */
714 CC_OP_ADDU_32, /* overflow on unsigned add (32bit) */
715 CC_OP_ADDC_32, /* overflow on unsigned add-carry (32bit) */
716 CC_OP_SUB_32, /* overflow on subtraction (32bit) */
717 CC_OP_SUBU_32, /* overflow on unsigned subtraction (32bit) */
718 CC_OP_SUBB_32, /* overflow on unsigned sub-borrow (32bit) */
719 CC_OP_ABS_32, /* sign eval on abs (32bit) */
720 CC_OP_NABS_32, /* sign eval on nabs (32bit) */
721
722 CC_OP_COMP_32, /* complement */
723 CC_OP_COMP_64, /* complement */
724
725 CC_OP_TM_32, /* test under mask (32bit) */
726 CC_OP_TM_64, /* test under mask (64bit) */
727
728 CC_OP_NZ_F32, /* FP dst != 0 (32bit) */
729 CC_OP_NZ_F64, /* FP dst != 0 (64bit) */
730 CC_OP_NZ_F128, /* FP dst != 0 (128bit) */
731
732 CC_OP_ICM, /* insert characters under mask */
733 CC_OP_SLA_32, /* Calculate shift left signed (32bit) */
734 CC_OP_SLA_64, /* Calculate shift left signed (64bit) */
735 CC_OP_FLOGR, /* find leftmost one */
736 CC_OP_MAX
737 };
738
739 static const char *cc_names[] = {
740 [CC_OP_CONST0] = "CC_OP_CONST0",
741 [CC_OP_CONST1] = "CC_OP_CONST1",
742 [CC_OP_CONST2] = "CC_OP_CONST2",
743 [CC_OP_CONST3] = "CC_OP_CONST3",
744 [CC_OP_DYNAMIC] = "CC_OP_DYNAMIC",
745 [CC_OP_STATIC] = "CC_OP_STATIC",
746 [CC_OP_NZ] = "CC_OP_NZ",
747 [CC_OP_LTGT_32] = "CC_OP_LTGT_32",
748 [CC_OP_LTGT_64] = "CC_OP_LTGT_64",
749 [CC_OP_LTUGTU_32] = "CC_OP_LTUGTU_32",
750 [CC_OP_LTUGTU_64] = "CC_OP_LTUGTU_64",
751 [CC_OP_LTGT0_32] = "CC_OP_LTGT0_32",
752 [CC_OP_LTGT0_64] = "CC_OP_LTGT0_64",
753 [CC_OP_ADD_64] = "CC_OP_ADD_64",
754 [CC_OP_ADDU_64] = "CC_OP_ADDU_64",
755 [CC_OP_ADDC_64] = "CC_OP_ADDC_64",
756 [CC_OP_SUB_64] = "CC_OP_SUB_64",
757 [CC_OP_SUBU_64] = "CC_OP_SUBU_64",
758 [CC_OP_SUBB_64] = "CC_OP_SUBB_64",
759 [CC_OP_ABS_64] = "CC_OP_ABS_64",
760 [CC_OP_NABS_64] = "CC_OP_NABS_64",
761 [CC_OP_ADD_32] = "CC_OP_ADD_32",
762 [CC_OP_ADDU_32] = "CC_OP_ADDU_32",
763 [CC_OP_ADDC_32] = "CC_OP_ADDC_32",
764 [CC_OP_SUB_32] = "CC_OP_SUB_32",
765 [CC_OP_SUBU_32] = "CC_OP_SUBU_32",
766 [CC_OP_SUBB_32] = "CC_OP_SUBB_32",
767 [CC_OP_ABS_32] = "CC_OP_ABS_32",
768 [CC_OP_NABS_32] = "CC_OP_NABS_32",
769 [CC_OP_COMP_32] = "CC_OP_COMP_32",
770 [CC_OP_COMP_64] = "CC_OP_COMP_64",
771 [CC_OP_TM_32] = "CC_OP_TM_32",
772 [CC_OP_TM_64] = "CC_OP_TM_64",
773 [CC_OP_NZ_F32] = "CC_OP_NZ_F32",
774 [CC_OP_NZ_F64] = "CC_OP_NZ_F64",
775 [CC_OP_NZ_F128] = "CC_OP_NZ_F128",
776 [CC_OP_ICM] = "CC_OP_ICM",
777 [CC_OP_SLA_32] = "CC_OP_SLA_32",
778 [CC_OP_SLA_64] = "CC_OP_SLA_64",
779 [CC_OP_FLOGR] = "CC_OP_FLOGR",
780 };
781
782 static inline const char *cc_name(int cc_op)
783 {
784 return cc_names[cc_op];
785 }
786
787 static inline void setcc(S390CPU *cpu, uint64_t cc)
788 {
789 CPUS390XState *env = &cpu->env;
790
791 env->psw.mask &= ~(3ull << 44);
792 env->psw.mask |= (cc & 3) << 44;
793 env->cc_op = cc;
794 }
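/* The condition code lives in PSW bits 18-19 (PSW_MASK_CC), i.e. bits 45-44
   of the 64-bit mask, hence the shift by 44 above. */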
795
796 typedef struct LowCore
797 {
798 /* prefix area: defined by architecture */
799 uint32_t ccw1[2]; /* 0x000 */
800 uint32_t ccw2[4]; /* 0x008 */
801 uint8_t pad1[0x80-0x18]; /* 0x018 */
802 uint32_t ext_params; /* 0x080 */
803 uint16_t cpu_addr; /* 0x084 */
804 uint16_t ext_int_code; /* 0x086 */
805 uint16_t svc_ilen; /* 0x088 */
806 uint16_t svc_code; /* 0x08a */
807 uint16_t pgm_ilen; /* 0x08c */
808 uint16_t pgm_code; /* 0x08e */
809 uint32_t data_exc_code; /* 0x090 */
810 uint16_t mon_class_num; /* 0x094 */
811 uint16_t per_perc_atmid; /* 0x096 */
812 uint64_t per_address; /* 0x098 */
813 uint8_t exc_access_id; /* 0x0a0 */
814 uint8_t per_access_id; /* 0x0a1 */
815 uint8_t op_access_id; /* 0x0a2 */
816 uint8_t ar_access_id; /* 0x0a3 */
817 uint8_t pad2[0xA8-0xA4]; /* 0x0a4 */
818 uint64_t trans_exc_code; /* 0x0a8 */
819 uint64_t monitor_code; /* 0x0b0 */
820 uint16_t subchannel_id; /* 0x0b8 */
821 uint16_t subchannel_nr; /* 0x0ba */
822 uint32_t io_int_parm; /* 0x0bc */
823 uint32_t io_int_word; /* 0x0c0 */
824 uint8_t pad3[0xc8-0xc4]; /* 0x0c4 */
825 uint32_t stfl_fac_list; /* 0x0c8 */
826 uint8_t pad4[0xe8-0xcc]; /* 0x0cc */
827 uint32_t mcck_interruption_code[2]; /* 0x0e8 */
828 uint8_t pad5[0xf4-0xf0]; /* 0x0f0 */
829 uint32_t external_damage_code; /* 0x0f4 */
830 uint64_t failing_storage_address; /* 0x0f8 */
831 uint8_t pad6[0x110-0x100]; /* 0x100 */
832 uint64_t per_breaking_event_addr; /* 0x110 */
833 uint8_t pad7[0x120-0x118]; /* 0x118 */
834 PSW restart_old_psw; /* 0x120 */
835 PSW external_old_psw; /* 0x130 */
836 PSW svc_old_psw; /* 0x140 */
837 PSW program_old_psw; /* 0x150 */
838 PSW mcck_old_psw; /* 0x160 */
839 PSW io_old_psw; /* 0x170 */
840 uint8_t pad8[0x1a0-0x180]; /* 0x180 */
841 PSW restart_new_psw; /* 0x1a0 */
842 PSW external_new_psw; /* 0x1b0 */
843 PSW svc_new_psw; /* 0x1c0 */
844 PSW program_new_psw; /* 0x1d0 */
845 PSW mcck_new_psw; /* 0x1e0 */
846 PSW io_new_psw; /* 0x1f0 */
847 PSW return_psw; /* 0x200 */
848 uint8_t irb[64]; /* 0x210 */
849 uint64_t sync_enter_timer; /* 0x250 */
850 uint64_t async_enter_timer; /* 0x258 */
851 uint64_t exit_timer; /* 0x260 */
852 uint64_t last_update_timer; /* 0x268 */
853 uint64_t user_timer; /* 0x270 */
854 uint64_t system_timer; /* 0x278 */
855 uint64_t last_update_clock; /* 0x280 */
856 uint64_t steal_clock; /* 0x288 */
857 PSW return_mcck_psw; /* 0x290 */
858 uint8_t pad9[0xc00-0x2a0]; /* 0x2a0 */
859 /* System info area */
860 uint64_t save_area[16]; /* 0xc00 */
861 uint8_t pad10[0xd40-0xc80]; /* 0xc80 */
862 uint64_t kernel_stack; /* 0xd40 */
863 uint64_t thread_info; /* 0xd48 */
864 uint64_t async_stack; /* 0xd50 */
865 uint64_t kernel_asce; /* 0xd58 */
866 uint64_t user_asce; /* 0xd60 */
867 uint64_t panic_stack; /* 0xd68 */
868 uint64_t user_exec_asce; /* 0xd70 */
869 uint8_t pad11[0xdc0-0xd78]; /* 0xd78 */
870
871 /* SMP info area: defined by DJB */
872 uint64_t clock_comparator; /* 0xdc0 */
873 uint64_t ext_call_fast; /* 0xdc8 */
874 uint64_t percpu_offset; /* 0xdd0 */
875 uint64_t current_task; /* 0xdd8 */
876 uint32_t softirq_pending; /* 0xde0 */
877 uint32_t pad_0x0de4; /* 0xde4 */
878 uint64_t int_clock; /* 0xde8 */
879 uint8_t pad12[0xe00-0xdf0]; /* 0xdf0 */
880
881 /* 0xe00 is used as indicator for dump tools */
882 /* whether the kernel died with panic() or not */
883 uint32_t panic_magic; /* 0xe00 */
884
885 uint8_t pad13[0x11b8-0xe04]; /* 0xe04 */
886
887 /* 64 bit extparam used for pfault, diag 250 etc */
888 uint64_t ext_params2; /* 0x11B8 */
889
890 uint8_t pad14[0x1200-0x11C0]; /* 0x11C0 */
891
892 /* System info area */
893
894 uint64_t floating_pt_save_area[16]; /* 0x1200 */
895 uint64_t gpregs_save_area[16]; /* 0x1280 */
896 uint32_t st_status_fixed_logout[4]; /* 0x1300 */
897 uint8_t pad15[0x1318-0x1310]; /* 0x1310 */
898 uint32_t prefixreg_save_area; /* 0x1318 */
899 uint32_t fpt_creg_save_area; /* 0x131c */
900 uint8_t pad16[0x1324-0x1320]; /* 0x1320 */
901 uint32_t tod_progreg_save_area; /* 0x1324 */
902 uint32_t cpu_timer_save_area[2]; /* 0x1328 */
903 uint32_t clock_comp_save_area[2]; /* 0x1330 */
904 uint8_t pad17[0x1340-0x1338]; /* 0x1338 */
905 uint32_t access_regs_save_area[16]; /* 0x1340 */
906 uint64_t cregs_save_area[16]; /* 0x1380 */
907
908 /* align to the top of the prefix area */
909
910 uint8_t pad18[0x2000-0x1400]; /* 0x1400 */
911 } QEMU_PACKED LowCore;
912
913 /* STSI */
914 #define STSI_LEVEL_MASK 0x00000000f0000000ULL
915 #define STSI_LEVEL_CURRENT 0x0000000000000000ULL
916 #define STSI_LEVEL_1 0x0000000010000000ULL
917 #define STSI_LEVEL_2 0x0000000020000000ULL
918 #define STSI_LEVEL_3 0x0000000030000000ULL
919 #define STSI_R0_RESERVED_MASK 0x000000000fffff00ULL
920 #define STSI_R0_SEL1_MASK 0x00000000000000ffULL
921 #define STSI_R1_RESERVED_MASK 0x00000000ffff0000ULL
922 #define STSI_R1_SEL2_MASK 0x000000000000ffffULL
923
924 /* Basic Machine Configuration */
925 struct sysib_111 {
926 uint32_t res1[8];
927 uint8_t manuf[16];
928 uint8_t type[4];
929 uint8_t res2[12];
930 uint8_t model[16];
931 uint8_t sequence[16];
932 uint8_t plant[4];
933 uint8_t res3[156];
934 };
935
936 /* Basic Machine CPU */
937 struct sysib_121 {
938 uint32_t res1[80];
939 uint8_t sequence[16];
940 uint8_t plant[4];
941 uint8_t res2[2];
942 uint16_t cpu_addr;
943 uint8_t res3[152];
944 };
945
946 /* Basic Machine CPUs */
947 struct sysib_122 {
948 uint8_t res1[32];
949 uint32_t capability;
950 uint16_t total_cpus;
951 uint16_t active_cpus;
952 uint16_t standby_cpus;
953 uint16_t reserved_cpus;
954 uint16_t adjustments[2026];
955 };
956
957 /* LPAR CPU */
958 struct sysib_221 {
959 uint32_t res1[80];
960 uint8_t sequence[16];
961 uint8_t plant[4];
962 uint16_t cpu_id;
963 uint16_t cpu_addr;
964 uint8_t res3[152];
965 };
966
967 /* LPAR CPUs */
968 struct sysib_222 {
969 uint32_t res1[32];
970 uint16_t lpar_num;
971 uint8_t res2;
972 uint8_t lcpuc;
973 uint16_t total_cpus;
974 uint16_t conf_cpus;
975 uint16_t standby_cpus;
976 uint16_t reserved_cpus;
977 uint8_t name[8];
978 uint32_t caf;
979 uint8_t res3[16];
980 uint16_t dedicated_cpus;
981 uint16_t shared_cpus;
982 uint8_t res4[180];
983 };
984
985 /* VM CPUs */
986 struct sysib_322 {
987 uint8_t res1[31];
988 uint8_t count;
989 struct {
990 uint8_t res2[4];
991 uint16_t total_cpus;
992 uint16_t conf_cpus;
993 uint16_t standby_cpus;
994 uint16_t reserved_cpus;
995 uint8_t name[8];
996 uint32_t caf;
997 uint8_t cpi[16];
998 uint8_t res5[3];
999 uint8_t ext_name_encoding;
1000 uint32_t res3;
1001 uint8_t uuid[16];
1002 } vm[8];
1003 uint8_t res4[1504];
1004 uint8_t ext_names[8][256];
1005 };
1006
1007 /* MMU defines */
1008 #define _ASCE_ORIGIN ~0xfffULL /* segment table origin */
1009 #define _ASCE_SUBSPACE 0x200 /* subspace group control */
1010 #define _ASCE_PRIVATE_SPACE 0x100 /* private space control */
1011 #define _ASCE_ALT_EVENT 0x80 /* storage alteration event control */
1012 #define _ASCE_SPACE_SWITCH 0x40 /* space switch event */
1013 #define _ASCE_REAL_SPACE 0x20 /* real space control */
1014 #define _ASCE_TYPE_MASK 0x0c /* asce table type mask */
1015 #define _ASCE_TYPE_REGION1 0x0c /* region first table type */
1016 #define _ASCE_TYPE_REGION2 0x08 /* region second table type */
1017 #define _ASCE_TYPE_REGION3 0x04 /* region third table type */
1018 #define _ASCE_TYPE_SEGMENT 0x00 /* segment table type */
1019 #define _ASCE_TABLE_LENGTH 0x03 /* region table length */
1020
1021 #define _REGION_ENTRY_ORIGIN ~0xfffULL /* region/segment table origin */
1022 #define _REGION_ENTRY_RO 0x200 /* region/segment protection bit */
1023 #define _REGION_ENTRY_TF 0xc0 /* region/segment table offset */
1024 #define _REGION_ENTRY_INV 0x20 /* invalid region table entry */
1025 #define _REGION_ENTRY_TYPE_MASK 0x0c /* region/segment table type mask */
1026 #define _REGION_ENTRY_TYPE_R1 0x0c /* region first table type */
1027 #define _REGION_ENTRY_TYPE_R2 0x08 /* region second table type */
1028 #define _REGION_ENTRY_TYPE_R3 0x04 /* region third table type */
1029 #define _REGION_ENTRY_LENGTH 0x03 /* region third length */
1030
1031 #define _SEGMENT_ENTRY_ORIGIN ~0x7ffULL /* segment table origin */
1032 #define _SEGMENT_ENTRY_FC 0x400 /* format control */
1033 #define _SEGMENT_ENTRY_RO 0x200 /* page protection bit */
1034 #define _SEGMENT_ENTRY_INV 0x20 /* invalid segment table entry */
1035
1036 #define _PAGE_RO 0x200 /* HW read-only bit */
1037 #define _PAGE_INVALID 0x400 /* HW invalid bit */
1038 #define _PAGE_RES0 0x800 /* bit must be zero */
1039
1040 #define SK_C (0x1 << 1)
1041 #define SK_R (0x1 << 2)
1042 #define SK_F (0x1 << 3)
1043 #define SK_ACC_MASK (0xf << 4)
1044
1045 /* SIGP order codes */
1046 #define SIGP_SENSE 0x01
1047 #define SIGP_EXTERNAL_CALL 0x02
1048 #define SIGP_EMERGENCY 0x03
1049 #define SIGP_START 0x04
1050 #define SIGP_STOP 0x05
1051 #define SIGP_RESTART 0x06
1052 #define SIGP_STOP_STORE_STATUS 0x09
1053 #define SIGP_INITIAL_CPU_RESET 0x0b
1054 #define SIGP_CPU_RESET 0x0c
1055 #define SIGP_SET_PREFIX 0x0d
1056 #define SIGP_STORE_STATUS_ADDR 0x0e
1057 #define SIGP_SET_ARCH 0x12
1058 #define SIGP_STORE_ADTL_STATUS 0x17
1059
1060 /* SIGP condition codes */
1061 #define SIGP_CC_ORDER_CODE_ACCEPTED 0
1062 #define SIGP_CC_STATUS_STORED 1
1063 #define SIGP_CC_BUSY 2
1064 #define SIGP_CC_NOT_OPERATIONAL 3
1065
1066 /* SIGP status bits */
1067 #define SIGP_STAT_EQUIPMENT_CHECK 0x80000000UL
1068 #define SIGP_STAT_INCORRECT_STATE 0x00000200UL
1069 #define SIGP_STAT_INVALID_PARAMETER 0x00000100UL
1070 #define SIGP_STAT_EXT_CALL_PENDING 0x00000080UL
1071 #define SIGP_STAT_STOPPED 0x00000040UL
1072 #define SIGP_STAT_OPERATOR_INTERV 0x00000020UL
1073 #define SIGP_STAT_CHECK_STOP 0x00000010UL
1074 #define SIGP_STAT_INOPERATIVE 0x00000004UL
1075 #define SIGP_STAT_INVALID_ORDER 0x00000002UL
1076 #define SIGP_STAT_RECEIVER_CHECK 0x00000001UL
1077
1078 /* SIGP SET ARCHITECTURE modes */
1079 #define SIGP_MODE_ESA_S390 0
1080 #define SIGP_MODE_Z_ARCH_TRANS_ALL_PSW 1
1081 #define SIGP_MODE_Z_ARCH_TRANS_CUR_PSW 2
1082
1083 /* SIGP order code mask corresponding to bit positions 56-63 */
1084 #define SIGP_ORDER_MASK 0x000000ff
1085
1086 void load_psw(CPUS390XState *env, uint64_t mask, uint64_t addr);
1087 int mmu_translate(CPUS390XState *env, target_ulong vaddr, int rw, uint64_t asc,
1088 target_ulong *raddr, int *flags, bool exc);
1089 int sclp_service_call(CPUS390XState *env, uint64_t sccb, uint32_t code);
1090 uint32_t calc_cc(CPUS390XState *env, uint32_t cc_op, uint64_t src, uint64_t dst,
1091 uint64_t vr);
1092 void s390_cpu_recompute_watchpoints(CPUState *cs);
1093
1094 int s390_cpu_virt_mem_rw(S390CPU *cpu, vaddr laddr, uint8_t ar, void *hostbuf,
1095 int len, bool is_write);
1096
1097 #define s390_cpu_virt_mem_read(cpu, laddr, ar, dest, len) \
1098 s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, false)
1099 #define s390_cpu_virt_mem_write(cpu, laddr, ar, dest, len) \
1100 s390_cpu_virt_mem_rw(cpu, laddr, ar, dest, len, true)
1101 #define s390_cpu_virt_mem_check_write(cpu, laddr, ar, len) \
1102 s390_cpu_virt_mem_rw(cpu, laddr, ar, NULL, len, true)
1103
1104 /* The value of the TOD clock for 1.1.1970. */
1105 #define TOD_UNIX_EPOCH 0x7d91048bca000000ULL
1106
1107 /* Converts ns to s390's clock format */
1108 static inline uint64_t time2tod(uint64_t ns) {
1109 return (ns << 9) / 125;
1110 }
1111
1112 /* Converts s390's clock format to ns */
1113 static inline uint64_t tod2time(uint64_t t) {
1114 return (t * 125) >> 9;
1115 }
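/* For example: TOD-clock bit 51 represents one microsecond, so 1000 ns map to
   4096 TOD units: time2tod(1000) = (1000 << 9) / 125 = 4096 and
   tod2time(4096) = (4096 * 125) >> 9 = 1000, i.e. the two conversions are
   inverses of each other (up to rounding). */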
1116
1117 /* from s390-virtio-ccw */
1118 #define MEM_SECTION_SIZE 0x10000000UL
1119 #define MAX_AVAIL_SLOTS 32
1120
1121 /* fpu_helper.c */
1122 uint32_t set_cc_nz_f32(float32 v);
1123 uint32_t set_cc_nz_f64(float64 v);
1124 uint32_t set_cc_nz_f128(float128 v);
1125
1126 /* misc_helper.c */
1127 #ifndef CONFIG_USER_ONLY
1128 int handle_diag_288(CPUS390XState *env, uint64_t r1, uint64_t r3);
1129 void handle_diag_308(CPUS390XState *env, uint64_t r1, uint64_t r3);
1130 #endif
1131 void program_interrupt(CPUS390XState *env, uint32_t code, int ilen);
1132 void QEMU_NORETURN runtime_exception(CPUS390XState *env, int excp,
1133 uintptr_t retaddr);
1134
1135 #ifdef CONFIG_KVM
1136 void kvm_s390_io_interrupt(uint16_t subchannel_id,
1137 uint16_t subchannel_nr, uint32_t io_int_parm,
1138 uint32_t io_int_word);
1139 void kvm_s390_crw_mchk(void);
1140 void kvm_s390_enable_css_support(S390CPU *cpu);
1141 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
1142 int vq, bool assign);
1143 int kvm_s390_cpu_restart(S390CPU *cpu);
1144 int kvm_s390_get_memslot_count(KVMState *s);
1145 void kvm_s390_cmma_reset(void);
1146 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state);
1147 void kvm_s390_reset_vcpu(S390CPU *cpu);
1148 int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit);
1149 void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu);
1150 int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu);
1151 int kvm_s390_get_ri(void);
1152 void kvm_s390_crypto_reset(void);
1153 #else
1154 static inline void kvm_s390_io_interrupt(uint16_t subchannel_id,
1155 uint16_t subchannel_nr,
1156 uint32_t io_int_parm,
1157 uint32_t io_int_word)
1158 {
1159 }
1160 static inline void kvm_s390_crw_mchk(void)
1161 {
1162 }
1163 static inline void kvm_s390_enable_css_support(S390CPU *cpu)
1164 {
1165 }
1166 static inline int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier,
1167 uint32_t sch, int vq,
1168 bool assign)
1169 {
1170 return -ENOSYS;
1171 }
1172 static inline int kvm_s390_cpu_restart(S390CPU *cpu)
1173 {
1174 return -ENOSYS;
1175 }
1176 static inline void kvm_s390_cmma_reset(void)
1177 {
1178 }
1179 static inline int kvm_s390_get_memslot_count(KVMState *s)
1180 {
1181 return MAX_AVAIL_SLOTS;
1182 }
1183 static inline int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
1184 {
1185 return -ENOSYS;
1186 }
1187 static inline void kvm_s390_reset_vcpu(S390CPU *cpu)
1188 {
1189 }
1190 static inline int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit,
1191 uint64_t *hw_limit)
1192 {
1193 return 0;
1194 }
1195 static inline void kvm_s390_vcpu_interrupt_pre_save(S390CPU *cpu)
1196 {
1197 }
1198 static inline int kvm_s390_vcpu_interrupt_post_load(S390CPU *cpu)
1199 {
1200 return 0;
1201 }
1202 static inline int kvm_s390_get_ri(void)
1203 {
1204 return 0;
1205 }
1206 static inline void kvm_s390_crypto_reset(void)
1207 {
1208 }
1209 #endif
1210
1211 static inline int s390_set_memory_limit(uint64_t new_limit, uint64_t *hw_limit)
1212 {
1213 if (kvm_enabled()) {
1214 return kvm_s390_set_mem_limit(kvm_state, new_limit, hw_limit);
1215 }
1216 return 0;
1217 }
1218
1219 static inline void s390_cmma_reset(void)
1220 {
1221 if (kvm_enabled()) {
1222 kvm_s390_cmma_reset();
1223 }
1224 }
1225
1226 static inline int s390_cpu_restart(S390CPU *cpu)
1227 {
1228 if (kvm_enabled()) {
1229 return kvm_s390_cpu_restart(cpu);
1230 }
1231 return -ENOSYS;
1232 }
1233
1234 static inline int s390_get_memslot_count(KVMState *s)
1235 {
1236 if (kvm_enabled()) {
1237 return kvm_s390_get_memslot_count(s);
1238 } else {
1239 return MAX_AVAIL_SLOTS;
1240 }
1241 }
1242
1243 void s390_io_interrupt(uint16_t subchannel_id, uint16_t subchannel_nr,
1244 uint32_t io_int_parm, uint32_t io_int_word);
1245 void s390_crw_mchk(void);
1246
1247 static inline int s390_assign_subch_ioeventfd(EventNotifier *notifier,
1248 uint32_t sch_id, int vq,
1249 bool assign)
1250 {
1251 return kvm_s390_assign_subch_ioeventfd(notifier, sch_id, vq, assign);
1252 }
1253
1254 static inline void s390_crypto_reset(void)
1255 {
1256 if (kvm_enabled()) {
1257 kvm_s390_crypto_reset();
1258 }
1259 }
1260
1261 static inline bool s390_get_squash_mcss(void)
1262 {
1263 if (object_property_get_bool(OBJECT(qdev_get_machine()), "s390-squash-mcss",
1264 NULL)) {
1265 return true;
1266 }
1267
1268 return false;
1269 }
1270
1271 /* machine check interruption code */
1272
1273 /* subclasses */
1274 #define MCIC_SC_SD 0x8000000000000000ULL
1275 #define MCIC_SC_PD 0x4000000000000000ULL
1276 #define MCIC_SC_SR 0x2000000000000000ULL
1277 #define MCIC_SC_CD 0x0800000000000000ULL
1278 #define MCIC_SC_ED 0x0400000000000000ULL
1279 #define MCIC_SC_DG 0x0100000000000000ULL
1280 #define MCIC_SC_W 0x0080000000000000ULL
1281 #define MCIC_SC_CP 0x0040000000000000ULL
1282 #define MCIC_SC_SP 0x0020000000000000ULL
1283 #define MCIC_SC_CK 0x0010000000000000ULL
1284
1285 /* subclass modifiers */
1286 #define MCIC_SCM_B 0x0002000000000000ULL
1287 #define MCIC_SCM_DA 0x0000000020000000ULL
1288 #define MCIC_SCM_AP 0x0000000000080000ULL
1289
1290 /* storage errors */
1291 #define MCIC_SE_SE 0x0000800000000000ULL
1292 #define MCIC_SE_SC 0x0000400000000000ULL
1293 #define MCIC_SE_KE 0x0000200000000000ULL
1294 #define MCIC_SE_DS 0x0000100000000000ULL
1295 #define MCIC_SE_IE 0x0000000080000000ULL
1296
1297 /* validity bits */
1298 #define MCIC_VB_WP 0x0000080000000000ULL
1299 #define MCIC_VB_MS 0x0000040000000000ULL
1300 #define MCIC_VB_PM 0x0000020000000000ULL
1301 #define MCIC_VB_IA 0x0000010000000000ULL
1302 #define MCIC_VB_FA 0x0000008000000000ULL
1303 #define MCIC_VB_VR 0x0000004000000000ULL
1304 #define MCIC_VB_EC 0x0000002000000000ULL
1305 #define MCIC_VB_FP 0x0000001000000000ULL
1306 #define MCIC_VB_GR 0x0000000800000000ULL
1307 #define MCIC_VB_CR 0x0000000400000000ULL
1308 #define MCIC_VB_ST 0x0000000100000000ULL
1309 #define MCIC_VB_AR 0x0000000040000000ULL
1310 #define MCIC_VB_PR 0x0000000000200000ULL
1311 #define MCIC_VB_FC 0x0000000000100000ULL
1312 #define MCIC_VB_CT 0x0000000000020000ULL
1313 #define MCIC_VB_CC 0x0000000000010000ULL
1314
1315 #endif