target-s390x/kvm.c (mirror_qemu.git), at commit "s390x/kvm: implement handling of new SIGP orders"
/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/hw.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

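/*
 * IPA0_* hold the first instruction byte(s) of an intercepted instruction
 * (kvm_run->s390_sieic.ipa & 0xff00); handle_instruction() dispatches on
 * them to the opcode-group handlers below, which then use the PRIV_*
 * codes to pick the individual instruction.
 */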
#define IPA0_DIAG 0x8300
#define IPA0_SIGP 0xae00
#define IPA0_B2 0xb200
#define IPA0_B9 0xb900
#define IPA0_EB 0xeb00
#define IPA0_E3 0xe300

#define PRIV_B2_SCLP_CALL 0x20
#define PRIV_B2_CSCH 0x30
#define PRIV_B2_HSCH 0x31
#define PRIV_B2_MSCH 0x32
#define PRIV_B2_SSCH 0x33
#define PRIV_B2_STSCH 0x34
#define PRIV_B2_TSCH 0x35
#define PRIV_B2_TPI 0x36
#define PRIV_B2_SAL 0x37
#define PRIV_B2_RSCH 0x38
#define PRIV_B2_STCRW 0x39
#define PRIV_B2_STCPS 0x3a
#define PRIV_B2_RCHP 0x3b
#define PRIV_B2_SCHM 0x3c
#define PRIV_B2_CHSC 0x5f
#define PRIV_B2_SIGA 0x74
#define PRIV_B2_XSCH 0x76

#define PRIV_EB_SQBS 0x8a
#define PRIV_EB_PCISTB 0xd0
#define PRIV_EB_SIC 0xd1

#define PRIV_B9_EQBS 0x9c
#define PRIV_B9_CLP 0xa0
#define PRIV_B9_PCISTG 0xd0
#define PRIV_B9_PCILG 0xd2
#define PRIV_B9_RPCIT 0xd3

#define PRIV_E3_MPCIFC 0xd0
#define PRIV_E3_STPCIFC 0xd4

#define DIAG_IPL 0x308
#define DIAG_KVM_HYPERCALL 0x500
#define DIAG_KVM_BREAKPOINT 0x501

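/*
 * Intercept codes as delivered by the kernel in
 * kvm_run->s390_sieic.icptcode; see handle_intercept().
 */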
#define ICPT_INSTRUCTION 0x04
#define ICPT_PROGRAM 0x08
#define ICPT_EXT_INT 0x14
#define ICPT_WAITPSW 0x1c
#define ICPT_SOFT_INTERCEPT 0x24
#define ICPT_CPU_STOP 0x28
#define ICPT_IO 0x40

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;

static void *legacy_s390_alloc(size_t size, uint64_t *align);

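/*
 * Collaborative memory management (CMMA) lets the guest flag unused pages
 * so the host can discard them. The two helpers below only probe, via
 * KVM_HAS_DEVICE_ATTR, whether the kernel supports enabling resp. clearing
 * CMMA before we actually flip the attributes.
 */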
static int kvm_s390_check_clear_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}

static int kvm_s390_check_enable_cmma(KVMState *s)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr);
}

void kvm_s390_clear_cmma_callback(void *opaque)
{
    int rc;
    KVMState *s = opaque;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(KVMState *s)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) {
        return;
    }

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    if (!rc) {
        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
    }
    trace_kvm_enable_cmma(rc);
}

int kvm_arch_init(KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
        kvm_s390_enable_cmma(s);
    }

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }
    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}

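/*
 * Register state is exchanged with the kernel in up to three ways: through
 * the shared kvm_run sync_regs area when the kernel advertises the
 * corresponding KVM_SYNC_* flag (no extra ioctl needed), through ONE_REG
 * accesses for individual registers, or through the classic
 * KVM_SET_REGS-style ioctls as a fallback for older kernels.
 */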
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Floating point */
    for (i = 0; i < 16; i++) {
        fpu.fprs[i] = env->fregs[i].ll;
    }
    fpu.fpc = env->fpc;

    r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (r < 0) {
        return r;
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point */
    r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < 16; i++) {
        env->fregs[i].ll = fpu.fprs[i];
    }
    env->fpc = fpu.fpc;

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}

/* DIAG 501 is used for sw breakpoints: 0x83 is the DIAG opcode, 0x24 the
 * register fields and 0x0501 the displacement that DIAG interprets as the
 * function code */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position to
         * be removed - if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

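/*
 * Translate a struct kvm_s390_irq into the legacy struct kvm_s390_interrupt
 * layout understood by the KVM_S390_INTERRUPT ioctl on older kernels.
 */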
static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    CPUState *cs = CPU(cpu);
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

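/*
 * Prefer injecting through the flic (floating interrupt controller) device;
 * if the kernel lacks flic support (-ENOSYS), permanently fall back to the
 * legacy KVM_S390_INTERRUPT vm ioctl.
 */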
void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}

void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_VIRTIO,
        .u.ext.ext_params = config_change,
        .u.ext.ext_params2 = token,
    };

    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

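/*
 * Compute the effective address of an RXY-format instruction from the
 * intercept data: the index register comes from the IPA, base register and
 * displacement from the IPB. The 20-bit displacement is split into a 12-bit
 * low part (DL) and an 8-bit high part (DH) and must be sign-extended.
 */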
static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return stpcifc_service_call(cpu, r1, fiba);
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* NOOP */
    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run);

    return pcistb_service_call(cpu, r1, r3, gaddr);
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return mpcifc_service_call(cpu, r1, fiba);
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

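/*
 * SIGP orders aimed at a single vcpu are executed on that vcpu's thread
 * via run_on_cpu(); SigpInfo carries the order parameter in and the
 * resulting condition code (and, if needed, status bits) back out.
 */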
typedef struct SigpInfo {
    S390CPU *cpu;
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}

static void sigp_start(void *arg)
{
    SigpInfo *si = arg;

    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    } else {
        /* execute the stop function */
        si->cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

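/*
 * The memcpy offsets in kvm_s390_store_status() below follow the architected
 * store-status save area layout: fp registers at offset 0, general registers
 * at 128, PSW at 256, prefix at 280, then FP control, TOD programmable
 * register, CPU timer and clock comparator, access registers at 320 and
 * control registers at 384.
 */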
#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    memcpy(mem, &cpu->env.fregs, 128);
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}

static void sigp_stop_and_store_status(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
        CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    }

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_OPERATING:
        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(CPU(si->cpu));
        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(void *arg)
{
    SigpInfo *si = arg;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(CPU(si->cpu));

    if (kvm_s390_store_status(si->cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_restart(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    kvm_s390_vcpu_interrupt(si->cpu, &irq);
    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {
        .cpu = cpu,
    };

    run_on_cpu(CPU(cpu), sigp_restart, &si);
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}

static void sigp_initial_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_set_prefix(void *arg)
{
    SigpInfo *si = arg;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(CPU(si->cpu));

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    si->cpu->env.psa = addr;
    cpu_synchronize_post_init(CPU(si->cpu));
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .cpu = dst_cpu,
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}

static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}

#define SIGP_ORDER_MASK 0x000000ff

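/*
 * SET ARCH is the only order handled here that targets all cpus; every
 * other order is forwarded to the addressed vcpu. The order code is taken
 * from the second-operand address, the destination cpu address from r3.
 */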
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
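    /* the parameter is in the odd register of the r1 pair */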
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }

    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}

static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}

static void guest_panicked(void)
{
    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
                                   &error_abort);
    vm_stop(RUN_STATE_GUEST_PANICKED);
}

static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    guest_panicked();
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            qemu_system_shutdown_request();
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

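/*
 * Build the interrupt type for an I/O interrupt: adapter (AI) interrupts use
 * the dedicated KVM_S390_INT_IO(1, 0, 0, 0) type, while subchannel
 * interrupts encode the cssid/ssid fields of the subchannel id and the
 * subchannel number in the type itself, as the KVM_S390_INT_IO encoding
 * expects.
 */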
void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = ((subchannel_id & 0xff00) << 24) |
            ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = 0x00400f1d40330000ULL,
    };
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

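/*
 * Wire up (or tear down) an ioeventfd for virtio-ccw queue notifications:
 * the subchannel id acts as the "address" and the virtqueue index as the
 * datamatch value.
 */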
int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}

int kvm_s390_get_memslot_count(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data)
{
    S390PCIBusDevice *pbdev;
    uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_fid(fid);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}