/*
 * QEMU S390x KVM implementation
 *
 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
 * Copyright IBM Corp. 2012
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * Contributions after 2012-10-29 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 * You should have received a copy of the GNU (Lesser) General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include <sys/types.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

#include <linux/kvm.h>
#include <asm/ptrace.h>

#include "qemu-common.h"
#include "qemu/timer.h"
#include "sysemu/sysemu.h"
#include "sysemu/kvm.h"
#include "hw/hw.h"
#include "cpu.h"
#include "sysemu/device_tree.h"
#include "qapi/qmp/qjson.h"
#include "monitor/monitor.h"
#include "exec/gdbstub.h"
#include "exec/address-spaces.h"
#include "trace.h"
#include "qapi-event.h"
#include "hw/s390x/s390-pci-inst.h"
#include "hw/s390x/s390-pci-bus.h"
#include "hw/s390x/ipl.h"

/* #define DEBUG_KVM */

#ifdef DEBUG_KVM
#define DPRINTF(fmt, ...) \
    do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

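/*
 * Convenience wrapper around the generic kvm_vm_check_attr() helper: it
 * checks whether the kernel supports a given attribute in the
 * KVM_S390_VM_MEM_CTRL group, e.g.
 *
 *     kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_ENABLE_CMMA)
 * expands to
 *     kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, KVM_S390_VM_MEM_ENABLE_CMMA)
 */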
#define kvm_vm_check_mem_attr(s, attr) \
    kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)

#define IPA0_DIAG                       0x8300
#define IPA0_SIGP                       0xae00
#define IPA0_B2                         0xb200
#define IPA0_B9                         0xb900
#define IPA0_EB                         0xeb00
#define IPA0_E3                         0xe300

#define PRIV_B2_SCLP_CALL               0x20
#define PRIV_B2_CSCH                    0x30
#define PRIV_B2_HSCH                    0x31
#define PRIV_B2_MSCH                    0x32
#define PRIV_B2_SSCH                    0x33
#define PRIV_B2_STSCH                   0x34
#define PRIV_B2_TSCH                    0x35
#define PRIV_B2_TPI                     0x36
#define PRIV_B2_SAL                     0x37
#define PRIV_B2_RSCH                    0x38
#define PRIV_B2_STCRW                   0x39
#define PRIV_B2_STCPS                   0x3a
#define PRIV_B2_RCHP                    0x3b
#define PRIV_B2_SCHM                    0x3c
#define PRIV_B2_CHSC                    0x5f
#define PRIV_B2_SIGA                    0x74
#define PRIV_B2_XSCH                    0x76

#define PRIV_EB_SQBS                    0x8a
#define PRIV_EB_PCISTB                  0xd0
#define PRIV_EB_SIC                     0xd1

#define PRIV_B9_EQBS                    0x9c
#define PRIV_B9_CLP                     0xa0
#define PRIV_B9_PCISTG                  0xd0
#define PRIV_B9_PCILG                   0xd2
#define PRIV_B9_RPCIT                   0xd3

#define PRIV_E3_MPCIFC                  0xd0
#define PRIV_E3_STPCIFC                 0xd4

#define DIAG_IPL                        0x308
#define DIAG_KVM_HYPERCALL              0x500
#define DIAG_KVM_BREAKPOINT             0x501

#define ICPT_INSTRUCTION                0x04
#define ICPT_PROGRAM                    0x08
#define ICPT_EXT_INT                    0x14
#define ICPT_WAITPSW                    0x1c
#define ICPT_SOFT_INTERCEPT             0x24
#define ICPT_CPU_STOP                   0x28
#define ICPT_IO                         0x40

static CPUWatchpoint hw_watchpoint;
/*
 * We don't use a list because this structure is also used to transmit the
 * hardware breakpoints to the kernel.
 */
static struct kvm_hw_breakpoint *hw_breakpoints;
static int nb_hw_breakpoints;

const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
    KVM_CAP_LAST_INFO
};

static int cap_sync_regs;
static int cap_async_pf;

static void *legacy_s390_alloc(size_t size, uint64_t *align);

static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
{
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) memory_limit,
    };

    return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
{
    int rc;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
        .addr = (uint64_t) &new_limit,
    };

    if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_LIMIT_SIZE)) {
        return 0;
    }

    rc = kvm_s390_query_mem_limit(s, hw_limit);
    if (rc) {
        return rc;
    } else if (*hw_limit < new_limit) {
        return -E2BIG;
    }

    return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
}
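
/*
 * Usage sketch (illustrative only, not a caller in this file): machine
 * setup code would pass the desired limit and learn the hardware maximum
 * on failure, e.g.
 *
 *     uint64_t hw_limit;
 *     if (kvm_s390_set_mem_limit(kvm_state, my_limit, &hw_limit) == -E2BIG) {
 *         ... report that the host only supports hw_limit bytes ...
 *     }
 *
 * Note that when the kernel does not expose KVM_S390_VM_MEM_LIMIT_SIZE,
 * the function silently succeeds without setting a limit.
 */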

void kvm_s390_clear_cmma_callback(void *opaque)
{
    int rc;
    KVMState *s = opaque;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_CLR_CMMA,
    };

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    trace_kvm_clear_cmma(rc);
}

static void kvm_s390_enable_cmma(KVMState *s)
{
    int rc;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_MEM_CTRL,
        .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
    };

    if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_ENABLE_CMMA) ||
        !kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_CLR_CMMA)) {
        return;
    }

    rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
    if (!rc) {
        qemu_register_reset(kvm_s390_clear_cmma_callback, s);
    }
    trace_kvm_enable_cmma(rc);
}
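
/*
 * CMMA (collaborative memory management assist) is only enabled if the
 * kernel can also clear the per-page state again: the reset callback
 * registered above relies on KVM_S390_VM_MEM_CLR_CMMA, so enabling one
 * without the other would leave stale page states behind across a guest
 * reset.
 */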

int kvm_arch_init(MachineState *ms, KVMState *s)
{
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    kvm_s390_enable_cmma(s);

    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }

    kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);

    return 0;
}

unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}

int kvm_arch_init_vcpu(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
    return 0;
}

void kvm_s390_reset_vcpu(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);

    /* The initial reset call is needed here to reset in-kernel
     * vcpu data that we can't access directly from QEMU
     * (i.e. with older kernels which don't support sync_regs/ONE_REG).
     * Before this ioctl cpu_synchronize_state() is called in common kvm
     * code (kvm-all) */
    if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
        error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
    }
}

static int can_sync_regs(CPUState *cs, int regs)
{
    return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
}
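
/*
 * With sync regs (KVM_CAP_SYNC_REGS), register state is exchanged through
 * the shared kvm_run page instead of separate ioctls: QEMU accesses
 * kvm_run->s.regs directly and marks what it modified in
 * kvm_run->kvm_dirty_regs, e.g.
 *
 *     if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
 *         cs->kvm_run->s.regs.gprs[0] = value;
 *         cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 *     }
 *
 * The put/get functions below fall back to KVM_SET_REGS/KVM_GET_REGS and
 * friends on kernels without the capability.
 */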

int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu = {};
    int r;
    int i;

    /* always save the PSW and the GPRS */
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Floating point */
    for (i = 0; i < 16; i++) {
        fpu.fprs[i] = env->fregs[i].ll;
    }
    fpu.fpc = env->fpc;

    r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
    if (r < 0) {
        return r;
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        cs->kvm_run->s.regs.cputm = env->cputm;
        cs->kvm_run->s.regs.ckc = env->ckc;
        cs->kvm_run->s.regs.todpr = env->todpr;
        cs->kvm_run->s.regs.gbea = env->gbea;
        cs->kvm_run->s.regs.pp = env->pp;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        cs->kvm_run->s.regs.pft = env->pfault_token;
        cs->kvm_run->s.regs.pfs = env->pfault_select;
        cs->kvm_run->s.regs.pfc = env->pfault_compare;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
    } else if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* access registers and control registers */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS | KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}

int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* Floating point */
    r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
    if (r < 0) {
        return r;
    }
    for (i = 0; i < 16; i++) {
        env->fregs[i].ll = fpu.fprs[i];
    }
    env->fpc = fpu.fpc;

    /* The prefix */
    if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
        env->cputm = cs->kvm_run->s.regs.cputm;
        env->ckc = cs->kvm_run->s.regs.ckc;
        env->todpr = cs->kvm_run->s.regs.todpr;
        env->gbea = cs->kvm_run->s.regs.gbea;
        env->pp = cs->kvm_run->s.regs.pp;
    } else {
        /*
         * These ONE_REGS are not protected by a capability. As they are only
         * necessary for migration we just trace a possible error, but don't
         * return with an error return code.
         */
        kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
        kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
        kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
        kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
        kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
    }

    /* pfault parameters */
    if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
        env->pfault_token = cs->kvm_run->s.regs.pft;
        env->pfault_select = cs->kvm_run->s.regs.pfs;
        env->pfault_compare = cs->kvm_run->s.regs.pfc;
    } else if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}

int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;
    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
}

int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
{
    int r;

    struct kvm_device_attr attr = {
        .group = KVM_S390_VM_TOD,
        .attr = KVM_S390_VM_TOD_LOW,
        .addr = (uint64_t)tod_low,
    };

    r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
    if (r) {
        return r;
    }

    attr.attr = KVM_S390_VM_TOD_HIGH;
    attr.addr = (uint64_t)tod_high;
    return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
}
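
/*
 * Illustrative only: migration code would save and restore the guest TOD
 * clock with the pair above, e.g.
 *
 *     uint8_t tod_high;
 *     uint64_t tod_low;
 *     if (kvm_s390_get_clock(&tod_high, &tod_low) == 0) {
 *         ... store both values, call kvm_s390_set_clock(&tod_high,
 *         &tod_low) on the destination ...
 *     }
 */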

/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than a system-defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size, uint64_t *align)
{
    void *mem;

    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    return mem == MAP_FAILED ? NULL : mem;
}

/* DIAG 501 is used for sw breakpoints */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
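
/*
 * Software breakpoints work by patching the guest text: the insert
 * function below saves the original bytes into bp->saved_insn and writes
 * the 4-byte DIAG 0x501 in their place; the remove function verifies the
 * diag is still present before restoring the saved instruction. The
 * resulting intercept reaches handle_sw_breakpoint() via handle_diag().
 */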

int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                            sizeof(diag_501), 0) ||
        cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
                            sizeof(diag_501), 1)) {
        return -EINVAL;
    }
    return 0;
}

int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
{
    uint8_t t[sizeof(diag_501)];

    if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
        return -EINVAL;
    } else if (memcmp(t, diag_501, sizeof(diag_501))) {
        return -EINVAL;
    } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
                                   sizeof(diag_501), 1)) {
        return -EINVAL;
    }

    return 0;
}

static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
                                                    int len, int type)
{
    int n;

    for (n = 0; n < nb_hw_breakpoints; n++) {
        if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
            (hw_breakpoints[n].len == len || len == -1)) {
            return &hw_breakpoints[n];
        }
    }

    return NULL;
}

static int insert_hw_breakpoint(target_ulong addr, int len, int type)
{
    int size;

    if (find_hw_breakpoint(addr, len, type)) {
        return -EEXIST;
    }

    size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
    } else {
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
    }

    if (!hw_breakpoints) {
        nb_hw_breakpoints = 0;
        return -ENOMEM;
    }

    hw_breakpoints[nb_hw_breakpoints].addr = addr;
    hw_breakpoints[nb_hw_breakpoints].len = len;
    hw_breakpoints[nb_hw_breakpoints].type = type;

    nb_hw_breakpoints++;

    return 0;
}

int kvm_arch_insert_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    switch (type) {
    case GDB_BREAKPOINT_HW:
        type = KVM_HW_BP;
        break;
    case GDB_WATCHPOINT_WRITE:
        if (len < 1) {
            return -EINVAL;
        }
        type = KVM_HW_WP_WRITE;
        break;
    default:
        return -ENOSYS;
    }
    return insert_hw_breakpoint(addr, len, type);
}

int kvm_arch_remove_hw_breakpoint(target_ulong addr,
                                  target_ulong len, int type)
{
    int size;
    struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);

    if (bp == NULL) {
        return -ENOENT;
    }

    nb_hw_breakpoints--;
    if (nb_hw_breakpoints > 0) {
        /*
         * In order to trim the array, move the last element to the position
         * to be removed, if necessary.
         */
        if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
            *bp = hw_breakpoints[nb_hw_breakpoints];
        }
        size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
        hw_breakpoints =
            (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
    } else {
        g_free(hw_breakpoints);
        hw_breakpoints = NULL;
    }

    return 0;
}

void kvm_arch_remove_all_hw_breakpoints(void)
{
    nb_hw_breakpoints = 0;
    g_free(hw_breakpoints);
    hw_breakpoints = NULL;
}

void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
{
    int i;

    if (nb_hw_breakpoints > 0) {
        dbg->arch.nr_hw_bp = nb_hw_breakpoints;
        dbg->arch.hw_bp = hw_breakpoints;

        for (i = 0; i < nb_hw_breakpoints; ++i) {
            hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
                                                       hw_breakpoints[i].addr);
        }
        dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
    } else {
        dbg->arch.nr_hw_bp = 0;
        dbg->arch.hw_bp = NULL;
    }
}

void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}

void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}

int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}

static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
                                     struct kvm_s390_interrupt *interrupt)
{
    int r = 0;

    interrupt->type = irq->type;
    switch (irq->type) {
    case KVM_S390_INT_VIRTIO:
        interrupt->parm = irq->u.ext.ext_params;
        /* fall through */
    case KVM_S390_INT_PFAULT_INIT:
    case KVM_S390_INT_PFAULT_DONE:
        interrupt->parm64 = irq->u.ext.ext_params2;
        break;
    case KVM_S390_PROGRAM_INT:
        interrupt->parm = irq->u.pgm.code;
        break;
    case KVM_S390_SIGP_SET_PREFIX:
        interrupt->parm = irq->u.prefix.address;
        break;
    case KVM_S390_INT_SERVICE:
        interrupt->parm = irq->u.ext.ext_params;
        break;
    case KVM_S390_MCHK:
        interrupt->parm = irq->u.mchk.cr14;
        interrupt->parm64 = irq->u.mchk.mcic;
        break;
    case KVM_S390_INT_EXTERNAL_CALL:
        interrupt->parm = irq->u.extcall.code;
        break;
    case KVM_S390_INT_EMERGENCY:
        interrupt->parm = irq->u.emerg.code;
        break;
    case KVM_S390_SIGP_STOP:
    case KVM_S390_RESTART:
        break; /* These types have no parameters */
    case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
        interrupt->parm = irq->u.io.subchannel_id << 16;
        interrupt->parm |= irq->u.io.subchannel_nr;
        interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
        interrupt->parm64 |= irq->u.io.io_int_word;
        break;
    default:
        r = -EINVAL;
        break;
    }
    return r;
}
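
/*
 * Note: the legacy struct kvm_s390_interrupt only carries type, parm and
 * parm64, so richer fields of struct kvm_s390_irq (for instance the
 * trans_exc_code of a program interrupt) are necessarily dropped by this
 * translation.
 */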

void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    CPUState *cs = CPU(cpu);
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    struct kvm_s390_interrupt kvmint = {};
    int r;

    r = s390_kvm_irq_to_interrupt(irq, &kvmint);
    if (r < 0) {
        fprintf(stderr, "%s called with bogus interrupt\n", __func__);
        exit(1);
    }

    r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}

void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
{
    static bool use_flic = true;
    int r;

    if (use_flic) {
        r = kvm_s390_inject_flic(irq);
        if (r == -ENOSYS) {
            use_flic = false;
        }
        if (!r) {
            return;
        }
    }
    __kvm_s390_floating_interrupt(irq);
}
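
/*
 * Floating interrupts are injected through the flic (floating interrupt
 * controller) device when available; the first -ENOSYS disables the flic
 * path for the rest of the run, so subsequent injections go straight to
 * the legacy KVM_S390_INTERRUPT vm ioctl above.
 */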

void kvm_s390_virtio_irq(int config_change, uint64_t token)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_VIRTIO,
        .u.ext.ext_params = config_change,
        .u.ext.ext_params2 = token,
    };

    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_service_interrupt(uint32_t parm)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_INT_SERVICE,
        .u.ext.ext_params = parm,
    };

    kvm_s390_floating_interrupt(&irq);
}

static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_PROGRAM_INT,
        .u.pgm.code = code,
        .u.pgm.trans_exc_code = te_code,
        .u.pgm.exc_access_id = te_code & 3,
    };

    kvm_s390_vcpu_interrupt(cpu, &irq);
}

static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}

static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}

static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) +
           (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
}
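
/*
 * RXY-format operands encode a 20-bit signed displacement split across
 * the instruction: the low 12 bits (DL) sit in ipb bits 16-27 and the
 * high 8 bits (DH) in ipb bits 8-15 (counting from the LSB), which is
 * what the two masks above reassemble; the 0x80000 test then
 * sign-extends the result.
 */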

static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    uint32_t base2 = run->s390_sieic.ipb >> 28;
    uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
                     ((run->s390_sieic.ipb & 0xff00) << 4);

    if (disp2 & 0x80000) {
        disp2 += 0xfff00000;
    }

    return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
}

static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return clp_service_call(cpu, r2);
}

static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcilg_service_call(cpu, r1, r2);
}

static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return pcistg_service_call(cpu, r1, r2);
}

static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return stpcifc_service_call(cpu, r1, fiba);
}

static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
{
    /* NOOP */
    return 0;
}

static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
    uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;

    return rpcit_service_call(cpu, r1, r2);
}

static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint8_t r3 = run->s390_sieic.ipa & 0x000f;
    uint64_t gaddr;

    cpu_synchronize_state(CPU(cpu));
    gaddr = get_base_disp_rsy(cpu, run);

    return pcistb_service_call(cpu, r1, r3, gaddr);
}

static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
{
    uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    uint64_t fiba;

    cpu_synchronize_state(CPU(cpu));
    fiba = get_base_disp_rxy(cpu, run);

    return mpcifc_service_call(cpu, r1, fiba);
}

static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    int r = 0;

    switch (ipa1) {
    case PRIV_B9_CLP:
        r = kvm_clp_service_call(cpu, run);
        break;
    case PRIV_B9_PCISTG:
        r = kvm_pcistg_service_call(cpu, run);
        break;
    case PRIV_B9_PCILG:
        r = kvm_pcilg_service_call(cpu, run);
        break;
    case PRIV_B9_RPCIT:
        r = kvm_rpcit_service_call(cpu, run);
        break;
    case PRIV_B9_EQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
        break;
    }

    return r;
}

static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_EB_PCISTB:
        r = kvm_pcistb_service_call(cpu, run);
        break;
    case PRIV_EB_SIC:
        r = kvm_sic_service_call(cpu, run);
        break;
    case PRIV_EB_SQBS:
        /* just inject exception */
        r = -1;
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
{
    int r = 0;

    switch (ipbl) {
    case PRIV_E3_MPCIFC:
        r = kvm_mpcifc_service_call(cpu, run);
        break;
    case PRIV_E3_STPCIFC:
        r = kvm_stpcifc_service_call(cpu, run);
        break;
    default:
        r = -1;
        DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
        break;
    }

    return r;
}

static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}

static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
{
    uint64_t r1, r3;

    cpu_synchronize_state(CPU(cpu));
    r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
    r3 = run->s390_sieic.ipa & 0x000f;
    handle_diag_308(&cpu->env, r1, r3);
}

static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}

#define DIAG_KVM_CODE_MASK 0x000000000000ffff

static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        break;
    }

    return r;
}

typedef struct SigpInfo {
    S390CPU *cpu;
    uint64_t param;
    int cc;
    uint64_t *status_reg;
} SigpInfo;

static void set_sigp_status(SigpInfo *si, uint64_t status)
{
    *si->status_reg &= 0xffffffff00000000ULL;
    *si->status_reg |= status;
    si->cc = SIGP_CC_STATUS_STORED;
}
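
/*
 * Helper shared by the SIGP handlers below: it stores a status word in
 * the low half of the caller's status register and reports condition
 * code 1 (status stored) for the rejected order.
 */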

static void sigp_start(void *arg)
{
    SigpInfo *si = arg;

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_stop(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
        si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
        return;
    }

    /* disabled wait - sleeping in user space */
    if (CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    } else {
        /* execute the stop function */
        si->cpu->env.sigp_order = SIGP_STOP;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

#define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
#define SAVE_AREA_SIZE 512
static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
{
    static const uint8_t ar_id = 1;
    uint64_t ckc = cpu->env.ckc >> 8;
    void *mem;
    hwaddr len = SAVE_AREA_SIZE;

    mem = cpu_physical_memory_map(addr, &len, 1);
    if (!mem) {
        return -EFAULT;
    }
    if (len != SAVE_AREA_SIZE) {
        cpu_physical_memory_unmap(mem, len, 1, 0);
        return -EFAULT;
    }

    if (store_arch) {
        cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
    }
    memcpy(mem, &cpu->env.fregs, 128);
    memcpy(mem + 128, &cpu->env.regs, 128);
    memcpy(mem + 256, &cpu->env.psw, 16);
    memcpy(mem + 280, &cpu->env.psa, 4);
    memcpy(mem + 284, &cpu->env.fpc, 4);
    memcpy(mem + 292, &cpu->env.todpr, 4);
    memcpy(mem + 296, &cpu->env.cputm, 8);
    memcpy(mem + 304, &ckc, 8);
    memcpy(mem + 320, &cpu->env.aregs, 64);
    memcpy(mem + 384, &cpu->env.cregs, 128);

    cpu_physical_memory_unmap(mem, len, 1, len);

    return 0;
}
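
/*
 * The layout written above is the store-status save area (byte offsets):
 * fp regs at 0, general regs at 128, PSW at 256, prefix at 280, FPC at
 * 284, TOD programmable register at 292, CPU timer at 296, clock
 * comparator (shifted right by 8) at 304, access regs at 320 and control
 * regs at 384.
 */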

static void sigp_stop_and_store_status(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_SIGP_STOP,
    };

    /* disabled wait - sleeping in user space */
    if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
        CPU(si->cpu)->halted) {
        s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
    }

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_OPERATING:
        si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        /* store will be performed when handling the stop intercept */
        break;
    case CPU_STATE_STOPPED:
        /* already stopped, just store the status */
        cpu_synchronize_state(CPU(si->cpu));
        kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_store_status_at_address(void *arg)
{
    SigpInfo *si = arg;
    uint32_t address = si->param & 0x7ffffe00u;

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    cpu_synchronize_state(CPU(si->cpu));

    if (kvm_s390_store_status(si->cpu, address, false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_restart(void *arg)
{
    SigpInfo *si = arg;
    struct kvm_s390_irq irq = {
        .type = KVM_S390_RESTART,
    };

    switch (s390_cpu_get_state(si->cpu)) {
    case CPU_STATE_STOPPED:
        /* the restart irq has to be delivered prior to any other pending irq */
        cpu_synchronize_state(CPU(si->cpu));
        do_restart_interrupt(&si->cpu->env);
        s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
        break;
    case CPU_STATE_OPERATING:
        kvm_s390_vcpu_interrupt(si->cpu, &irq);
        break;
    }
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

int kvm_s390_cpu_restart(S390CPU *cpu)
{
    SigpInfo si = {
        .cpu = cpu,
    };

    run_on_cpu(CPU(cpu), sigp_restart, &si);
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}

static void sigp_initial_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->initial_cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_cpu_reset(void *arg)
{
    SigpInfo *si = arg;
    CPUState *cs = CPU(si->cpu);
    S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);

    cpu_synchronize_state(cs);
    scc->cpu_reset(cs);
    cpu_synchronize_post_reset(cs);
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static void sigp_set_prefix(void *arg)
{
    SigpInfo *si = arg;
    uint32_t addr = si->param & 0x7fffe000u;

    cpu_synchronize_state(CPU(si->cpu));

    if (!address_space_access_valid(&address_space_memory, addr,
                                    sizeof(struct LowCore), false)) {
        set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
        return;
    }

    /* cpu has to be stopped */
    if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
        set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
        return;
    }

    si->cpu->env.psa = addr;
    cpu_synchronize_post_init(CPU(si->cpu));
    si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
}

static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
                                  uint64_t param, uint64_t *status_reg)
{
    SigpInfo si = {
        .cpu = dst_cpu,
        .param = param,
        .status_reg = status_reg,
    };

    /* cpu available? */
    if (dst_cpu == NULL) {
        return SIGP_CC_NOT_OPERATIONAL;
    }

    /* only resets can break pending orders */
    if (dst_cpu->env.sigp_order != 0 &&
        order != SIGP_CPU_RESET &&
        order != SIGP_INITIAL_CPU_RESET) {
        return SIGP_CC_BUSY;
    }

    switch (order) {
    case SIGP_START:
        run_on_cpu(CPU(dst_cpu), sigp_start, &si);
        break;
    case SIGP_STOP:
        run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
        break;
    case SIGP_RESTART:
        run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
        break;
    case SIGP_STOP_STORE_STATUS:
        run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
        break;
    case SIGP_STORE_STATUS_ADDR:
        run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
        break;
    case SIGP_SET_PREFIX:
        run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
        break;
    case SIGP_INITIAL_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
        set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
    }

    return si.cc;
}
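
/*
 * All single-destination orders run via run_on_cpu(), i.e. on the target
 * vcpu's thread, with the result reported back through the stack-allocated
 * SigpInfo; the condition code returned in si.cc is later set as the
 * issuing cpu's cc in handle_sigp().
 */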

static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
                                 uint64_t *status_reg)
{
    CPUState *cur_cs;
    S390CPU *cur_cpu;

    /* due to the BQL, we are the only active cpu */
    CPU_FOREACH(cur_cs) {
        cur_cpu = S390_CPU(cur_cs);
        if (cur_cpu->env.sigp_order != 0) {
            return SIGP_CC_BUSY;
        }
        cpu_synchronize_state(cur_cs);
        /* all but the current one have to be stopped */
        if (cur_cpu != cpu &&
            s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
            *status_reg &= 0xffffffff00000000ULL;
            *status_reg |= SIGP_STAT_INCORRECT_STATE;
            return SIGP_CC_STATUS_STORED;
        }
    }

    switch (param & 0xff) {
    case SIGP_MODE_ESA_S390:
        /* not supported */
        return SIGP_CC_NOT_OPERATIONAL;
    case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
    case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
        CPU_FOREACH(cur_cs) {
            cur_cpu = S390_CPU(cur_cs);
            cur_cpu->env.pfault_token = -1UL;
        }
        break;
    default:
        *status_reg &= 0xffffffff00000000ULL;
        *status_reg |= SIGP_STAT_INVALID_PARAMETER;
        return SIGP_CC_STATUS_STORED;
    }

    return SIGP_CC_ORDER_CODE_ACCEPTED;
}

#define SIGP_ORDER_MASK 0x000000ff

static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    const uint8_t r1 = ipa1 >> 4;
    const uint8_t r3 = ipa1 & 0x0f;
    int ret;
    uint8_t order;
    uint64_t *status_reg;
    uint64_t param;
    S390CPU *dst_cpu = NULL;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;
    status_reg = &env->regs[r1];
    param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];

    switch (order) {
    case SIGP_SET_ARCH:
        ret = sigp_set_architecture(cpu, param, status_reg);
        break;
    default:
        /* all other sigp orders target a single vcpu */
        dst_cpu = s390_cpu_addr2state(env->regs[r3]);
        ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
    }

    trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
                            dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);

    if (ret >= 0) {
        setcc(cpu, ret);
        return 0;
    }

    return ret;
}

static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
{
    unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
    uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
    int r = -1;

    DPRINTF("handle_instruction 0x%x 0x%x\n",
            run->s390_sieic.ipa, run->s390_sieic.ipb);
    switch (ipa0) {
    case IPA0_B2:
        r = handle_b2(cpu, run, ipa1);
        break;
    case IPA0_B9:
        r = handle_b9(cpu, run, ipa1);
        break;
    case IPA0_EB:
        r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_E3:
        r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
        break;
    case IPA0_DIAG:
        r = handle_diag(cpu, run, run->s390_sieic.ipb);
        break;
    case IPA0_SIGP:
        r = handle_sigp(cpu, run, ipa1);
        break;
    }

    if (r < 0) {
        r = 0;
        enter_pgmcheck(cpu, 0x0001);
    }

    return r;
}

static bool is_special_wait_psw(CPUState *cs)
{
    /* signal quiesce */
    return cs->kvm_run->psw_addr == 0xfffUL;
}

static void guest_panicked(void)
{
    qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
                                   &error_abort);
    vm_stop(RUN_STATE_GUEST_PANICKED);
}

static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
{
    CPUState *cs = CPU(cpu);

    error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
                 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
                 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
    s390_cpu_halt(cpu);
    guest_panicked();
}

static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_PROGRAM:
        unmanageable_intercept(cpu, "program interrupt",
                               offsetof(LowCore, program_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_EXT_INT:
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        cpu_synchronize_state(cs);
        if (s390_cpu_halt(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
            qemu_system_shutdown_request();
        }
        if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
            kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
                                  true);
        }
        cpu->env.sigp_order = 0;
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}

static int handle_tsch(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
    if (ret < 0) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
                                  run->s390_tsch.subchannel_nr,
                                  run->s390_tsch.io_int_parm,
                                  run->s390_tsch.io_int_word);
        }
        ret = 0;
    }
    return ret;
}

static int kvm_arch_handle_debug_exit(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;

    int ret = 0;
    struct kvm_debug_exit_arch *arch_info = &run->debug.arch;

    switch (arch_info->type) {
    case KVM_HW_WP_WRITE:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            cs->watchpoint_hit = &hw_watchpoint;
            hw_watchpoint.vaddr = arch_info->addr;
            hw_watchpoint.flags = BP_MEM_WRITE;
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_HW_BP:
        if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
            ret = EXCP_DEBUG;
        }
        break;
    case KVM_SINGLESTEP:
        if (cs->singlestep_enabled) {
            ret = EXCP_DEBUG;
        }
        break;
    default:
        ret = -ENOSYS;
    }

    return ret;
}

int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
{
    S390CPU *cpu = S390_CPU(cs);
    int ret = 0;

    switch (run->exit_reason) {
    case KVM_EXIT_S390_SIEIC:
        ret = handle_intercept(cpu);
        break;
    case KVM_EXIT_S390_RESET:
        s390_reipl_request();
        break;
    case KVM_EXIT_S390_TSCH:
        ret = handle_tsch(cpu);
        break;
    case KVM_EXIT_DEBUG:
        ret = kvm_arch_handle_debug_exit(cpu);
        break;
    default:
        fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
        break;
    }

    if (ret == 0) {
        ret = EXCP_INTERRUPT;
    }
    return ret;
}

bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
{
    return true;
}

int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
{
    return 1;
}

int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}

void kvm_s390_io_interrupt(uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    struct kvm_s390_irq irq = {
        .u.io.subchannel_id = subchannel_id,
        .u.io.subchannel_nr = subchannel_nr,
        .u.io.io_int_parm = io_int_parm,
        .u.io.io_int_word = io_int_word,
    };

    if (io_int_word & IO_INT_WORD_AI) {
        irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        irq.type = ((subchannel_id & 0xff00) << 24) |
                   ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_crw_mchk(void)
{
    struct kvm_s390_irq irq = {
        .type = KVM_S390_MCHK,
        .u.mchk.cr14 = 1 << 28,
        .u.mchk.mcic = 0x00400f1d40330000ULL,
    };
    kvm_s390_floating_interrupt(&irq);
}

void kvm_s390_enable_css_support(S390CPU *cpu)
{
    int r;

    /* Activate host kernel channel subsystem support. */
    r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
    assert(r == 0);
}

void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}

int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
                                    int vq, bool assign)
{
    struct kvm_ioeventfd kick = {
        .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
                 KVM_IOEVENTFD_FLAG_DATAMATCH,
        .fd = event_notifier_get_fd(notifier),
        .datamatch = vq,
        .addr = sch,
        .len = 8,
    };
    if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
        return -ENOSYS;
    }
    if (!assign) {
        kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
    }
    return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
}
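
/*
 * Illustrative use (virtio-ccw would do something like this when wiring
 * up a queue notifier; the names below are hypothetical):
 *
 *     EventNotifier *n = ... host notifier for the queue ...;
 *     r = kvm_s390_assign_subch_ioeventfd(n, sch_id, vq_index, true);
 *
 * With assign == false the same call tears the eventfd down again via
 * KVM_IOEVENTFD_FLAG_DEASSIGN.
 */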

int kvm_s390_get_memslot_count(KVMState *s)
{
    return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
}

int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
{
    struct kvm_mp_state mp_state = {};
    int ret;

    /* the kvm part might not have been initialized yet */
    if (CPU(cpu)->kvm_state == NULL) {
        return 0;
    }

    switch (cpu_state) {
    case CPU_STATE_STOPPED:
        mp_state.mp_state = KVM_MP_STATE_STOPPED;
        break;
    case CPU_STATE_CHECK_STOP:
        mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
        break;
    case CPU_STATE_OPERATING:
        mp_state.mp_state = KVM_MP_STATE_OPERATING;
        break;
    case CPU_STATE_LOAD:
        mp_state.mp_state = KVM_MP_STATE_LOAD;
        break;
    default:
        error_report("Requested CPU state is not a valid S390 CPU state: %u",
                     cpu_state);
        exit(1);
    }

    ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
    if (ret) {
        trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
                                       strerror(-ret));
    }

    return ret;
}

int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
                             uint64_t address, uint32_t data)
{
    S390PCIBusDevice *pbdev;
    uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
    uint32_t vec = data & ZPCI_MSI_VEC_MASK;

    pbdev = s390_pci_find_dev_by_fid(fid);
    if (!pbdev) {
        DPRINTF("add_msi_route no dev\n");
        return -ENODEV;
    }

    pbdev->routes.adapter.ind_offset = vec;

    route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
    route->flags = 0;
    route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
    route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
    route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
    route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
    route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
    return 0;
}