1 /*
2 * QEMU S390x KVM implementation
3 *
4 * Copyright (c) 2009 Alexander Graf <agraf@suse.de>
5 * Copyright IBM Corp. 2012
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * Contributions after 2012-10-29 are licensed under the terms of the
18 * GNU GPL, version 2 or (at your option) any later version.
19 *
20 * You should have received a copy of the GNU (Lesser) General Public
21 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
22 */
23
24 #include <sys/types.h>
25 #include <sys/ioctl.h>
26 #include <sys/mman.h>
27
28 #include <linux/kvm.h>
29 #include <asm/ptrace.h>
30
31 #include "qemu-common.h"
32 #include "qemu/timer.h"
33 #include "sysemu/sysemu.h"
34 #include "sysemu/kvm.h"
35 #include "hw/hw.h"
36 #include "cpu.h"
37 #include "sysemu/device_tree.h"
38 #include "qapi/qmp/qjson.h"
39 #include "monitor/monitor.h"
40 #include "exec/gdbstub.h"
41 #include "exec/address-spaces.h"
42 #include "trace.h"
43 #include "qapi-event.h"
44 #include "hw/s390x/s390-pci-inst.h"
45 #include "hw/s390x/s390-pci-bus.h"
46 #include "hw/s390x/ipl.h"
47 #include "hw/s390x/ebcdic.h"
48 #include "exec/memattrs.h"
49
50 /* #define DEBUG_KVM */
51
52 #ifdef DEBUG_KVM
53 #define DPRINTF(fmt, ...) \
54 do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0)
55 #else
56 #define DPRINTF(fmt, ...) \
57 do { } while (0)
58 #endif
59
60 #define kvm_vm_check_mem_attr(s, attr) \
61 kvm_vm_check_attr(s, KVM_S390_VM_MEM_CTRL, attr)
62
63 #define IPA0_DIAG 0x8300
64 #define IPA0_SIGP 0xae00
65 #define IPA0_B2 0xb200
66 #define IPA0_B9 0xb900
67 #define IPA0_EB 0xeb00
68 #define IPA0_E3 0xe300
69
70 #define PRIV_B2_SCLP_CALL 0x20
71 #define PRIV_B2_CSCH 0x30
72 #define PRIV_B2_HSCH 0x31
73 #define PRIV_B2_MSCH 0x32
74 #define PRIV_B2_SSCH 0x33
75 #define PRIV_B2_STSCH 0x34
76 #define PRIV_B2_TSCH 0x35
77 #define PRIV_B2_TPI 0x36
78 #define PRIV_B2_SAL 0x37
79 #define PRIV_B2_RSCH 0x38
80 #define PRIV_B2_STCRW 0x39
81 #define PRIV_B2_STCPS 0x3a
82 #define PRIV_B2_RCHP 0x3b
83 #define PRIV_B2_SCHM 0x3c
84 #define PRIV_B2_CHSC 0x5f
85 #define PRIV_B2_SIGA 0x74
86 #define PRIV_B2_XSCH 0x76
87
88 #define PRIV_EB_SQBS 0x8a
89 #define PRIV_EB_PCISTB 0xd0
90 #define PRIV_EB_SIC 0xd1
91
92 #define PRIV_B9_EQBS 0x9c
93 #define PRIV_B9_CLP 0xa0
94 #define PRIV_B9_PCISTG 0xd0
95 #define PRIV_B9_PCILG 0xd2
96 #define PRIV_B9_RPCIT 0xd3
97
98 #define PRIV_E3_MPCIFC 0xd0
99 #define PRIV_E3_STPCIFC 0xd4
100
101 #define DIAG_IPL 0x308
102 #define DIAG_KVM_HYPERCALL 0x500
103 #define DIAG_KVM_BREAKPOINT 0x501
104
105 #define ICPT_INSTRUCTION 0x04
106 #define ICPT_PROGRAM 0x08
107 #define ICPT_EXT_INT 0x14
108 #define ICPT_WAITPSW 0x1c
109 #define ICPT_SOFT_INTERCEPT 0x24
110 #define ICPT_CPU_STOP 0x28
111 #define ICPT_IO 0x40
112
113 static CPUWatchpoint hw_watchpoint;
114 /*
115 * We don't use a list because this structure is also used to transmit the
116 * hardware breakpoints to the kernel.
117 */
118 static struct kvm_hw_breakpoint *hw_breakpoints;
119 static int nb_hw_breakpoints;
120
121 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
122 KVM_CAP_LAST_INFO
123 };
124
125 static int cap_sync_regs;
126 static int cap_async_pf;
127 static int cap_mem_op;
128
129 static void *legacy_s390_alloc(size_t size, uint64_t *align);
130
131 static int kvm_s390_query_mem_limit(KVMState *s, uint64_t *memory_limit)
132 {
133 struct kvm_device_attr attr = {
134 .group = KVM_S390_VM_MEM_CTRL,
135 .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
136 .addr = (uint64_t) memory_limit,
137 };
138
139 return kvm_vm_ioctl(s, KVM_GET_DEVICE_ATTR, &attr);
140 }
141
142 int kvm_s390_set_mem_limit(KVMState *s, uint64_t new_limit, uint64_t *hw_limit)
143 {
144 int rc;
145
146 struct kvm_device_attr attr = {
147 .group = KVM_S390_VM_MEM_CTRL,
148 .attr = KVM_S390_VM_MEM_LIMIT_SIZE,
149 .addr = (uint64_t) &new_limit,
150 };
151
152 if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_LIMIT_SIZE)) {
153 return 0;
154 }
155
156 rc = kvm_s390_query_mem_limit(s, hw_limit);
157 if (rc) {
158 return rc;
159 } else if (*hw_limit < new_limit) {
160 return -E2BIG;
161 }
162
163 return kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
164 }
165
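/*
 * Usage sketch (illustrative only, not part of this file): a hypothetical
 * machine-init caller could clamp the requested guest size against the
 * hardware limit reported by the kernel. example_apply_mem_limit() and its
 * error handling are assumptions for illustration, not QEMU API.
 */
#if 0 /* example only */
static void example_apply_mem_limit(KVMState *s, uint64_t ram_size)
{
    uint64_t hw_limit;

    /* returns -E2BIG when the hardware limit is below the requested size */
    if (kvm_s390_set_mem_limit(s, ram_size, &hw_limit) == -E2BIG) {
        error_report("RAM size exceeds hardware limit of %" PRIu64 " bytes",
                     hw_limit);
    }
}
#endif
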
166 void kvm_s390_clear_cmma_callback(void *opaque)
167 {
168 int rc;
169 KVMState *s = opaque;
170 struct kvm_device_attr attr = {
171 .group = KVM_S390_VM_MEM_CTRL,
172 .attr = KVM_S390_VM_MEM_CLR_CMMA,
173 };
174
175 rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
176 trace_kvm_clear_cmma(rc);
177 }
178
179 static void kvm_s390_enable_cmma(KVMState *s)
180 {
181 int rc;
182 struct kvm_device_attr attr = {
183 .group = KVM_S390_VM_MEM_CTRL,
184 .attr = KVM_S390_VM_MEM_ENABLE_CMMA,
185 };
186
187 if (!kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_ENABLE_CMMA) ||
188 !kvm_vm_check_mem_attr(s, KVM_S390_VM_MEM_CLR_CMMA)) {
189 return;
190 }
191
192 rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr);
193 if (!rc) {
194 qemu_register_reset(kvm_s390_clear_cmma_callback, s);
195 }
196 trace_kvm_enable_cmma(rc);
197 }
198
199 static void kvm_s390_set_attr(uint64_t attr)
200 {
201 struct kvm_device_attr attribute = {
202 .group = KVM_S390_VM_CRYPTO,
203 .attr = attr,
204 };
205
206 int ret = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attribute);
207
208 if (ret) {
209 error_report("Failed to set crypto device attribute %lu: %s",
210 attr, strerror(-ret));
211 }
212 }
213
214 static void kvm_s390_init_aes_kw(void)
215 {
216 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_AES_KW;
217
218 if (object_property_get_bool(OBJECT(qdev_get_machine()), "aes-key-wrap",
219 NULL)) {
220 attr = KVM_S390_VM_CRYPTO_ENABLE_AES_KW;
221 }
222
223 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
224 kvm_s390_set_attr(attr);
225 }
226 }
227
228 static void kvm_s390_init_dea_kw(void)
229 {
230 uint64_t attr = KVM_S390_VM_CRYPTO_DISABLE_DEA_KW;
231
232 if (object_property_get_bool(OBJECT(qdev_get_machine()), "dea-key-wrap",
233 NULL)) {
234 attr = KVM_S390_VM_CRYPTO_ENABLE_DEA_KW;
235 }
236
237 if (kvm_vm_check_attr(kvm_state, KVM_S390_VM_CRYPTO, attr)) {
238 kvm_s390_set_attr(attr);
239 }
240 }
241
242 static void kvm_s390_init_crypto(void)
243 {
244 kvm_s390_init_aes_kw();
245 kvm_s390_init_dea_kw();
246 }
247
248 int kvm_arch_init(MachineState *ms, KVMState *s)
249 {
250 cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
251 cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);
252 cap_mem_op = kvm_check_extension(s, KVM_CAP_S390_MEM_OP);
253
254 kvm_s390_enable_cmma(s);
255
256 if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
257 || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
258 phys_mem_set_alloc(legacy_s390_alloc);
259 }
260
261 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_SIGP, 0);
262 kvm_vm_enable_cap(s, KVM_CAP_S390_USER_STSI, 0);
263
264 return 0;
265 }
266
267 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
268 {
269 return cpu->cpu_index;
270 }
271
272 int kvm_arch_init_vcpu(CPUState *cs)
273 {
274 S390CPU *cpu = S390_CPU(cs);
275 kvm_s390_set_cpu_state(cpu, cpu->env.cpu_state);
276 return 0;
277 }
278
279 void kvm_s390_reset_vcpu(S390CPU *cpu)
280 {
281 CPUState *cs = CPU(cpu);
282
283 /* The initial reset call is needed here to reset in-kernel
284 * vcpu data that we can't access directly from QEMU
285 * (i.e. with older kernels which don't support sync_regs/ONE_REG).
286 * Before this ioctl, cpu_synchronize_state() is called in common
287 * KVM code (kvm-all). */
288 if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) {
289 error_report("Initial CPU reset failed on CPU %i", cs->cpu_index);
290 }
291
292 kvm_s390_init_crypto();
293 }
294
295 static int can_sync_regs(CPUState *cs, int regs)
296 {
297 return cap_sync_regs && (cs->kvm_run->kvm_valid_regs & regs) == regs;
298 }
299
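/*
 * Illustrative note on the sync_regs protocol used throughout this file:
 * when the kernel exposes a register set via the kvm_run area (checked
 * with can_sync_regs()), QEMU reads and writes it directly and flags the
 * change in kvm_dirty_regs, e.g.
 *
 *     cs->kvm_run->s.regs.gprs[i] = env->regs[i];
 *     cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
 *
 * avoiding a KVM_SET_REGS/KVM_GET_REGS ioctl round trip per transition.
 */
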
300 int kvm_arch_put_registers(CPUState *cs, int level)
301 {
302 S390CPU *cpu = S390_CPU(cs);
303 CPUS390XState *env = &cpu->env;
304 struct kvm_sregs sregs;
305 struct kvm_regs regs;
306 struct kvm_fpu fpu = {};
307 int r;
308 int i;
309
310 /* always save the PSW and the GPRS */
311 cs->kvm_run->psw_addr = env->psw.addr;
312 cs->kvm_run->psw_mask = env->psw.mask;
313
314 if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
315 for (i = 0; i < 16; i++) {
316 cs->kvm_run->s.regs.gprs[i] = env->regs[i];
317 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
318 }
319 } else {
320 for (i = 0; i < 16; i++) {
321 regs.gprs[i] = env->regs[i];
322 }
323 r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
324 if (r < 0) {
325 return r;
326 }
327 }
328
329 /* Floating point */
330 for (i = 0; i < 16; i++) {
331 fpu.fprs[i] = env->fregs[i].ll;
332 }
333 fpu.fpc = env->fpc;
334
335 r = kvm_vcpu_ioctl(cs, KVM_SET_FPU, &fpu);
336 if (r < 0) {
337 return r;
338 }
339
340 /* Do we need to save more than that? */
341 if (level == KVM_PUT_RUNTIME_STATE) {
342 return 0;
343 }
344
345 if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
346 cs->kvm_run->s.regs.cputm = env->cputm;
347 cs->kvm_run->s.regs.ckc = env->ckc;
348 cs->kvm_run->s.regs.todpr = env->todpr;
349 cs->kvm_run->s.regs.gbea = env->gbea;
350 cs->kvm_run->s.regs.pp = env->pp;
351 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ARCH0;
352 } else {
353 /*
354 * These ONE_REGS are not protected by a capability. As they are only
355 * necessary for migration we just trace a possible error, but don't
356 * return with an error return code.
357 */
358 kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
359 kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
360 kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
361 kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
362 kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);
363 }
364
365 /* pfault parameters */
366 if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
367 cs->kvm_run->s.regs.pft = env->pfault_token;
368 cs->kvm_run->s.regs.pfs = env->pfault_select;
369 cs->kvm_run->s.regs.pfc = env->pfault_compare;
370 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PFAULT;
371 } else if (cap_async_pf) {
372 r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
373 if (r < 0) {
374 return r;
375 }
376 r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
377 if (r < 0) {
378 return r;
379 }
380 r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
381 if (r < 0) {
382 return r;
383 }
384 }
385
386 /* access registers and control registers */
387 if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
388 for (i = 0; i < 16; i++) {
389 cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
390 cs->kvm_run->s.regs.crs[i] = env->cregs[i];
391 }
392 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
393 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
394 } else {
395 for (i = 0; i < 16; i++) {
396 sregs.acrs[i] = env->aregs[i];
397 sregs.crs[i] = env->cregs[i];
398 }
399 r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
400 if (r < 0) {
401 return r;
402 }
403 }
404
405 /* Finally the prefix */
406 if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
407 cs->kvm_run->s.regs.prefix = env->psa;
408 cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
409 } else {
410 /* prefix is only supported via sync regs */
411 }
412 return 0;
413 }
414
415 int kvm_arch_get_registers(CPUState *cs)
416 {
417 S390CPU *cpu = S390_CPU(cs);
418 CPUS390XState *env = &cpu->env;
419 struct kvm_sregs sregs;
420 struct kvm_regs regs;
421 struct kvm_fpu fpu;
422 int i, r;
423
424 /* get the PSW */
425 env->psw.addr = cs->kvm_run->psw_addr;
426 env->psw.mask = cs->kvm_run->psw_mask;
427
428 /* the GPRS */
429 if (can_sync_regs(cs, KVM_SYNC_GPRS)) {
430 for (i = 0; i < 16; i++) {
431 env->regs[i] = cs->kvm_run->s.regs.gprs[i];
432 }
433 } else {
434 r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
435 if (r < 0) {
436 return r;
437 }
438 for (i = 0; i < 16; i++) {
439 env->regs[i] = regs.gprs[i];
440 }
441 }
442
443 /* The ACRS and CRS */
444 if (can_sync_regs(cs, KVM_SYNC_ACRS | KVM_SYNC_CRS)) {
445 for (i = 0; i < 16; i++) {
446 env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
447 env->cregs[i] = cs->kvm_run->s.regs.crs[i];
448 }
449 } else {
450 r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
451 if (r < 0) {
452 return r;
453 }
454 for (i = 0; i < 16; i++) {
455 env->aregs[i] = sregs.acrs[i];
456 env->cregs[i] = sregs.crs[i];
457 }
458 }
459
460 /* Floating point */
461 r = kvm_vcpu_ioctl(cs, KVM_GET_FPU, &fpu);
462 if (r < 0) {
463 return r;
464 }
465 for (i = 0; i < 16; i++) {
466 env->fregs[i].ll = fpu.fprs[i];
467 }
468 env->fpc = fpu.fpc;
469
470 /* The prefix */
471 if (can_sync_regs(cs, KVM_SYNC_PREFIX)) {
472 env->psa = cs->kvm_run->s.regs.prefix;
473 }
474
475 if (can_sync_regs(cs, KVM_SYNC_ARCH0)) {
476 env->cputm = cs->kvm_run->s.regs.cputm;
477 env->ckc = cs->kvm_run->s.regs.ckc;
478 env->todpr = cs->kvm_run->s.regs.todpr;
479 env->gbea = cs->kvm_run->s.regs.gbea;
480 env->pp = cs->kvm_run->s.regs.pp;
481 } else {
482 /*
483 * These ONE_REGS are not protected by a capability. As they are only
484 * necessary for migration we just trace a possible error, but don't
485 * return with an error return code.
486 */
487 kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
488 kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
489 kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
490 kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
491 kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);
492 }
493
494 /* pfault parameters */
495 if (can_sync_regs(cs, KVM_SYNC_PFAULT)) {
496 env->pfault_token = cs->kvm_run->s.regs.pft;
497 env->pfault_select = cs->kvm_run->s.regs.pfs;
498 env->pfault_compare = cs->kvm_run->s.regs.pfc;
499 } else if (cap_async_pf) {
500 r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
501 if (r < 0) {
502 return r;
503 }
504 r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
505 if (r < 0) {
506 return r;
507 }
508 r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
509 if (r < 0) {
510 return r;
511 }
512 }
513
514 return 0;
515 }
516
517 int kvm_s390_get_clock(uint8_t *tod_high, uint64_t *tod_low)
518 {
519 int r;
520 struct kvm_device_attr attr = {
521 .group = KVM_S390_VM_TOD,
522 .attr = KVM_S390_VM_TOD_LOW,
523 .addr = (uint64_t)tod_low,
524 };
525
526 r = kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
527 if (r) {
528 return r;
529 }
530
531 attr.attr = KVM_S390_VM_TOD_HIGH;
532 attr.addr = (uint64_t)tod_high;
533 return kvm_vm_ioctl(kvm_state, KVM_GET_DEVICE_ATTR, &attr);
534 }
535
536 int kvm_s390_set_clock(uint8_t *tod_high, uint64_t *tod_low)
537 {
538 int r;
539
540 struct kvm_device_attr attr = {
541 .group = KVM_S390_VM_TOD,
542 .attr = KVM_S390_VM_TOD_LOW,
543 .addr = (uint64_t)tod_low,
544 };
545
546 r = kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
547 if (r) {
548 return r;
549 }
550
551 attr.attr = KVM_S390_VM_TOD_HIGH;
552 attr.addr = (uint64_t)tod_high;
553 return kvm_vm_ioctl(kvm_state, KVM_SET_DEVICE_ATTR, &attr);
554 }
555
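/*
 * Usage sketch (hypothetical helper, for illustration only): reading the
 * guest TOD as the (epoch-index, clock) pair exposed by the attributes
 * above. example_read_guest_tod() is an assumption, not QEMU API; this
 * sketch only handles a zero high-order byte.
 */
#if 0 /* example only */
static int example_read_guest_tod(uint64_t *tod_low)
{
    uint8_t tod_high;
    int r = kvm_s390_get_clock(&tod_high, tod_low);

    if (r) {
        return r;
    }
    /* tod_high carries the high-order byte of the TOD; currently 0 */
    return tod_high ? -ERANGE : 0;
}
#endif
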
556 /**
557 * kvm_s390_mem_op:
558 * @addr: the logical start address in guest memory
559 * @ar: the access register number
560 * @hostbuf: buffer in host memory. NULL = do only checks w/o copying
561 * @len: length that should be transferred
562 * @is_write: true = write, false = read
563 * Returns: 0 on success, non-zero if an exception or error occurred
564 *
565 * Use KVM ioctl to read/write from/to guest memory. An access exception
566 * is injected into the vCPU in case of translation errors.
567 */
568 int kvm_s390_mem_op(S390CPU *cpu, vaddr addr, uint8_t ar, void *hostbuf,
569 int len, bool is_write)
570 {
571 struct kvm_s390_mem_op mem_op = {
572 .gaddr = addr,
573 .flags = KVM_S390_MEMOP_F_INJECT_EXCEPTION,
574 .size = len,
575 .op = is_write ? KVM_S390_MEMOP_LOGICAL_WRITE
576 : KVM_S390_MEMOP_LOGICAL_READ,
577 .buf = (uint64_t)hostbuf,
578 .ar = ar,
579 };
580 int ret;
581
582 if (!cap_mem_op) {
583 return -ENOSYS;
584 }
585 if (!hostbuf) {
586 mem_op.flags |= KVM_S390_MEMOP_F_CHECK_ONLY;
587 }
588
589 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_S390_MEM_OP, &mem_op);
590 if (ret < 0) {
591 error_printf("KVM_S390_MEM_OP failed: %s\n", strerror(-ret));
592 }
593 return ret;
594 }
595
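/*
 * Usage sketch (hypothetical, for illustration): reading 8 bytes from a
 * guest logical address into a host buffer via access register 0; passing
 * NULL as the buffer would merely probe that the access succeeds.
 * example_read_guest_u64() is an assumption, not QEMU API.
 */
#if 0 /* example only */
static int example_read_guest_u64(S390CPU *cpu, vaddr addr, uint64_t *val)
{
    /* false = read; a translation error injects an exception into the vCPU */
    return kvm_s390_mem_op(cpu, addr, 0, val, sizeof(*val), false);
}
#endif
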
596 /*
597 * Legacy layout for s390:
598 * Older S390 KVM requires the topmost vma of the RAM to be
599 * smaller than a system-defined value, which is at least 256GB.
600 * Larger systems have larger values. We put the guest between
601 * the end of the data segment (system break) and this value. We
602 * use 32GB as a base to have enough room for the system break
603 * to grow. We also have to use MAP parameters that avoid
604 * read-only mapping of guest pages.
605 */
606 static void *legacy_s390_alloc(size_t size, uint64_t *align)
607 {
608 void *mem;
609
610 mem = mmap((void *) 0x800000000ULL, size,
611 PROT_EXEC|PROT_READ|PROT_WRITE,
612 MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
613 return mem == MAP_FAILED ? NULL : mem;
614 }
615
616 /* DIAG 501 is used for sw breakpoints; 0x83 is the DIAG opcode */
617 static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
618
619 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
620 {
621
622 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
623 sizeof(diag_501), 0) ||
624 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501,
625 sizeof(diag_501), 1)) {
626 return -EINVAL;
627 }
628 return 0;
629 }
630
631 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
632 {
633 uint8_t t[sizeof(diag_501)];
634
635 if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) {
636 return -EINVAL;
637 } else if (memcmp(t, diag_501, sizeof(diag_501))) {
638 return -EINVAL;
639 } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
640 sizeof(diag_501), 1)) {
641 return -EINVAL;
642 }
643
644 return 0;
645 }
646
647 static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr,
648 int len, int type)
649 {
650 int n;
651
652 for (n = 0; n < nb_hw_breakpoints; n++) {
653 if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type &&
654 (hw_breakpoints[n].len == len || len == -1)) {
655 return &hw_breakpoints[n];
656 }
657 }
658
659 return NULL;
660 }
661
662 static int insert_hw_breakpoint(target_ulong addr, int len, int type)
663 {
664 int size;
665
666 if (find_hw_breakpoint(addr, len, type)) {
667 return -EEXIST;
668 }
669
670 size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint);
671
672 if (!hw_breakpoints) {
673 nb_hw_breakpoints = 0;
674 hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size);
675 } else {
676 hw_breakpoints =
677 (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size);
678 }
679
680 if (!hw_breakpoints) {
681 nb_hw_breakpoints = 0;
682 return -ENOMEM;
683 }
684
685 hw_breakpoints[nb_hw_breakpoints].addr = addr;
686 hw_breakpoints[nb_hw_breakpoints].len = len;
687 hw_breakpoints[nb_hw_breakpoints].type = type;
688
689 nb_hw_breakpoints++;
690
691 return 0;
692 }
693
694 int kvm_arch_insert_hw_breakpoint(target_ulong addr,
695 target_ulong len, int type)
696 {
697 switch (type) {
698 case GDB_BREAKPOINT_HW:
699 type = KVM_HW_BP;
700 break;
701 case GDB_WATCHPOINT_WRITE:
702 if (len < 1) {
703 return -EINVAL;
704 }
705 type = KVM_HW_WP_WRITE;
706 break;
707 default:
708 return -ENOSYS;
709 }
710 return insert_hw_breakpoint(addr, len, type);
711 }
712
713 int kvm_arch_remove_hw_breakpoint(target_ulong addr,
714 target_ulong len, int type)
715 {
716 int size;
717 struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type);
718
719 if (bp == NULL) {
720 return -ENOENT;
721 }
722
723 nb_hw_breakpoints--;
724 if (nb_hw_breakpoints > 0) {
725 /*
726 * In order to trim the array, move the last element into the position
727 * being removed, if necessary.
728 */
729 if (bp != &hw_breakpoints[nb_hw_breakpoints]) {
730 *bp = hw_breakpoints[nb_hw_breakpoints];
731 }
732 size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint);
733 hw_breakpoints =
734 (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size);
735 } else {
736 g_free(hw_breakpoints);
737 hw_breakpoints = NULL;
738 }
739
740 return 0;
741 }
742
743 void kvm_arch_remove_all_hw_breakpoints(void)
744 {
745 nb_hw_breakpoints = 0;
746 g_free(hw_breakpoints);
747 hw_breakpoints = NULL;
748 }
749
750 void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg)
751 {
752 int i;
753
754 if (nb_hw_breakpoints > 0) {
755 dbg->arch.nr_hw_bp = nb_hw_breakpoints;
756 dbg->arch.hw_bp = hw_breakpoints;
757
758 for (i = 0; i < nb_hw_breakpoints; ++i) {
759 hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu,
760 hw_breakpoints[i].addr);
761 }
762 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
763 } else {
764 dbg->arch.nr_hw_bp = 0;
765 dbg->arch.hw_bp = NULL;
766 }
767 }
768
769 void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
770 {
771 }
772
773 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
774 {
775 return MEMTXATTRS_UNSPECIFIED;
776 }
777
778 int kvm_arch_process_async_events(CPUState *cs)
779 {
780 return cs->halted;
781 }
782
783 static int s390_kvm_irq_to_interrupt(struct kvm_s390_irq *irq,
784 struct kvm_s390_interrupt *interrupt)
785 {
786 int r = 0;
787
788 interrupt->type = irq->type;
789 switch (irq->type) {
790 case KVM_S390_INT_VIRTIO:
791 interrupt->parm = irq->u.ext.ext_params;
792 /* fall through */
793 case KVM_S390_INT_PFAULT_INIT:
794 case KVM_S390_INT_PFAULT_DONE:
795 interrupt->parm64 = irq->u.ext.ext_params2;
796 break;
797 case KVM_S390_PROGRAM_INT:
798 interrupt->parm = irq->u.pgm.code;
799 break;
800 case KVM_S390_SIGP_SET_PREFIX:
801 interrupt->parm = irq->u.prefix.address;
802 break;
803 case KVM_S390_INT_SERVICE:
804 interrupt->parm = irq->u.ext.ext_params;
805 break;
806 case KVM_S390_MCHK:
807 interrupt->parm = irq->u.mchk.cr14;
808 interrupt->parm64 = irq->u.mchk.mcic;
809 break;
810 case KVM_S390_INT_EXTERNAL_CALL:
811 interrupt->parm = irq->u.extcall.code;
812 break;
813 case KVM_S390_INT_EMERGENCY:
814 interrupt->parm = irq->u.emerg.code;
815 break;
816 case KVM_S390_SIGP_STOP:
817 case KVM_S390_RESTART:
818 break; /* These types have no parameters */
819 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
820 interrupt->parm = irq->u.io.subchannel_id << 16;
821 interrupt->parm |= irq->u.io.subchannel_nr;
822 interrupt->parm64 = (uint64_t)irq->u.io.io_int_parm << 32;
823 interrupt->parm64 |= irq->u.io.io_int_word;
824 break;
825 default:
826 r = -EINVAL;
827 break;
828 }
829 return r;
830 }
831
832 void kvm_s390_vcpu_interrupt(S390CPU *cpu, struct kvm_s390_irq *irq)
833 {
834 struct kvm_s390_interrupt kvmint = {};
835 CPUState *cs = CPU(cpu);
836 int r;
837
838 r = s390_kvm_irq_to_interrupt(irq, &kvmint);
839 if (r < 0) {
840 fprintf(stderr, "%s called with bogus interrupt\n", __func__);
841 exit(1);
842 }
843
844 r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
845 if (r < 0) {
846 fprintf(stderr, "KVM failed to inject interrupt\n");
847 exit(1);
848 }
849 }
850
851 static void __kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
852 {
853 struct kvm_s390_interrupt kvmint = {};
854 int r;
855
856 r = s390_kvm_irq_to_interrupt(irq, &kvmint);
857 if (r < 0) {
858 fprintf(stderr, "%s called with bogus interrupt\n", __func__);
859 exit(1);
860 }
861
862 r = kvm_vm_ioctl(kvm_state, KVM_S390_INTERRUPT, &kvmint);
863 if (r < 0) {
864 fprintf(stderr, "KVM failed to inject interrupt\n");
865 exit(1);
866 }
867 }
868
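/*
 * Floating interrupts are preferably delivered through the FLIC device;
 * if the kernel lacks it (-ENOSYS from the injection), the function below
 * latches use_flic to false and falls back permanently to the legacy
 * KVM_S390_INTERRUPT vm ioctl.
 */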
869 void kvm_s390_floating_interrupt(struct kvm_s390_irq *irq)
870 {
871 static bool use_flic = true;
872 int r;
873
874 if (use_flic) {
875 r = kvm_s390_inject_flic(irq);
876 if (r == -ENOSYS) {
877 use_flic = false;
878 }
879 if (!r) {
880 return;
881 }
882 }
883 __kvm_s390_floating_interrupt(irq);
884 }
885
886 void kvm_s390_virtio_irq(int config_change, uint64_t token)
887 {
888 struct kvm_s390_irq irq = {
889 .type = KVM_S390_INT_VIRTIO,
890 .u.ext.ext_params = config_change,
891 .u.ext.ext_params2 = token,
892 };
893
894 kvm_s390_floating_interrupt(&irq);
895 }
896
897 void kvm_s390_service_interrupt(uint32_t parm)
898 {
899 struct kvm_s390_irq irq = {
900 .type = KVM_S390_INT_SERVICE,
901 .u.ext.ext_params = parm,
902 };
903
904 kvm_s390_floating_interrupt(&irq);
905 }
906
907 static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
908 {
909 struct kvm_s390_irq irq = {
910 .type = KVM_S390_PROGRAM_INT,
911 .u.pgm.code = code,
912 };
913
914 kvm_s390_vcpu_interrupt(cpu, &irq);
915 }
916
917 void kvm_s390_access_exception(S390CPU *cpu, uint16_t code, uint64_t te_code)
918 {
919 struct kvm_s390_irq irq = {
920 .type = KVM_S390_PROGRAM_INT,
921 .u.pgm.code = code,
922 .u.pgm.trans_exc_code = te_code,
923 .u.pgm.exc_access_id = te_code & 3,
924 };
925
926 kvm_s390_vcpu_interrupt(cpu, &irq);
927 }
928
929 static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
930 uint16_t ipbh0)
931 {
932 CPUS390XState *env = &cpu->env;
933 uint64_t sccb;
934 uint32_t code;
935 int r = 0;
936
937 cpu_synchronize_state(CPU(cpu));
938 sccb = env->regs[ipbh0 & 0xf];
939 code = env->regs[(ipbh0 & 0xf0) >> 4];
940
941 r = sclp_service_call(env, sccb, code);
942 if (r < 0) {
943 enter_pgmcheck(cpu, -r);
944 } else {
945 setcc(cpu, r);
946 }
947
948 return 0;
949 }
950
951 static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
952 {
953 CPUS390XState *env = &cpu->env;
954 int rc = 0;
955 uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;
956
957 cpu_synchronize_state(CPU(cpu));
958
959 switch (ipa1) {
960 case PRIV_B2_XSCH:
961 ioinst_handle_xsch(cpu, env->regs[1]);
962 break;
963 case PRIV_B2_CSCH:
964 ioinst_handle_csch(cpu, env->regs[1]);
965 break;
966 case PRIV_B2_HSCH:
967 ioinst_handle_hsch(cpu, env->regs[1]);
968 break;
969 case PRIV_B2_MSCH:
970 ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
971 break;
972 case PRIV_B2_SSCH:
973 ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
974 break;
975 case PRIV_B2_STCRW:
976 ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
977 break;
978 case PRIV_B2_STSCH:
979 ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
980 break;
981 case PRIV_B2_TSCH:
982 /* We should only get tsch via KVM_EXIT_S390_TSCH. */
983 fprintf(stderr, "Spurious tsch intercept\n");
984 break;
985 case PRIV_B2_CHSC:
986 ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
987 break;
988 case PRIV_B2_TPI:
989 /* This should have been handled by kvm already. */
990 fprintf(stderr, "Spurious tpi intercept\n");
991 break;
992 case PRIV_B2_SCHM:
993 ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
994 run->s390_sieic.ipb);
995 break;
996 case PRIV_B2_RSCH:
997 ioinst_handle_rsch(cpu, env->regs[1]);
998 break;
999 case PRIV_B2_RCHP:
1000 ioinst_handle_rchp(cpu, env->regs[1]);
1001 break;
1002 case PRIV_B2_STCPS:
1003 /* We do not provide this instruction, it is suppressed. */
1004 break;
1005 case PRIV_B2_SAL:
1006 ioinst_handle_sal(cpu, env->regs[1]);
1007 break;
1008 case PRIV_B2_SIGA:
1009 /* Not provided, set CC = 3 for subchannel not operational */
1010 setcc(cpu, 3);
1011 break;
1012 case PRIV_B2_SCLP_CALL:
1013 rc = kvm_sclp_service_call(cpu, run, ipbh0);
1014 break;
1015 default:
1016 rc = -1;
1017 DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
1018 break;
1019 }
1020
1021 return rc;
1022 }
1023
1024 static uint64_t get_base_disp_rxy(S390CPU *cpu, struct kvm_run *run,
1025 uint8_t *ar)
1026 {
1027 CPUS390XState *env = &cpu->env;
1028 uint32_t x2 = (run->s390_sieic.ipa & 0x000f);
1029 uint32_t base2 = run->s390_sieic.ipb >> 28;
1030 uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
1031 ((run->s390_sieic.ipb & 0xff00) << 4);
1032
1033 if (disp2 & 0x80000) {
1034 disp2 += 0xfff00000;
1035 }
1036 if (ar) {
1037 *ar = base2;
1038 }
1039
1040 return (base2 ? env->regs[base2] : 0) +
1041 (x2 ? env->regs[x2] : 0) + (long)(int)disp2;
1042 }
1043
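/*
 * Worked example (illustrative): for ipb 0x2fff8000 the RXY fields decode
 * as base2 = 2, DL = 0xfff and DH = 0x80, giving disp2 = (0x80 << 12) +
 * 0xfff = 0x80fff. Bit 19 (0x80000) is set, so 0xfff00000 is added and the
 * (long)(int) cast sign-extends the 20-bit displacement to -0x7f001.
 */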
1044 static uint64_t get_base_disp_rsy(S390CPU *cpu, struct kvm_run *run,
1045 uint8_t *ar)
1046 {
1047 CPUS390XState *env = &cpu->env;
1048 uint32_t base2 = run->s390_sieic.ipb >> 28;
1049 uint32_t disp2 = ((run->s390_sieic.ipb & 0x0fff0000) >> 16) +
1050 ((run->s390_sieic.ipb & 0xff00) << 4);
1051
1052 if (disp2 & 0x80000) {
1053 disp2 += 0xfff00000;
1054 }
1055 if (ar) {
1056 *ar = base2;
1057 }
1058
1059 return (base2 ? env->regs[base2] : 0) + (long)(int)disp2;
1060 }
1061
1062 static int kvm_clp_service_call(S390CPU *cpu, struct kvm_run *run)
1063 {
1064 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1065
1066 return clp_service_call(cpu, r2);
1067 }
1068
1069 static int kvm_pcilg_service_call(S390CPU *cpu, struct kvm_run *run)
1070 {
1071 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1072 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1073
1074 return pcilg_service_call(cpu, r1, r2);
1075 }
1076
1077 static int kvm_pcistg_service_call(S390CPU *cpu, struct kvm_run *run)
1078 {
1079 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1080 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1081
1082 return pcistg_service_call(cpu, r1, r2);
1083 }
1084
1085 static int kvm_stpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
1086 {
1087 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1088 uint64_t fiba;
1089 uint8_t ar;
1090
1091 cpu_synchronize_state(CPU(cpu));
1092 fiba = get_base_disp_rxy(cpu, run, &ar);
1093
1094 return stpcifc_service_call(cpu, r1, fiba, ar);
1095 }
1096
1097 static int kvm_sic_service_call(S390CPU *cpu, struct kvm_run *run)
1098 {
1099 /* NOOP */
1100 return 0;
1101 }
1102
1103 static int kvm_rpcit_service_call(S390CPU *cpu, struct kvm_run *run)
1104 {
1105 uint8_t r1 = (run->s390_sieic.ipb & 0x00f00000) >> 20;
1106 uint8_t r2 = (run->s390_sieic.ipb & 0x000f0000) >> 16;
1107
1108 return rpcit_service_call(cpu, r1, r2);
1109 }
1110
1111 static int kvm_pcistb_service_call(S390CPU *cpu, struct kvm_run *run)
1112 {
1113 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1114 uint8_t r3 = run->s390_sieic.ipa & 0x000f;
1115 uint64_t gaddr;
1116 uint8_t ar;
1117
1118 cpu_synchronize_state(CPU(cpu));
1119 gaddr = get_base_disp_rsy(cpu, run, &ar);
1120
1121 return pcistb_service_call(cpu, r1, r3, gaddr, ar);
1122 }
1123
1124 static int kvm_mpcifc_service_call(S390CPU *cpu, struct kvm_run *run)
1125 {
1126 uint8_t r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1127 uint64_t fiba;
1128 uint8_t ar;
1129
1130 cpu_synchronize_state(CPU(cpu));
1131 fiba = get_base_disp_rxy(cpu, run, &ar);
1132
1133 return mpcifc_service_call(cpu, r1, fiba, ar);
1134 }
1135
1136 static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
1137 {
1138 int r = 0;
1139
1140 switch (ipa1) {
1141 case PRIV_B9_CLP:
1142 r = kvm_clp_service_call(cpu, run);
1143 break;
1144 case PRIV_B9_PCISTG:
1145 r = kvm_pcistg_service_call(cpu, run);
1146 break;
1147 case PRIV_B9_PCILG:
1148 r = kvm_pcilg_service_call(cpu, run);
1149 break;
1150 case PRIV_B9_RPCIT:
1151 r = kvm_rpcit_service_call(cpu, run);
1152 break;
1153 case PRIV_B9_EQBS:
1154 /* just inject exception */
1155 r = -1;
1156 break;
1157 default:
1158 r = -1;
1159 DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1);
1160 break;
1161 }
1162
1163 return r;
1164 }
1165
1166 static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
1167 {
1168 int r = 0;
1169
1170 switch (ipbl) {
1171 case PRIV_EB_PCISTB:
1172 r = kvm_pcistb_service_call(cpu, run);
1173 break;
1174 case PRIV_EB_SIC:
1175 r = kvm_sic_service_call(cpu, run);
1176 break;
1177 case PRIV_EB_SQBS:
1178 /* just inject exception */
1179 r = -1;
1180 break;
1181 default:
1182 r = -1;
1183 DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipbl);
1184 break;
1185 }
1186
1187 return r;
1188 }
1189
1190 static int handle_e3(S390CPU *cpu, struct kvm_run *run, uint8_t ipbl)
1191 {
1192 int r = 0;
1193
1194 switch (ipbl) {
1195 case PRIV_E3_MPCIFC:
1196 r = kvm_mpcifc_service_call(cpu, run);
1197 break;
1198 case PRIV_E3_STPCIFC:
1199 r = kvm_stpcifc_service_call(cpu, run);
1200 break;
1201 default:
1202 r = -1;
1203 DPRINTF("KVM: unhandled PRIV: 0xe3%x\n", ipbl);
1204 break;
1205 }
1206
1207 return r;
1208 }
1209
1210 static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
1211 {
1212 CPUS390XState *env = &cpu->env;
1213 int ret;
1214
1215 cpu_synchronize_state(CPU(cpu));
1216 ret = s390_virtio_hypercall(env);
1217 if (ret == -EINVAL) {
1218 enter_pgmcheck(cpu, PGM_SPECIFICATION);
1219 return 0;
1220 }
1221
1222 return ret;
1223 }
1224
1225 static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run)
1226 {
1227 uint64_t r1, r3;
1228
1229 cpu_synchronize_state(CPU(cpu));
1230 r1 = (run->s390_sieic.ipa & 0x00f0) >> 4;
1231 r3 = run->s390_sieic.ipa & 0x000f;
1232 handle_diag_308(&cpu->env, r1, r3);
1233 }
1234
1235 static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
1236 {
1237 CPUS390XState *env = &cpu->env;
1238 unsigned long pc;
1239
1240 cpu_synchronize_state(CPU(cpu));
1241
1242 pc = env->psw.addr - 4;
1243 if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
1244 env->psw.addr = pc;
1245 return EXCP_DEBUG;
1246 }
1247
1248 return -ENOENT;
1249 }
1250
1251 #define DIAG_KVM_CODE_MASK 0x000000000000ffff
1252
1253 static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
1254 {
1255 int r = 0;
1256 uint16_t func_code;
1257
1258 /*
1259 * For any diagnose call we support, bits 48-63 of the resulting
1260 * address specify the function code; the remainder is ignored.
1261 */
1262 func_code = decode_basedisp_rs(&cpu->env, ipb, NULL) & DIAG_KVM_CODE_MASK;
1263 switch (func_code) {
1264 case DIAG_IPL:
1265 kvm_handle_diag_308(cpu, run);
1266 break;
1267 case DIAG_KVM_HYPERCALL:
1268 r = handle_hypercall(cpu, run);
1269 break;
1270 case DIAG_KVM_BREAKPOINT:
1271 r = handle_sw_breakpoint(cpu, run);
1272 break;
1273 default:
1274 DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
1275 enter_pgmcheck(cpu, PGM_SPECIFICATION);
1276 break;
1277 }
1278
1279 return r;
1280 }
1281
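/*
 * Illustrative encoding note: a KVM hypercall is issued by the guest as
 * "diag r1,r3,0x500". The base/displacement of the DIAG instruction
 * resolves to an address whose low 16 bits (here 0x500) form the function
 * code dispatched above.
 */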
1282 typedef struct SigpInfo {
1283 S390CPU *cpu;
1284 uint64_t param;
1285 int cc;
1286 uint64_t *status_reg;
1287 } SigpInfo;
1288
1289 static void set_sigp_status(SigpInfo *si, uint64_t status)
1290 {
1291 *si->status_reg &= 0xffffffff00000000ULL;
1292 *si->status_reg |= status;
1293 si->cc = SIGP_CC_STATUS_STORED;
1294 }
1295
1296 static void sigp_start(void *arg)
1297 {
1298 SigpInfo *si = arg;
1299
1300 if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
1301 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1302 return;
1303 }
1304
1305 s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
1306 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1307 }
1308
1309 static void sigp_stop(void *arg)
1310 {
1311 SigpInfo *si = arg;
1312 struct kvm_s390_irq irq = {
1313 .type = KVM_S390_SIGP_STOP,
1314 };
1315
1316 if (s390_cpu_get_state(si->cpu) != CPU_STATE_OPERATING) {
1317 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1318 return;
1319 }
1320
1321 /* disabled wait - sleeping in user space */
1322 if (CPU(si->cpu)->halted) {
1323 s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
1324 } else {
1325 /* execute the stop function */
1326 si->cpu->env.sigp_order = SIGP_STOP;
1327 kvm_s390_vcpu_interrupt(si->cpu, &irq);
1328 }
1329 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1330 }
1331
1332 #define KVM_S390_STORE_STATUS_DEF_ADDR offsetof(LowCore, floating_pt_save_area)
1333 #define SAVE_AREA_SIZE 512
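/*
 * Layout of the 512-byte save area written below (byte offsets):
 *   0 fp regs, 128 gprs, 256 psw, 280 prefix, 284 fpc, 292 tod
 *   programmable reg, 296 cpu timer, 304 clock comparator (shifted),
 *   320 access regs, 384 control regs.
 */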
1334 static int kvm_s390_store_status(S390CPU *cpu, hwaddr addr, bool store_arch)
1335 {
1336 static const uint8_t ar_id = 1;
1337 uint64_t ckc = cpu->env.ckc >> 8;
1338 void *mem;
1339 hwaddr len = SAVE_AREA_SIZE;
1340
1341 mem = cpu_physical_memory_map(addr, &len, 1);
1342 if (!mem) {
1343 return -EFAULT;
1344 }
1345 if (len != SAVE_AREA_SIZE) {
1346 cpu_physical_memory_unmap(mem, len, 1, 0);
1347 return -EFAULT;
1348 }
1349
1350 if (store_arch) {
1351 cpu_physical_memory_write(offsetof(LowCore, ar_access_id), &ar_id, 1);
1352 }
1353 memcpy(mem, &cpu->env.fregs, 128);
1354 memcpy(mem + 128, &cpu->env.regs, 128);
1355 memcpy(mem + 256, &cpu->env.psw, 16);
1356 memcpy(mem + 280, &cpu->env.psa, 4);
1357 memcpy(mem + 284, &cpu->env.fpc, 4);
1358 memcpy(mem + 292, &cpu->env.todpr, 4);
1359 memcpy(mem + 296, &cpu->env.cputm, 8);
1360 memcpy(mem + 304, &ckc, 8);
1361 memcpy(mem + 320, &cpu->env.aregs, 64);
1362 memcpy(mem + 384, &cpu->env.cregs, 128);
1363
1364 cpu_physical_memory_unmap(mem, len, 1, len);
1365
1366 return 0;
1367 }
1368
1369 static void sigp_stop_and_store_status(void *arg)
1370 {
1371 SigpInfo *si = arg;
1372 struct kvm_s390_irq irq = {
1373 .type = KVM_S390_SIGP_STOP,
1374 };
1375
1376 /* disabled wait - sleeping in user space */
1377 if (s390_cpu_get_state(si->cpu) == CPU_STATE_OPERATING &&
1378 CPU(si->cpu)->halted) {
1379 s390_cpu_set_state(CPU_STATE_STOPPED, si->cpu);
1380 }
1381
1382 switch (s390_cpu_get_state(si->cpu)) {
1383 case CPU_STATE_OPERATING:
1384 si->cpu->env.sigp_order = SIGP_STOP_STORE_STATUS;
1385 kvm_s390_vcpu_interrupt(si->cpu, &irq);
1386 /* store will be performed when handling the stop intercept */
1387 break;
1388 case CPU_STATE_STOPPED:
1389 /* already stopped, just store the status */
1390 cpu_synchronize_state(CPU(si->cpu));
1391 kvm_s390_store_status(si->cpu, KVM_S390_STORE_STATUS_DEF_ADDR, true);
1392 break;
1393 }
1394 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1395 }
1396
1397 static void sigp_store_status_at_address(void *arg)
1398 {
1399 SigpInfo *si = arg;
1400 uint32_t address = si->param & 0x7ffffe00u;
1401
1402 /* cpu has to be stopped */
1403 if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
1404 set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
1405 return;
1406 }
1407
1408 cpu_synchronize_state(CPU(si->cpu));
1409
1410 if (kvm_s390_store_status(si->cpu, address, false)) {
1411 set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
1412 return;
1413 }
1414 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1415 }
1416
1417 static void sigp_restart(void *arg)
1418 {
1419 SigpInfo *si = arg;
1420 struct kvm_s390_irq irq = {
1421 .type = KVM_S390_RESTART,
1422 };
1423
1424 switch (s390_cpu_get_state(si->cpu)) {
1425 case CPU_STATE_STOPPED:
1426 /* the restart irq has to be delivered prior to any other pending irq */
1427 cpu_synchronize_state(CPU(si->cpu));
1428 do_restart_interrupt(&si->cpu->env);
1429 s390_cpu_set_state(CPU_STATE_OPERATING, si->cpu);
1430 break;
1431 case CPU_STATE_OPERATING:
1432 kvm_s390_vcpu_interrupt(si->cpu, &irq);
1433 break;
1434 }
1435 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1436 }
1437
1438 int kvm_s390_cpu_restart(S390CPU *cpu)
1439 {
1440 SigpInfo si = {
1441 .cpu = cpu,
1442 };
1443
1444 run_on_cpu(CPU(cpu), sigp_restart, &si);
1445 DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
1446 return 0;
1447 }
1448
1449 static void sigp_initial_cpu_reset(void *arg)
1450 {
1451 SigpInfo *si = arg;
1452 CPUState *cs = CPU(si->cpu);
1453 S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
1454
1455 cpu_synchronize_state(cs);
1456 scc->initial_cpu_reset(cs);
1457 cpu_synchronize_post_reset(cs);
1458 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1459 }
1460
1461 static void sigp_cpu_reset(void *arg)
1462 {
1463 SigpInfo *si = arg;
1464 CPUState *cs = CPU(si->cpu);
1465 S390CPUClass *scc = S390_CPU_GET_CLASS(si->cpu);
1466
1467 cpu_synchronize_state(cs);
1468 scc->cpu_reset(cs);
1469 cpu_synchronize_post_reset(cs);
1470 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1471 }
1472
1473 static void sigp_set_prefix(void *arg)
1474 {
1475 SigpInfo *si = arg;
1476 uint32_t addr = si->param & 0x7fffe000u;
1477
1478 cpu_synchronize_state(CPU(si->cpu));
1479
1480 if (!address_space_access_valid(&address_space_memory, addr,
1481 sizeof(struct LowCore), false)) {
1482 set_sigp_status(si, SIGP_STAT_INVALID_PARAMETER);
1483 return;
1484 }
1485
1486 /* cpu has to be stopped */
1487 if (s390_cpu_get_state(si->cpu) != CPU_STATE_STOPPED) {
1488 set_sigp_status(si, SIGP_STAT_INCORRECT_STATE);
1489 return;
1490 }
1491
1492 si->cpu->env.psa = addr;
1493 cpu_synchronize_post_init(CPU(si->cpu));
1494 si->cc = SIGP_CC_ORDER_CODE_ACCEPTED;
1495 }
1496
1497 static int handle_sigp_single_dst(S390CPU *dst_cpu, uint8_t order,
1498 uint64_t param, uint64_t *status_reg)
1499 {
1500 SigpInfo si = {
1501 .cpu = dst_cpu,
1502 .param = param,
1503 .status_reg = status_reg,
1504 };
1505
1506 /* cpu available? */
1507 if (dst_cpu == NULL) {
1508 return SIGP_CC_NOT_OPERATIONAL;
1509 }
1510
1511 /* only resets can break pending orders */
1512 if (dst_cpu->env.sigp_order != 0 &&
1513 order != SIGP_CPU_RESET &&
1514 order != SIGP_INITIAL_CPU_RESET) {
1515 return SIGP_CC_BUSY;
1516 }
1517
1518 switch (order) {
1519 case SIGP_START:
1520 run_on_cpu(CPU(dst_cpu), sigp_start, &si);
1521 break;
1522 case SIGP_STOP:
1523 run_on_cpu(CPU(dst_cpu), sigp_stop, &si);
1524 break;
1525 case SIGP_RESTART:
1526 run_on_cpu(CPU(dst_cpu), sigp_restart, &si);
1527 break;
1528 case SIGP_STOP_STORE_STATUS:
1529 run_on_cpu(CPU(dst_cpu), sigp_stop_and_store_status, &si);
1530 break;
1531 case SIGP_STORE_STATUS_ADDR:
1532 run_on_cpu(CPU(dst_cpu), sigp_store_status_at_address, &si);
1533 break;
1534 case SIGP_SET_PREFIX:
1535 run_on_cpu(CPU(dst_cpu), sigp_set_prefix, &si);
1536 break;
1537 case SIGP_INITIAL_CPU_RESET:
1538 run_on_cpu(CPU(dst_cpu), sigp_initial_cpu_reset, &si);
1539 break;
1540 case SIGP_CPU_RESET:
1541 run_on_cpu(CPU(dst_cpu), sigp_cpu_reset, &si);
1542 break;
1543 default:
1544 DPRINTF("KVM: unknown SIGP: 0x%x\n", order);
1545 set_sigp_status(&si, SIGP_STAT_INVALID_ORDER);
1546 }
1547
1548 return si.cc;
1549 }
1550
1551 static int sigp_set_architecture(S390CPU *cpu, uint32_t param,
1552 uint64_t *status_reg)
1553 {
1554 CPUState *cur_cs;
1555 S390CPU *cur_cpu;
1556
1557 /* due to the BQL, we are the only active cpu */
1558 CPU_FOREACH(cur_cs) {
1559 cur_cpu = S390_CPU(cur_cs);
1560 if (cur_cpu->env.sigp_order != 0) {
1561 return SIGP_CC_BUSY;
1562 }
1563 cpu_synchronize_state(cur_cs);
1564 /* all but the current one have to be stopped */
1565 if (cur_cpu != cpu &&
1566 s390_cpu_get_state(cur_cpu) != CPU_STATE_STOPPED) {
1567 *status_reg &= 0xffffffff00000000ULL;
1568 *status_reg |= SIGP_STAT_INCORRECT_STATE;
1569 return SIGP_CC_STATUS_STORED;
1570 }
1571 }
1572
1573 switch (param & 0xff) {
1574 case SIGP_MODE_ESA_S390:
1575 /* not supported */
1576 return SIGP_CC_NOT_OPERATIONAL;
1577 case SIGP_MODE_Z_ARCH_TRANS_ALL_PSW:
1578 case SIGP_MODE_Z_ARCH_TRANS_CUR_PSW:
1579 CPU_FOREACH(cur_cs) {
1580 cur_cpu = S390_CPU(cur_cs);
1581 cur_cpu->env.pfault_token = -1UL;
1582 }
1583 break;
1584 default:
1585 *status_reg &= 0xffffffff00000000ULL;
1586 *status_reg |= SIGP_STAT_INVALID_PARAMETER;
1587 return SIGP_CC_STATUS_STORED;
1588 }
1589
1590 return SIGP_CC_ORDER_CODE_ACCEPTED;
1591 }
1592
1593 #define SIGP_ORDER_MASK 0x000000ff
1594
1595 static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
1596 {
1597 CPUS390XState *env = &cpu->env;
1598 const uint8_t r1 = ipa1 >> 4;
1599 const uint8_t r3 = ipa1 & 0x0f;
1600 int ret;
1601 uint8_t order;
1602 uint64_t *status_reg;
1603 uint64_t param;
1604 S390CPU *dst_cpu = NULL;
1605
1606 cpu_synchronize_state(CPU(cpu));
1607
1608 /* get order code */
1609 order = decode_basedisp_rs(env, run->s390_sieic.ipb, NULL)
1610 & SIGP_ORDER_MASK;
1611 status_reg = &env->regs[r1];
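/* the parameter lives in the odd-numbered register of the even-odd r1 pair */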
1612 param = (r1 % 2) ? env->regs[r1] : env->regs[r1 + 1];
1613
1614 switch (order) {
1615 case SIGP_SET_ARCH:
1616 ret = sigp_set_architecture(cpu, param, status_reg);
1617 break;
1618 default:
1619 /* all other sigp orders target a single vcpu */
1620 dst_cpu = s390_cpu_addr2state(env->regs[r3]);
1621 ret = handle_sigp_single_dst(dst_cpu, order, param, status_reg);
1622 }
1623
1624 trace_kvm_sigp_finished(order, CPU(cpu)->cpu_index,
1625 dst_cpu ? CPU(dst_cpu)->cpu_index : -1, ret);
1626
1627 if (ret >= 0) {
1628 setcc(cpu, ret);
1629 return 0;
1630 }
1631
1632 return ret;
1633 }
1634
1635 static int handle_instruction(S390CPU *cpu, struct kvm_run *run)
1636 {
1637 unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00);
1638 uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff;
1639 int r = -1;
1640
1641 DPRINTF("handle_instruction 0x%x 0x%x\n",
1642 run->s390_sieic.ipa, run->s390_sieic.ipb);
1643 switch (ipa0) {
1644 case IPA0_B2:
1645 r = handle_b2(cpu, run, ipa1);
1646 break;
1647 case IPA0_B9:
1648 r = handle_b9(cpu, run, ipa1);
1649 break;
1650 case IPA0_EB:
1651 r = handle_eb(cpu, run, run->s390_sieic.ipb & 0xff);
1652 break;
1653 case IPA0_E3:
1654 r = handle_e3(cpu, run, run->s390_sieic.ipb & 0xff);
1655 break;
1656 case IPA0_DIAG:
1657 r = handle_diag(cpu, run, run->s390_sieic.ipb);
1658 break;
1659 case IPA0_SIGP:
1660 r = handle_sigp(cpu, run, ipa1);
1661 break;
1662 }
1663
1664 if (r < 0) {
1665 r = 0;
1666 enter_pgmcheck(cpu, 0x0001);
1667 }
1668
1669 return r;
1670 }
1671
1672 static bool is_special_wait_psw(CPUState *cs)
1673 {
1674 /* signal quiesce */
1675 return cs->kvm_run->psw_addr == 0xfffUL;
1676 }
1677
1678 static void guest_panicked(void)
1679 {
1680 qapi_event_send_guest_panicked(GUEST_PANIC_ACTION_PAUSE,
1681 &error_abort);
1682 vm_stop(RUN_STATE_GUEST_PANICKED);
1683 }
1684
1685 static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset)
1686 {
1687 CPUState *cs = CPU(cpu);
1688
1689 error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx",
1690 str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset),
1691 ldq_phys(cs->as, cpu->env.psa + pswoffset + 8));
1692 s390_cpu_halt(cpu);
1693 guest_panicked();
1694 }
1695
1696 static int handle_intercept(S390CPU *cpu)
1697 {
1698 CPUState *cs = CPU(cpu);
1699 struct kvm_run *run = cs->kvm_run;
1700 int icpt_code = run->s390_sieic.icptcode;
1701 int r = 0;
1702
1703 DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
1704 (long)cs->kvm_run->psw_addr);
1705 switch (icpt_code) {
1706 case ICPT_INSTRUCTION:
1707 r = handle_instruction(cpu, run);
1708 break;
1709 case ICPT_PROGRAM:
1710 unmanageable_intercept(cpu, "program interrupt",
1711 offsetof(LowCore, program_new_psw));
1712 r = EXCP_HALTED;
1713 break;
1714 case ICPT_EXT_INT:
1715 unmanageable_intercept(cpu, "external interrupt",
1716 offsetof(LowCore, external_new_psw));
1717 r = EXCP_HALTED;
1718 break;
1719 case ICPT_WAITPSW:
1720 /* disabled wait, since enabled wait is handled in kernel */
1721 cpu_synchronize_state(cs);
1722 if (s390_cpu_halt(cpu) == 0) {
1723 if (is_special_wait_psw(cs)) {
1724 qemu_system_shutdown_request();
1725 } else {
1726 guest_panicked();
1727 }
1728 }
1729 r = EXCP_HALTED;
1730 break;
1731 case ICPT_CPU_STOP:
1732 if (s390_cpu_set_state(CPU_STATE_STOPPED, cpu) == 0) {
1733 qemu_system_shutdown_request();
1734 }
1735 if (cpu->env.sigp_order == SIGP_STOP_STORE_STATUS) {
1736 kvm_s390_store_status(cpu, KVM_S390_STORE_STATUS_DEF_ADDR,
1737 true);
1738 }
1739 cpu->env.sigp_order = 0;
1740 r = EXCP_HALTED;
1741 break;
1742 case ICPT_SOFT_INTERCEPT:
1743 fprintf(stderr, "KVM unimplemented icpt SOFT\n");
1744 exit(1);
1745 break;
1746 case ICPT_IO:
1747 fprintf(stderr, "KVM unimplemented icpt IO\n");
1748 exit(1);
1749 break;
1750 default:
1751 fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
1752 exit(1);
1753 break;
1754 }
1755
1756 return r;
1757 }
1758
1759 static int handle_tsch(S390CPU *cpu)
1760 {
1761 CPUState *cs = CPU(cpu);
1762 struct kvm_run *run = cs->kvm_run;
1763 int ret;
1764
1765 cpu_synchronize_state(cs);
1766
1767 ret = ioinst_handle_tsch(cpu, cpu->env.regs[1], run->s390_tsch.ipb);
1768 if (ret < 0) {
1769 /*
1770 * Failure.
1771 * If an I/O interrupt had been dequeued, we have to reinject it.
1772 */
1773 if (run->s390_tsch.dequeued) {
1774 kvm_s390_io_interrupt(run->s390_tsch.subchannel_id,
1775 run->s390_tsch.subchannel_nr,
1776 run->s390_tsch.io_int_parm,
1777 run->s390_tsch.io_int_word);
1778 }
1779 ret = 0;
1780 }
1781 return ret;
1782 }
1783
1784 static void insert_stsi_3_2_2(S390CPU *cpu, __u64 addr, uint8_t ar)
1785 {
1786 struct sysib_322 sysib;
1787 int del;
1788
1789 if (s390_cpu_virt_mem_read(cpu, addr, ar, &sysib, sizeof(sysib))) {
1790 return;
1791 }
1792 /* Shift the stack of Extended Names to prepare for our own data */
1793 memmove(&sysib.ext_names[1], &sysib.ext_names[0],
1794 sizeof(sysib.ext_names[0]) * (sysib.count - 1));
1795 /* The first virt level that doesn't provide Ext Names delimits the stack.
1796 * It is assumed to be incapable of managing Extended Names for lower levels.
1797 */
1798 for (del = 1; del < sysib.count; del++) {
1799 if (!sysib.vm[del].ext_name_encoding || !sysib.ext_names[del][0]) {
1800 break;
1801 }
1802 }
1803 if (del < sysib.count) {
1804 memset(sysib.ext_names[del], 0,
1805 sizeof(sysib.ext_names[0]) * (sysib.count - del));
1806 }
1807 /* Insert short machine name in EBCDIC, padded with blanks */
1808 if (qemu_name) {
1809 memset(sysib.vm[0].name, 0x40, sizeof(sysib.vm[0].name));
1810 ebcdic_put(sysib.vm[0].name, qemu_name, MIN(sizeof(sysib.vm[0].name),
1811 strlen(qemu_name)));
1812 }
1813 sysib.vm[0].ext_name_encoding = 2; /* 2 = UTF-8 */
1814 memset(sysib.ext_names[0], 0, sizeof(sysib.ext_names[0]));
1815 /* If the hypervisor specifies a zero Extended Name in the STSI 3.2.2 SYSIB,
1816 * s390 considers it incapable of providing any Extended Name. Therefore,
1817 * if no name was specified on qemu invocation, we go with the same
1818 * "KVMguest" default that KVM has filled into the short name field.
1819 */
1820 if (qemu_name) {
1821 strncpy((char *)sysib.ext_names[0], qemu_name,
1822 sizeof(sysib.ext_names[0]));
1823 } else {
1824 strcpy((char *)sysib.ext_names[0], "KVMguest");
1825 }
1826 /* Insert UUID */
1827 memcpy(sysib.vm[0].uuid, qemu_uuid, sizeof(sysib.vm[0].uuid));
1828
1829 s390_cpu_virt_mem_write(cpu, addr, ar, &sysib, sizeof(sysib));
1830 }
1831
1832 static int handle_stsi(S390CPU *cpu)
1833 {
1834 CPUState *cs = CPU(cpu);
1835 struct kvm_run *run = cs->kvm_run;
1836
1837 switch (run->s390_stsi.fc) {
1838 case 3:
1839 if (run->s390_stsi.sel1 != 2 || run->s390_stsi.sel2 != 2) {
1840 return 0;
1841 }
1842 /* Only sysib 3.2.2 needs post-handling for now. */
1843 insert_stsi_3_2_2(cpu, run->s390_stsi.addr, run->s390_stsi.ar);
1844 return 0;
1845 default:
1846 return 0;
1847 }
1848 }
1849
1850 static int kvm_arch_handle_debug_exit(S390CPU *cpu)
1851 {
1852 CPUState *cs = CPU(cpu);
1853 struct kvm_run *run = cs->kvm_run;
1854
1855 int ret = 0;
1856 struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1857
1858 switch (arch_info->type) {
1859 case KVM_HW_WP_WRITE:
1860 if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1861 cs->watchpoint_hit = &hw_watchpoint;
1862 hw_watchpoint.vaddr = arch_info->addr;
1863 hw_watchpoint.flags = BP_MEM_WRITE;
1864 ret = EXCP_DEBUG;
1865 }
1866 break;
1867 case KVM_HW_BP:
1868 if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) {
1869 ret = EXCP_DEBUG;
1870 }
1871 break;
1872 case KVM_SINGLESTEP:
1873 if (cs->singlestep_enabled) {
1874 ret = EXCP_DEBUG;
1875 }
1876 break;
1877 default:
1878 ret = -ENOSYS;
1879 }
1880
1881 return ret;
1882 }
1883
1884 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1885 {
1886 S390CPU *cpu = S390_CPU(cs);
1887 int ret = 0;
1888
1889 switch (run->exit_reason) {
1890 case KVM_EXIT_S390_SIEIC:
1891 ret = handle_intercept(cpu);
1892 break;
1893 case KVM_EXIT_S390_RESET:
1894 s390_reipl_request();
1895 break;
1896 case KVM_EXIT_S390_TSCH:
1897 ret = handle_tsch(cpu);
1898 break;
1899 case KVM_EXIT_S390_STSI:
1900 ret = handle_stsi(cpu);
1901 break;
1902 case KVM_EXIT_DEBUG:
1903 ret = kvm_arch_handle_debug_exit(cpu);
1904 break;
1905 default:
1906 fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason);
1907 break;
1908 }
1909
1910 if (ret == 0) {
1911 ret = EXCP_INTERRUPT;
1912 }
1913 return ret;
1914 }
1915
1916 bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
1917 {
1918 return true;
1919 }
1920
1921 int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr)
1922 {
1923 return 1;
1924 }
1925
1926 int kvm_arch_on_sigbus(int code, void *addr)
1927 {
1928 return 1;
1929 }
1930
1931 void kvm_s390_io_interrupt(uint16_t subchannel_id,
1932 uint16_t subchannel_nr, uint32_t io_int_parm,
1933 uint32_t io_int_word)
1934 {
1935 struct kvm_s390_irq irq = {
1936 .u.io.subchannel_id = subchannel_id,
1937 .u.io.subchannel_nr = subchannel_nr,
1938 .u.io.io_int_parm = io_int_parm,
1939 .u.io.io_int_word = io_int_word,
1940 };
1941
1942 if (io_int_word & IO_INT_WORD_AI) {
1943 irq.type = KVM_S390_INT_IO(1, 0, 0, 0);
1944 } else {
1945 irq.type = ((subchannel_id & 0xff00) << 24) |
1946 ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
1947 }
1948 kvm_s390_floating_interrupt(&irq);
1949 }
1950
1951 void kvm_s390_crw_mchk(void)
1952 {
1953 struct kvm_s390_irq irq = {
1954 .type = KVM_S390_MCHK,
1955 .u.mchk.cr14 = 1 << 28,
1956 .u.mchk.mcic = 0x00400f1d40330000ULL,
1957 };
1958 kvm_s390_floating_interrupt(&irq);
1959 }
1960
1961 void kvm_s390_enable_css_support(S390CPU *cpu)
1962 {
1963 int r;
1964
1965 /* Activate host kernel channel subsystem support. */
1966 r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0);
1967 assert(r == 0);
1968 }
1969
1970 void kvm_arch_init_irq_routing(KVMState *s)
1971 {
1972 /*
1973 * Note that while irqchip capabilities generally imply that cpustates
1974 * are handled in-kernel, it is not true for s390 (yet); therefore, we
1975 * have to override the common code kvm_halt_in_kernel_allowed setting.
1976 */
1977 if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
1978 kvm_gsi_routing_allowed = true;
1979 kvm_halt_in_kernel_allowed = false;
1980 }
1981 }
1982
1983 int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch,
1984 int vq, bool assign)
1985 {
1986 struct kvm_ioeventfd kick = {
1987 .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY |
1988 KVM_IOEVENTFD_FLAG_DATAMATCH,
1989 .fd = event_notifier_get_fd(notifier),
1990 .datamatch = vq,
1991 .addr = sch,
1992 .len = 8,
1993 };
1994 if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) {
1995 return -ENOSYS;
1996 }
1997 if (!assign) {
1998 kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN;
1999 }
2000 return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick);
2001 }
2002
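/*
 * Usage sketch (hypothetical, for illustration): registering an ioeventfd
 * so that guest "notify" operations on virtqueue 0 of a subchannel wake
 * the notifier directly instead of taking a heavyweight exit.
 * example_assign_notifier() is an assumption, not QEMU API.
 */
#if 0 /* example only */
static int example_assign_notifier(EventNotifier *n, uint32_t sch_id)
{
    return kvm_s390_assign_subch_ioeventfd(n, sch_id, 0, true);
}
#endif
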
2003 int kvm_s390_get_memslot_count(KVMState *s)
2004 {
2005 return kvm_check_extension(s, KVM_CAP_NR_MEMSLOTS);
2006 }
2007
2008 int kvm_s390_set_cpu_state(S390CPU *cpu, uint8_t cpu_state)
2009 {
2010 struct kvm_mp_state mp_state = {};
2011 int ret;
2012
2013 /* the kvm part might not have been initialized yet */
2014 if (CPU(cpu)->kvm_state == NULL) {
2015 return 0;
2016 }
2017
2018 switch (cpu_state) {
2019 case CPU_STATE_STOPPED:
2020 mp_state.mp_state = KVM_MP_STATE_STOPPED;
2021 break;
2022 case CPU_STATE_CHECK_STOP:
2023 mp_state.mp_state = KVM_MP_STATE_CHECK_STOP;
2024 break;
2025 case CPU_STATE_OPERATING:
2026 mp_state.mp_state = KVM_MP_STATE_OPERATING;
2027 break;
2028 case CPU_STATE_LOAD:
2029 mp_state.mp_state = KVM_MP_STATE_LOAD;
2030 break;
2031 default:
2032 error_report("Requested CPU state is not a valid S390 CPU state: %u",
2033 cpu_state);
2034 exit(1);
2035 }
2036
2037 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_SET_MP_STATE, &mp_state);
2038 if (ret) {
2039 trace_kvm_failed_cpu_state_set(CPU(cpu)->cpu_index, cpu_state,
2040 strerror(-ret));
2041 }
2042
2043 return ret;
2044 }
2045
2046 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2047 uint64_t address, uint32_t data)
2048 {
2049 S390PCIBusDevice *pbdev;
2050 uint32_t fid = data >> ZPCI_MSI_VEC_BITS;
2051 uint32_t vec = data & ZPCI_MSI_VEC_MASK;
2052
2053 pbdev = s390_pci_find_dev_by_fid(fid);
2054 if (!pbdev) {
2055 DPRINTF("add_msi_route no dev\n");
2056 return -ENODEV;
2057 }
2058
2059 pbdev->routes.adapter.ind_offset = vec;
2060
2061 route->type = KVM_IRQ_ROUTING_S390_ADAPTER;
2062 route->flags = 0;
2063 route->u.adapter.summary_addr = pbdev->routes.adapter.summary_addr;
2064 route->u.adapter.ind_addr = pbdev->routes.adapter.ind_addr;
2065 route->u.adapter.summary_offset = pbdev->routes.adapter.summary_offset;
2066 route->u.adapter.ind_offset = pbdev->routes.adapter.ind_offset;
2067 route->u.adapter.adapter_id = pbdev->routes.adapter.adapter_id;
2068 return 0;
2069 }