]>
Commit | Line | Data |
---|---|---|
0e60a699 AG |
1 | /* |
2 | * QEMU S390x KVM implementation | |
3 | * | |
4 | * Copyright (c) 2009 Alexander Graf <agraf@suse.de> | |
ccb084d3 | 5 | * Copyright IBM Corp. 2012 |
0e60a699 AG |
6 | * |
7 | * This library is free software; you can redistribute it and/or | |
8 | * modify it under the terms of the GNU Lesser General Public | |
9 | * License as published by the Free Software Foundation; either | |
10 | * version 2 of the License, or (at your option) any later version. | |
11 | * | |
12 | * This library is distributed in the hope that it will be useful, | |
13 | * but WITHOUT ANY WARRANTY; without even the implied warranty of | |
14 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU | |
15 | * Lesser General Public License for more details. | |
16 | * | |
ccb084d3 CB |
17 | * Contributions after 2012-10-29 are licensed under the terms of the |
18 | * GNU GPL, version 2 or (at your option) any later version. | |
19 | * | |
20 | * You should have received a copy of the GNU (Lesser) General Public | |
0e60a699 AG |
21 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
22 | */ | |
23 | ||
24 | #include <sys/types.h> | |
25 | #include <sys/ioctl.h> | |
26 | #include <sys/mman.h> | |
27 | ||
28 | #include <linux/kvm.h> | |
29 | #include <asm/ptrace.h> | |
30 | ||
31 | #include "qemu-common.h" | |
1de7afc9 | 32 | #include "qemu/timer.h" |
9c17d615 PB |
33 | #include "sysemu/sysemu.h" |
34 | #include "sysemu/kvm.h" | |
4cb88c3c | 35 | #include "hw/hw.h" |
0e60a699 | 36 | #include "cpu.h" |
9c17d615 | 37 | #include "sysemu/device_tree.h" |
08eb8c85 CB |
38 | #include "qapi/qmp/qjson.h" |
39 | #include "monitor/monitor.h" | |
770a6379 | 40 | #include "exec/gdbstub.h" |
860643bc | 41 | #include "trace.h" |
0e60a699 AG |
42 | |
43 | /* #define DEBUG_KVM */ | |
44 | ||
45 | #ifdef DEBUG_KVM | |
e67137c6 | 46 | #define DPRINTF(fmt, ...) \ |
0e60a699 AG |
47 | do { fprintf(stderr, fmt, ## __VA_ARGS__); } while (0) |
48 | #else | |
e67137c6 | 49 | #define DPRINTF(fmt, ...) \ |
0e60a699 AG |
50 | do { } while (0) |
51 | #endif | |
52 | ||
53 | #define IPA0_DIAG 0x8300 | |
54 | #define IPA0_SIGP 0xae00 | |
09b99878 CH |
55 | #define IPA0_B2 0xb200 |
56 | #define IPA0_B9 0xb900 | |
57 | #define IPA0_EB 0xeb00 | |
0e60a699 | 58 | |
1eecf41b FB |
59 | #define PRIV_B2_SCLP_CALL 0x20 |
60 | #define PRIV_B2_CSCH 0x30 | |
61 | #define PRIV_B2_HSCH 0x31 | |
62 | #define PRIV_B2_MSCH 0x32 | |
63 | #define PRIV_B2_SSCH 0x33 | |
64 | #define PRIV_B2_STSCH 0x34 | |
65 | #define PRIV_B2_TSCH 0x35 | |
66 | #define PRIV_B2_TPI 0x36 | |
67 | #define PRIV_B2_SAL 0x37 | |
68 | #define PRIV_B2_RSCH 0x38 | |
69 | #define PRIV_B2_STCRW 0x39 | |
70 | #define PRIV_B2_STCPS 0x3a | |
71 | #define PRIV_B2_RCHP 0x3b | |
72 | #define PRIV_B2_SCHM 0x3c | |
73 | #define PRIV_B2_CHSC 0x5f | |
74 | #define PRIV_B2_SIGA 0x74 | |
75 | #define PRIV_B2_XSCH 0x76 | |
76 | ||
77 | #define PRIV_EB_SQBS 0x8a | |
78 | ||
79 | #define PRIV_B9_EQBS 0x9c | |
80 | ||
268846ba | 81 | #define DIAG_IPL 0x308 |
0e60a699 AG |
82 | #define DIAG_KVM_HYPERCALL 0x500 |
83 | #define DIAG_KVM_BREAKPOINT 0x501 | |
84 | ||
0e60a699 | 85 | #define ICPT_INSTRUCTION 0x04 |
a2689242 | 86 | #define ICPT_EXT_INT 0x14 |
0e60a699 AG |
87 | #define ICPT_WAITPSW 0x1c |
88 | #define ICPT_SOFT_INTERCEPT 0x24 | |
89 | #define ICPT_CPU_STOP 0x28 | |
90 | #define ICPT_IO 0x40 | |
91 | ||
770a6379 DH |
92 | static CPUWatchpoint hw_watchpoint; |
93 | /* | |
94 | * We don't use a list because this structure is also used to transmit the | |
95 | * hardware breakpoints to the kernel. | |
96 | */ | |
97 | static struct kvm_hw_breakpoint *hw_breakpoints; | |
98 | static int nb_hw_breakpoints; | |
99 | ||
94a8d39a JK |
100 | const KVMCapabilityInfo kvm_arch_required_capabilities[] = { |
101 | KVM_CAP_LAST_INFO | |
102 | }; | |
103 | ||
5b08b344 | 104 | static int cap_sync_regs; |
819bd309 | 105 | static int cap_async_pf; |
5b08b344 | 106 | |
575ddeb4 | 107 | static void *legacy_s390_alloc(size_t size); |
91138037 | 108 | |
4cb88c3c DD |
109 | static int kvm_s390_check_clear_cmma(KVMState *s) |
110 | { | |
111 | struct kvm_device_attr attr = { | |
112 | .group = KVM_S390_VM_MEM_CTRL, | |
113 | .attr = KVM_S390_VM_MEM_CLR_CMMA, | |
114 | }; | |
115 | ||
116 | return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr); | |
117 | } | |
118 | ||
119 | static int kvm_s390_check_enable_cmma(KVMState *s) | |
120 | { | |
121 | struct kvm_device_attr attr = { | |
122 | .group = KVM_S390_VM_MEM_CTRL, | |
123 | .attr = KVM_S390_VM_MEM_ENABLE_CMMA, | |
124 | }; | |
125 | ||
126 | return kvm_vm_ioctl(s, KVM_HAS_DEVICE_ATTR, &attr); | |
127 | } | |
128 | ||
129 | void kvm_s390_clear_cmma_callback(void *opaque) | |
130 | { | |
131 | int rc; | |
132 | KVMState *s = opaque; | |
133 | struct kvm_device_attr attr = { | |
134 | .group = KVM_S390_VM_MEM_CTRL, | |
135 | .attr = KVM_S390_VM_MEM_CLR_CMMA, | |
136 | }; | |
137 | ||
138 | rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr); | |
139 | trace_kvm_clear_cmma(rc); | |
140 | } | |
141 | ||
142 | static void kvm_s390_enable_cmma(KVMState *s) | |
143 | { | |
144 | int rc; | |
145 | struct kvm_device_attr attr = { | |
146 | .group = KVM_S390_VM_MEM_CTRL, | |
147 | .attr = KVM_S390_VM_MEM_ENABLE_CMMA, | |
148 | }; | |
149 | ||
150 | if (kvm_s390_check_enable_cmma(s) || kvm_s390_check_clear_cmma(s)) { | |
151 | return; | |
152 | } | |
153 | ||
154 | rc = kvm_vm_ioctl(s, KVM_SET_DEVICE_ATTR, &attr); | |
155 | if (!rc) { | |
156 | qemu_register_reset(kvm_s390_clear_cmma_callback, s); | |
157 | } | |
158 | trace_kvm_enable_cmma(rc); | |
159 | } | |
160 | ||
/*
 * Architecture-specific KVM init: probe optional capabilities and
 * configure guest memory handling accordingly.  Always succeeds.
 */
int kvm_arch_init(KVMState *s)
{
    /* Cache capability checks used on every register sync. */
    cap_sync_regs = kvm_check_extension(s, KVM_CAP_SYNC_REGS);
    cap_async_pf = kvm_check_extension(s, KVM_CAP_ASYNC_PF);

    if (kvm_check_extension(s, KVM_CAP_VM_ATTRIBUTES)) {
        kvm_s390_enable_cmma(s);
    }

    /* Without GMAP/COW support, guest RAM must come from the legacy
     * fixed-address allocator (see legacy_s390_alloc()). */
    if (!kvm_check_extension(s, KVM_CAP_S390_GMAP)
        || !kvm_check_extension(s, KVM_CAP_S390_COW)) {
        phys_mem_set_alloc(legacy_s390_alloc);
    }
    return 0;
}
176 | ||
b164e48e EH |
/* On s390 the KVM vcpu id simply mirrors the QEMU cpu index. */
unsigned long kvm_arch_vcpu_id(CPUState *cpu)
{
    return cpu->cpu_index;
}
181 | ||
/* Per-vcpu init hook; no s390-specific setup is required (yet). */
int kvm_arch_init_vcpu(CPUState *cpu)
{
    /* nothing todo yet */
    return 0;
}
187 | ||
50a2c6e5 | 188 | void kvm_s390_reset_vcpu(S390CPU *cpu) |
0e60a699 | 189 | { |
50a2c6e5 PB |
190 | CPUState *cs = CPU(cpu); |
191 | ||
419831d7 AG |
192 | /* The initial reset call is needed here to reset in-kernel |
193 | * vcpu data that we can't access directly from QEMU | |
194 | * (i.e. with older kernels which don't support sync_regs/ONE_REG). | |
195 | * Before this ioctl cpu_synchronize_state() is called in common kvm | |
196 | * code (kvm-all) */ | |
50a2c6e5 | 197 | if (kvm_vcpu_ioctl(cs, KVM_S390_INITIAL_RESET, NULL)) { |
70bada03 JF |
198 | perror("Can't reset vcpu\n"); |
199 | } | |
0e60a699 AG |
200 | } |
201 | ||
/*
 * Push QEMU's CPU state into the kernel vcpu.
 *
 * Fast path: when KVM_CAP_SYNC_REGS advertises a register class, write it
 * into the shared kvm_run area and set the matching dirty bit; otherwise
 * fall back to the SET_REGS/SET_SREGS ioctls.  With level ==
 * KVM_PUT_RUNTIME_STATE only PSW and GPRs are transferred.
 * Returns 0 on success or a negative errno from a failed ioctl.
 */
int kvm_arch_put_registers(CPUState *cs, int level)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    int r;
    int i;

    /* always save the PSW and the GPRS*/
    cs->kvm_run->psw_addr = env->psw.addr;
    cs->kvm_run->psw_mask = env->psw.mask;

    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.gprs[i] = env->regs[i];
            cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_GPRS;
        }
    } else {
        for (i = 0; i < 16; i++) {
            regs.gprs[i] = env->regs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
        if (r < 0) {
            return r;
        }
    }

    /* Do we need to save more than that? */
    if (level == KVM_PUT_RUNTIME_STATE) {
        return 0;
    }

    /*
     * These ONE_REGS are not protected by a capability. As they are only
     * necessary for migration we just trace a possible error, but don't
     * return with an error return code.
     */
    kvm_set_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
    kvm_set_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
    kvm_set_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
    kvm_set_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
    kvm_set_one_reg(cs, KVM_REG_S390_PP, &env->pp);

    /* Async page-fault registers are capability-gated; here a failure
     * is fatal for the transfer. */
    if (cap_async_pf) {
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_set_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    /* Access and control registers: sync-regs path needs BOTH classes. */
    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            cs->kvm_run->s.regs.acrs[i] = env->aregs[i];
            cs->kvm_run->s.regs.crs[i] = env->cregs[i];
        }
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_ACRS;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_CRS;
    } else {
        for (i = 0; i < 16; i++) {
            sregs.acrs[i] = env->aregs[i];
            sregs.crs[i] = env->cregs[i];
        }
        r = kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
    }

    /* Finally the prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        cs->kvm_run->s.regs.prefix = env->psa;
        cs->kvm_run->kvm_dirty_regs |= KVM_SYNC_PREFIX;
    } else {
        /* prefix is only supported via sync regs */
    }
    return 0;
}
290 | ||
/*
 * Pull the vcpu state out of the kernel into QEMU's CPUS390XState.
 * Mirror image of kvm_arch_put_registers(): prefer the sync-regs area in
 * kvm_run when available, else use GET_REGS/GET_SREGS ioctls.
 * Returns 0 on success or a negative errno from a failed ioctl.
 */
int kvm_arch_get_registers(CPUState *cs)
{
    S390CPU *cpu = S390_CPU(cs);
    CPUS390XState *env = &cpu->env;
    struct kvm_sregs sregs;
    struct kvm_regs regs;
    int i, r;

    /* get the PSW */
    env->psw.addr = cs->kvm_run->psw_addr;
    env->psw.mask = cs->kvm_run->psw_mask;

    /* the GPRS */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_GPRS) {
        for (i = 0; i < 16; i++) {
            env->regs[i] = cs->kvm_run->s.regs.gprs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->regs[i] = regs.gprs[i];
        }
    }

    /* The ACRS and CRS */
    if (cap_sync_regs &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_ACRS &&
        cs->kvm_run->kvm_valid_regs & KVM_SYNC_CRS) {
        for (i = 0; i < 16; i++) {
            env->aregs[i] = cs->kvm_run->s.regs.acrs[i];
            env->cregs[i] = cs->kvm_run->s.regs.crs[i];
        }
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
        if (r < 0) {
            return r;
        }
        for (i = 0; i < 16; i++) {
            env->aregs[i] = sregs.acrs[i];
            env->cregs[i] = sregs.crs[i];
        }
    }

    /* The prefix */
    if (cap_sync_regs && cs->kvm_run->kvm_valid_regs & KVM_SYNC_PREFIX) {
        env->psa = cs->kvm_run->s.regs.prefix;
    }

    /*
     * These ONE_REGS are not protected by a capability. As they are only
     * necessary for migration we just trace a possible error, but don't
     * return with an error return code.
     */
    kvm_get_one_reg(cs, KVM_REG_S390_CPU_TIMER, &env->cputm);
    kvm_get_one_reg(cs, KVM_REG_S390_CLOCK_COMP, &env->ckc);
    kvm_get_one_reg(cs, KVM_REG_S390_TODPR, &env->todpr);
    kvm_get_one_reg(cs, KVM_REG_S390_GBEA, &env->gbea);
    kvm_get_one_reg(cs, KVM_REG_S390_PP, &env->pp);

    /* Async page-fault registers are capability-gated. */
    if (cap_async_pf) {
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFTOKEN, &env->pfault_token);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFCOMPARE, &env->pfault_compare);
        if (r < 0) {
            return r;
        }
        r = kvm_get_one_reg(cs, KVM_REG_S390_PFSELECT, &env->pfault_select);
        if (r < 0) {
            return r;
        }
    }

    return 0;
}
370 | ||
fdec9918 CB |
/*
 * Legacy layout for s390:
 * Older S390 KVM requires the topmost vma of the RAM to be
 * smaller than an system defined value, which is at least 256GB.
 * Larger systems have larger values. We put the guest between
 * the end of data segment (system break) and this value. We
 * use 32GB as a base to have enough room for the system break
 * to grow. We also have to use MAP parameters that avoid
 * read-only mapping of guest pages.
 */
static void *legacy_s390_alloc(size_t size)
{
    void *mem;

    /* MAP_FIXED at 32GB (0x8_0000_0000); MAP_SHARED avoids the
     * copy-on-write semantics old kernels cannot handle. */
    mem = mmap((void *) 0x800000000ULL, size,
               PROT_EXEC|PROT_READ|PROT_WRITE,
               MAP_SHARED | MAP_ANONYMOUS | MAP_FIXED, -1, 0);
    /* Return NULL (not MAP_FAILED) on error, per allocator contract. */
    return mem == MAP_FAILED ? NULL : mem;
}
390 | ||
8e4e86af DH |
/* DIAG 501 is used for sw breakpoints: the 4-byte opcode QEMU patches in
 * at a breakpoint site ("83 24 05 01"). */
static const uint8_t diag_501[] = {0x83, 0x24, 0x05, 0x01};
393 | ||
20d695a9 | 394 | int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) |
0e60a699 | 395 | { |
0e60a699 | 396 | |
8e4e86af DH |
397 | if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, |
398 | sizeof(diag_501), 0) || | |
399 | cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)diag_501, | |
400 | sizeof(diag_501), 1)) { | |
0e60a699 AG |
401 | return -EINVAL; |
402 | } | |
403 | return 0; | |
404 | } | |
405 | ||
20d695a9 | 406 | int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp) |
0e60a699 | 407 | { |
8e4e86af | 408 | uint8_t t[sizeof(diag_501)]; |
0e60a699 | 409 | |
8e4e86af | 410 | if (cpu_memory_rw_debug(cs, bp->pc, t, sizeof(diag_501), 0)) { |
0e60a699 | 411 | return -EINVAL; |
8e4e86af | 412 | } else if (memcmp(t, diag_501, sizeof(diag_501))) { |
0e60a699 | 413 | return -EINVAL; |
8e4e86af DH |
414 | } else if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn, |
415 | sizeof(diag_501), 1)) { | |
0e60a699 AG |
416 | return -EINVAL; |
417 | } | |
418 | ||
419 | return 0; | |
420 | } | |
421 | ||
770a6379 DH |
422 | static struct kvm_hw_breakpoint *find_hw_breakpoint(target_ulong addr, |
423 | int len, int type) | |
424 | { | |
425 | int n; | |
426 | ||
427 | for (n = 0; n < nb_hw_breakpoints; n++) { | |
428 | if (hw_breakpoints[n].addr == addr && hw_breakpoints[n].type == type && | |
429 | (hw_breakpoints[n].len == len || len == -1)) { | |
430 | return &hw_breakpoints[n]; | |
431 | } | |
432 | } | |
433 | ||
434 | return NULL; | |
435 | } | |
436 | ||
437 | static int insert_hw_breakpoint(target_ulong addr, int len, int type) | |
438 | { | |
439 | int size; | |
440 | ||
441 | if (find_hw_breakpoint(addr, len, type)) { | |
442 | return -EEXIST; | |
443 | } | |
444 | ||
445 | size = (nb_hw_breakpoints + 1) * sizeof(struct kvm_hw_breakpoint); | |
446 | ||
447 | if (!hw_breakpoints) { | |
448 | nb_hw_breakpoints = 0; | |
449 | hw_breakpoints = (struct kvm_hw_breakpoint *)g_try_malloc(size); | |
450 | } else { | |
451 | hw_breakpoints = | |
452 | (struct kvm_hw_breakpoint *)g_try_realloc(hw_breakpoints, size); | |
453 | } | |
454 | ||
455 | if (!hw_breakpoints) { | |
456 | nb_hw_breakpoints = 0; | |
457 | return -ENOMEM; | |
458 | } | |
459 | ||
460 | hw_breakpoints[nb_hw_breakpoints].addr = addr; | |
461 | hw_breakpoints[nb_hw_breakpoints].len = len; | |
462 | hw_breakpoints[nb_hw_breakpoints].type = type; | |
463 | ||
464 | nb_hw_breakpoints++; | |
465 | ||
466 | return 0; | |
467 | } | |
468 | ||
8c012449 DH |
469 | int kvm_arch_insert_hw_breakpoint(target_ulong addr, |
470 | target_ulong len, int type) | |
471 | { | |
770a6379 DH |
472 | switch (type) { |
473 | case GDB_BREAKPOINT_HW: | |
474 | type = KVM_HW_BP; | |
475 | break; | |
476 | case GDB_WATCHPOINT_WRITE: | |
477 | if (len < 1) { | |
478 | return -EINVAL; | |
479 | } | |
480 | type = KVM_HW_WP_WRITE; | |
481 | break; | |
482 | default: | |
483 | return -ENOSYS; | |
484 | } | |
485 | return insert_hw_breakpoint(addr, len, type); | |
8c012449 DH |
486 | } |
487 | ||
488 | int kvm_arch_remove_hw_breakpoint(target_ulong addr, | |
489 | target_ulong len, int type) | |
490 | { | |
770a6379 DH |
491 | int size; |
492 | struct kvm_hw_breakpoint *bp = find_hw_breakpoint(addr, len, type); | |
493 | ||
494 | if (bp == NULL) { | |
495 | return -ENOENT; | |
496 | } | |
497 | ||
498 | nb_hw_breakpoints--; | |
499 | if (nb_hw_breakpoints > 0) { | |
500 | /* | |
501 | * In order to trim the array, move the last element to the position to | |
502 | * be removed - if necessary. | |
503 | */ | |
504 | if (bp != &hw_breakpoints[nb_hw_breakpoints]) { | |
505 | *bp = hw_breakpoints[nb_hw_breakpoints]; | |
506 | } | |
507 | size = nb_hw_breakpoints * sizeof(struct kvm_hw_breakpoint); | |
508 | hw_breakpoints = | |
509 | (struct kvm_hw_breakpoint *)g_realloc(hw_breakpoints, size); | |
510 | } else { | |
511 | g_free(hw_breakpoints); | |
512 | hw_breakpoints = NULL; | |
513 | } | |
514 | ||
515 | return 0; | |
8c012449 DH |
516 | } |
517 | ||
518 | void kvm_arch_remove_all_hw_breakpoints(void) | |
519 | { | |
770a6379 DH |
520 | nb_hw_breakpoints = 0; |
521 | g_free(hw_breakpoints); | |
522 | hw_breakpoints = NULL; | |
8c012449 DH |
523 | } |
524 | ||
525 | void kvm_arch_update_guest_debug(CPUState *cpu, struct kvm_guest_debug *dbg) | |
526 | { | |
770a6379 DH |
527 | int i; |
528 | ||
529 | if (nb_hw_breakpoints > 0) { | |
530 | dbg->arch.nr_hw_bp = nb_hw_breakpoints; | |
531 | dbg->arch.hw_bp = hw_breakpoints; | |
532 | ||
533 | for (i = 0; i < nb_hw_breakpoints; ++i) { | |
534 | hw_breakpoints[i].phys_addr = s390_cpu_get_phys_addr_debug(cpu, | |
535 | hw_breakpoints[i].addr); | |
536 | } | |
537 | dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP; | |
538 | } else { | |
539 | dbg->arch.nr_hw_bp = 0; | |
540 | dbg->arch.hw_bp = NULL; | |
541 | } | |
8c012449 DH |
542 | } |
543 | ||
/* Hook called before entering KVM_RUN; nothing to do on s390. */
void kvm_arch_pre_run(CPUState *cpu, struct kvm_run *run)
{
}
547 | ||
/* Hook called after returning from KVM_RUN; nothing to do on s390. */
void kvm_arch_post_run(CPUState *cpu, struct kvm_run *run)
{
}
551 | ||
/* Non-zero (vcpu halted) keeps the vcpu thread out of KVM_RUN. */
int kvm_arch_process_async_events(CPUState *cs)
{
    return cs->halted;
}
556 | ||
/*
 * Inject an s390 interrupt into the guest.
 *
 * @type/@parm/@parm64: fields of struct kvm_s390_interrupt as defined by
 *     the KVM ABI.
 * @vm: non-zero delivers a floating (VM-wide) interrupt via the VM ioctl,
 *     zero targets this specific vcpu.
 * Exits QEMU on injection failure — there is no way to recover.
 */
void kvm_s390_interrupt_internal(S390CPU *cpu, int type, uint32_t parm,
                                 uint64_t parm64, int vm)
{
    CPUState *cs = CPU(cpu);
    struct kvm_s390_interrupt kvmint;
    int r;

    /* KVM not (yet) set up for this cpu — silently ignore. */
    if (!cs->kvm_state) {
        return;
    }

    kvmint.type = type;
    kvmint.parm = parm;
    kvmint.parm64 = parm64;

    if (vm) {
        r = kvm_vm_ioctl(cs->kvm_state, KVM_S390_INTERRUPT, &kvmint);
    } else {
        r = kvm_vcpu_ioctl(cs, KVM_S390_INTERRUPT, &kvmint);
    }

    if (r < 0) {
        fprintf(stderr, "KVM failed to inject interrupt\n");
        exit(1);
    }
}
583 | ||
/* Deliver a virtio notification as a floating (VM-wide) interrupt. */
void kvm_s390_virtio_irq(S390CPU *cpu, int config_change, uint64_t token)
{
    kvm_s390_interrupt_internal(cpu, KVM_S390_INT_VIRTIO, config_change,
                                token, 1);
}
589 | ||
/* Inject a per-vcpu interrupt of @type with 32-bit parameter @code. */
void kvm_s390_interrupt(S390CPU *cpu, int type, uint32_t code)
{
    kvm_s390_interrupt_internal(cpu, type, code, 0, 0);
}
594 | ||
/* Inject a program-check interruption with program interruption code @code. */
static void enter_pgmcheck(S390CPU *cpu, uint16_t code)
{
    kvm_s390_interrupt(cpu, KVM_S390_PROGRAM_INT, code);
}
599 | ||
/*
 * Handle an intercepted SERVICE CALL (SCLP) instruction.
 * @ipbh0 encodes the two register numbers: low nibble selects the SCCB
 * address register, next nibble the command-code register.
 * Negative results from sclp_service_call() become a program check;
 * otherwise the value is stored as the condition code.
 */
static int kvm_sclp_service_call(S390CPU *cpu, struct kvm_run *run,
                                 uint16_t ipbh0)
{
    CPUS390XState *env = &cpu->env;
    uint64_t sccb;
    uint32_t code;
    int r = 0;

    /* Registers may be stale; pull current state from the kernel. */
    cpu_synchronize_state(CPU(cpu));
    sccb = env->regs[ipbh0 & 0xf];
    code = env->regs[(ipbh0 & 0xf0) >> 4];

    r = sclp_service_call(env, sccb, code);
    if (r < 0) {
        enter_pgmcheck(cpu, -r);
    } else {
        setcc(cpu, r);
    }

    return 0;
}
621 | ||
/*
 * Dispatch an intercepted 0xB2xx privileged instruction, mostly channel
 * I/O instructions, to the corresponding ioinst_handle_* helper.
 * Returns 0 when handled; -1 makes the caller inject an operation
 * exception (program check 0x0001).
 */
static int handle_b2(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    int rc = 0;
    uint16_t ipbh0 = (run->s390_sieic.ipb & 0xffff0000) >> 16;

    cpu_synchronize_state(CPU(cpu));

    switch (ipa1) {
    case PRIV_B2_XSCH:
        ioinst_handle_xsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_CSCH:
        ioinst_handle_csch(cpu, env->regs[1]);
        break;
    case PRIV_B2_HSCH:
        ioinst_handle_hsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_MSCH:
        ioinst_handle_msch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_SSCH:
        ioinst_handle_ssch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_STCRW:
        ioinst_handle_stcrw(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_STSCH:
        ioinst_handle_stsch(cpu, env->regs[1], run->s390_sieic.ipb);
        break;
    case PRIV_B2_TSCH:
        /* We should only get tsch via KVM_EXIT_S390_TSCH. */
        fprintf(stderr, "Spurious tsch intercept\n");
        break;
    case PRIV_B2_CHSC:
        ioinst_handle_chsc(cpu, run->s390_sieic.ipb);
        break;
    case PRIV_B2_TPI:
        /* This should have been handled by kvm already. */
        fprintf(stderr, "Spurious tpi intercept\n");
        break;
    case PRIV_B2_SCHM:
        ioinst_handle_schm(cpu, env->regs[1], env->regs[2],
                           run->s390_sieic.ipb);
        break;
    case PRIV_B2_RSCH:
        ioinst_handle_rsch(cpu, env->regs[1]);
        break;
    case PRIV_B2_RCHP:
        ioinst_handle_rchp(cpu, env->regs[1]);
        break;
    case PRIV_B2_STCPS:
        /* We do not provide this instruction, it is suppressed. */
        break;
    case PRIV_B2_SAL:
        ioinst_handle_sal(cpu, env->regs[1]);
        break;
    case PRIV_B2_SIGA:
        /* Not provided, set CC = 3 for subchannel not operational */
        setcc(cpu, 3);
        break;
    case PRIV_B2_SCLP_CALL:
        rc = kvm_sclp_service_call(cpu, run, ipbh0);
        break;
    default:
        rc = -1;
        DPRINTF("KVM: unhandled PRIV: 0xb2%x\n", ipa1);
        break;
    }

    return rc;
}
694 | ||
1eecf41b | 695 | static int handle_b9(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) |
0e60a699 AG |
696 | { |
697 | int r = 0; | |
0e60a699 | 698 | |
0e60a699 | 699 | switch (ipa1) { |
1eecf41b FB |
700 | case PRIV_B9_EQBS: |
701 | /* just inject exception */ | |
702 | r = -1; | |
703 | break; | |
704 | default: | |
705 | r = -1; | |
706 | DPRINTF("KVM: unhandled PRIV: 0xb9%x\n", ipa1); | |
707 | break; | |
708 | } | |
709 | ||
710 | return r; | |
711 | } | |
712 | ||
713 | static int handle_eb(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1) | |
714 | { | |
715 | int r = 0; | |
716 | ||
717 | switch (ipa1) { | |
718 | case PRIV_EB_SQBS: | |
719 | /* just inject exception */ | |
720 | r = -1; | |
721 | break; | |
722 | default: | |
723 | r = -1; | |
724 | DPRINTF("KVM: unhandled PRIV: 0xeb%x\n", ipa1); | |
725 | break; | |
0e60a699 AG |
726 | } |
727 | ||
728 | return r; | |
729 | } | |
730 | ||
/*
 * Handle DIAG 0x500 — the s390-virtio hypercall.
 * An -EINVAL result becomes a specification exception in the guest; any
 * other value is passed back to the intercept loop.
 */
static int handle_hypercall(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    int ret;

    cpu_synchronize_state(CPU(cpu));
    ret = s390_virtio_hypercall(env);
    if (ret == -EINVAL) {
        enter_pgmcheck(cpu, PGM_SPECIFICATION);
        return 0;
    }

    return ret;
}
745 | ||
268846ba ED |
746 | static void kvm_handle_diag_308(S390CPU *cpu, struct kvm_run *run) |
747 | { | |
748 | uint64_t r1, r3; | |
749 | ||
750 | cpu_synchronize_state(CPU(cpu)); | |
751 | r1 = (run->s390_sieic.ipa & 0x00f0) >> 8; | |
752 | r3 = run->s390_sieic.ipa & 0x000f; | |
753 | handle_diag_308(&cpu->env, r1, r3); | |
754 | } | |
755 | ||
b30f4dfb DH |
/*
 * Handle DIAG 0x501, the software-breakpoint instruction QEMU plants.
 * The PSW already points past the 4-byte DIAG, so step back to the
 * breakpoint address; if gdbstub knows it, report EXCP_DEBUG, otherwise
 * -ENOENT (the guest executed a stray DIAG 501).
 */
static int handle_sw_breakpoint(S390CPU *cpu, struct kvm_run *run)
{
    CPUS390XState *env = &cpu->env;
    unsigned long pc;

    cpu_synchronize_state(CPU(cpu));

    pc = env->psw.addr - 4;
    if (kvm_find_sw_breakpoint(CPU(cpu), pc)) {
        env->psw.addr = pc;
        return EXCP_DEBUG;
    }

    return -ENOENT;
}
771 | ||
638129ff CH |
772 | #define DIAG_KVM_CODE_MASK 0x000000000000ffff |
773 | ||
/*
 * Dispatch an intercepted DIAGNOSE instruction by its function code.
 * Returns 0 when handled, EXCP_DEBUG for a gdb breakpoint, or -1 so the
 * caller injects an operation exception for unknown codes.
 */
static int handle_diag(S390CPU *cpu, struct kvm_run *run, uint32_t ipb)
{
    int r = 0;
    uint16_t func_code;

    /*
     * For any diagnose call we support, bits 48-63 of the resulting
     * address specify the function code; the remainder is ignored.
     */
    func_code = decode_basedisp_rs(&cpu->env, ipb) & DIAG_KVM_CODE_MASK;
    switch (func_code) {
    case DIAG_IPL:
        kvm_handle_diag_308(cpu, run);
        break;
    case DIAG_KVM_HYPERCALL:
        r = handle_hypercall(cpu, run);
        break;
    case DIAG_KVM_BREAKPOINT:
        r = handle_sw_breakpoint(cpu, run);
        break;
    default:
        DPRINTF("KVM: unknown DIAG: 0x%x\n", func_code);
        r = -1;
        break;
    }

    return r;
}
802 | ||
b20a461f TH |
/* SIGP START handler: mark the cpu running and kick its thread.
 * Always returns 0 (condition code "order accepted"). */
static int kvm_s390_cpu_start(S390CPU *cpu)
{
    s390_add_running_cpu(cpu);
    qemu_cpu_kick(CPU(cpu));
    DPRINTF("DONE: KVM cpu start: %p\n", &cpu->env);
    return 0;
}
810 | ||
/* SIGP RESTART handler: queue a restart interrupt, mark the cpu running
 * and kick its thread.  Always returns 0 ("order accepted"). */
int kvm_s390_cpu_restart(S390CPU *cpu)
{
    kvm_s390_interrupt(cpu, KVM_S390_RESTART, 0);
    s390_add_running_cpu(cpu);
    qemu_cpu_kick(CPU(cpu));
    DPRINTF("DONE: KVM cpu restart: %p\n", &cpu->env);
    return 0;
}
819 | ||
f7d3e466 | 820 | static void sigp_initial_cpu_reset(void *arg) |
0e60a699 | 821 | { |
f7d3e466 TH |
822 | CPUState *cpu = arg; |
823 | S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); | |
d5900813 | 824 | |
f7d3e466 TH |
825 | cpu_synchronize_state(cpu); |
826 | scc->initial_cpu_reset(cpu); | |
0e60a699 AG |
827 | } |
828 | ||
04c2b516 TH |
829 | static void sigp_cpu_reset(void *arg) |
830 | { | |
831 | CPUState *cpu = arg; | |
832 | S390CPUClass *scc = S390_CPU_GET_CLASS(cpu); | |
833 | ||
834 | cpu_synchronize_state(cpu); | |
835 | scc->cpu_reset(cpu); | |
836 | } | |
837 | ||
b8031adb TH |
838 | #define SIGP_ORDER_MASK 0x000000ff |
839 | ||
/*
 * Handle an intercepted SIGNAL PROCESSOR instruction.
 * The order code comes from the second-operand address, the target cpu
 * address from the register selected by the low nibble of ipa1, and the
 * status register from the high nibble.  The resulting condition code is
 * stored via setcc(); the intercept itself always succeeds (returns 0).
 */
static int handle_sigp(S390CPU *cpu, struct kvm_run *run, uint8_t ipa1)
{
    CPUS390XState *env = &cpu->env;
    uint8_t order_code;
    uint16_t cpu_addr;
    S390CPU *target_cpu;
    uint64_t *statusreg = &env->regs[ipa1 >> 4];
    int cc;

    cpu_synchronize_state(CPU(cpu));

    /* get order code */
    order_code = decode_basedisp_rs(env, run->s390_sieic.ipb) & SIGP_ORDER_MASK;

    cpu_addr = env->regs[ipa1 & 0x0f];
    target_cpu = s390_cpu_addr2state(cpu_addr);
    if (target_cpu == NULL) {
        cc = 3;   /* not operational */
        goto out;
    }

    switch (order_code) {
    case SIGP_START:
        cc = kvm_s390_cpu_start(target_cpu);
        break;
    case SIGP_RESTART:
        cc = kvm_s390_cpu_restart(target_cpu);
        break;
    case SIGP_SET_ARCH:
        /* Architecture switching is not supported; report it as an
         * invalid parameter in the status register. */
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_PARAMETER;
        cc = 1;   /* status stored */
        break;
    case SIGP_INITIAL_CPU_RESET:
        /* Resets must run on the target vcpu's own thread. */
        run_on_cpu(CPU(target_cpu), sigp_initial_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    case SIGP_CPU_RESET:
        run_on_cpu(CPU(target_cpu), sigp_cpu_reset, CPU(target_cpu));
        cc = 0;
        break;
    default:
        DPRINTF("KVM: unknown SIGP: 0x%x\n", order_code);
        *statusreg &= 0xffffffff00000000UL;
        *statusreg |= SIGP_STAT_INVALID_ORDER;
        cc = 1;   /* status stored */
        break;
    }

out:
    setcc(cpu, cc);
    return 0;
}
893 | ||
b30f4dfb | 894 | static int handle_instruction(S390CPU *cpu, struct kvm_run *run) |
0e60a699 AG |
895 | { |
896 | unsigned int ipa0 = (run->s390_sieic.ipa & 0xff00); | |
897 | uint8_t ipa1 = run->s390_sieic.ipa & 0x00ff; | |
d7963c43 | 898 | int r = -1; |
0e60a699 | 899 | |
e67137c6 PM |
900 | DPRINTF("handle_instruction 0x%x 0x%x\n", |
901 | run->s390_sieic.ipa, run->s390_sieic.ipb); | |
0e60a699 | 902 | switch (ipa0) { |
09b99878 | 903 | case IPA0_B2: |
1eecf41b FB |
904 | r = handle_b2(cpu, run, ipa1); |
905 | break; | |
09b99878 | 906 | case IPA0_B9: |
1eecf41b FB |
907 | r = handle_b9(cpu, run, ipa1); |
908 | break; | |
09b99878 | 909 | case IPA0_EB: |
1eecf41b | 910 | r = handle_eb(cpu, run, ipa1); |
09b99878 CH |
911 | break; |
912 | case IPA0_DIAG: | |
638129ff | 913 | r = handle_diag(cpu, run, run->s390_sieic.ipb); |
09b99878 CH |
914 | break; |
915 | case IPA0_SIGP: | |
916 | r = handle_sigp(cpu, run, ipa1); | |
917 | break; | |
0e60a699 AG |
918 | } |
919 | ||
920 | if (r < 0) { | |
b30f4dfb | 921 | r = 0; |
1bc22652 | 922 | enter_pgmcheck(cpu, 0x0001); |
0e60a699 | 923 | } |
b30f4dfb DH |
924 | |
925 | return r; | |
0e60a699 AG |
926 | } |
927 | ||
f7575c96 | 928 | static bool is_special_wait_psw(CPUState *cs) |
eca3ed03 CB |
929 | { |
930 | /* signal quiesce */ | |
f7575c96 | 931 | return cs->kvm_run->psw_addr == 0xfffUL; |
eca3ed03 CB |
932 | } |
933 | ||
a2689242 TH |
934 | static void guest_panicked(void) |
935 | { | |
936 | QObject *data; | |
937 | ||
938 | data = qobject_from_jsonf("{ 'action': %s }", "pause"); | |
939 | monitor_protocol_event(QEVENT_GUEST_PANICKED, data); | |
940 | qobject_decref(data); | |
941 | ||
942 | vm_stop(RUN_STATE_GUEST_PANICKED); | |
943 | } | |
944 | ||
945 | static void unmanageable_intercept(S390CPU *cpu, const char *str, int pswoffset) | |
946 | { | |
947 | CPUState *cs = CPU(cpu); | |
948 | ||
949 | error_report("Unmanageable %s! CPU%i new PSW: 0x%016lx:%016lx", | |
950 | str, cs->cpu_index, ldq_phys(cs->as, cpu->env.psa + pswoffset), | |
951 | ldq_phys(cs->as, cpu->env.psa + pswoffset + 8)); | |
952 | s390_del_running_cpu(cpu); | |
953 | guest_panicked(); | |
954 | } | |
955 | ||
/*
 * Handle a KVM_EXIT_S390_SIEIC exit based on the SIE intercept code.
 *
 * Returns 0 when the guest can simply resume, EXCP_HALTED when this vcpu
 * must stop running, and exits the process for intercepts QEMU does not
 * implement at all.
 */
static int handle_intercept(S390CPU *cpu)
{
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int icpt_code = run->s390_sieic.icptcode;
    int r = 0;

    DPRINTF("intercept: 0x%x (at 0x%lx)\n", icpt_code,
            (long)cs->kvm_run->psw_addr);
    switch (icpt_code) {
    case ICPT_INSTRUCTION:
        r = handle_instruction(cpu, run);
        break;
    case ICPT_EXT_INT:
        /* An external interrupt we cannot deliver: fatal for the guest. */
        unmanageable_intercept(cpu, "external interrupt",
                               offsetof(LowCore, external_new_psw));
        r = EXCP_HALTED;
        break;
    case ICPT_WAITPSW:
        /* disabled wait, since enabled wait is handled in kernel */
        /* When this was the last running CPU, either shut down (special
         * quiesce PSW) or treat it as a guest panic. */
        if (s390_del_running_cpu(cpu) == 0) {
            if (is_special_wait_psw(cs)) {
                qemu_system_shutdown_request();
            } else {
                guest_panicked();
            }
        }
        r = EXCP_HALTED;
        break;
    case ICPT_CPU_STOP:
        /* Last CPU stopping shuts the machine down. */
        if (s390_del_running_cpu(cpu) == 0) {
            qemu_system_shutdown_request();
        }
        r = EXCP_HALTED;
        break;
    case ICPT_SOFT_INTERCEPT:
        fprintf(stderr, "KVM unimplemented icpt SOFT\n");
        exit(1);
        break;
    case ICPT_IO:
        fprintf(stderr, "KVM unimplemented icpt IO\n");
        exit(1);
        break;
    default:
        fprintf(stderr, "Unknown intercept code: %d\n", icpt_code);
        exit(1);
        break;
    }

    return r;
}
1007 | ||
09b99878 CH |
/*
 * Handle a KVM_EXIT_S390_TSCH exit: the kernel has partially processed a
 * TEST SUBCHANNEL instruction and defers the rest to userspace.
 *
 * On handler success the condition code is set in the guest PSW.  On a
 * "hard" failure (< -1), any I/O interrupt the kernel had already dequeued
 * for this subchannel is reinjected so it is not lost.
 * NOTE(review): a return of exactly -1 is passed through unchanged to the
 * exit handler — presumably already dealt with inside ioinst_handle_tsch();
 * confirm against that function.
 */
static int handle_tsch(S390CPU *cpu)
{
    CPUS390XState *env = &cpu->env;
    CPUState *cs = CPU(cpu);
    struct kvm_run *run = cs->kvm_run;
    int ret;

    cpu_synchronize_state(cs);

    /* General register 1 supplies the subchannel identifier operand. */
    ret = ioinst_handle_tsch(env, env->regs[1], run->s390_tsch.ipb);
    if (ret >= 0) {
        /* Success; set condition code. */
        setcc(cpu, ret);
        ret = 0;
    } else if (ret < -1) {
        /*
         * Failure.
         * If an I/O interrupt had been dequeued, we have to reinject it.
         */
        if (run->s390_tsch.dequeued) {
            uint16_t subchannel_id = run->s390_tsch.subchannel_id;
            uint16_t subchannel_nr = run->s390_tsch.subchannel_nr;
            uint32_t io_int_parm = run->s390_tsch.io_int_parm;
            uint32_t io_int_word = run->s390_tsch.io_int_word;
            /* Rebuild the KVM interrupt type from subchannel id/nr —
             * same packing as the non-adapter case in kvm_s390_io_interrupt(). */
            uint32_t type = ((subchannel_id & 0xff00) << 24) |
                ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);

            kvm_s390_interrupt_internal(cpu, type,
                                        ((uint32_t)subchannel_id << 16)
                                        | subchannel_nr,
                                        ((uint64_t)io_int_parm << 32)
                                        | io_int_word, 1);
        }
        ret = 0;
    }
    return ret;
}
1045 | ||
8c012449 DH |
1046 | static int kvm_arch_handle_debug_exit(S390CPU *cpu) |
1047 | { | |
770a6379 DH |
1048 | CPUState *cs = CPU(cpu); |
1049 | struct kvm_run *run = cs->kvm_run; | |
1050 | ||
1051 | int ret = 0; | |
1052 | struct kvm_debug_exit_arch *arch_info = &run->debug.arch; | |
1053 | ||
1054 | switch (arch_info->type) { | |
1055 | case KVM_HW_WP_WRITE: | |
1056 | if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { | |
1057 | cs->watchpoint_hit = &hw_watchpoint; | |
1058 | hw_watchpoint.vaddr = arch_info->addr; | |
1059 | hw_watchpoint.flags = BP_MEM_WRITE; | |
1060 | ret = EXCP_DEBUG; | |
1061 | } | |
1062 | break; | |
1063 | case KVM_HW_BP: | |
1064 | if (find_hw_breakpoint(arch_info->addr, -1, arch_info->type)) { | |
1065 | ret = EXCP_DEBUG; | |
1066 | } | |
1067 | break; | |
1068 | case KVM_SINGLESTEP: | |
1069 | if (cs->singlestep_enabled) { | |
1070 | ret = EXCP_DEBUG; | |
1071 | } | |
1072 | break; | |
1073 | default: | |
1074 | ret = -ENOSYS; | |
1075 | } | |
1076 | ||
1077 | return ret; | |
8c012449 DH |
1078 | } |
1079 | ||
20d695a9 | 1080 | int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run) |
0e60a699 | 1081 | { |
20d695a9 | 1082 | S390CPU *cpu = S390_CPU(cs); |
0e60a699 AG |
1083 | int ret = 0; |
1084 | ||
1085 | switch (run->exit_reason) { | |
1086 | case KVM_EXIT_S390_SIEIC: | |
1bc22652 | 1087 | ret = handle_intercept(cpu); |
0e60a699 AG |
1088 | break; |
1089 | case KVM_EXIT_S390_RESET: | |
add142e0 | 1090 | qemu_system_reset_request(); |
0e60a699 | 1091 | break; |
09b99878 CH |
1092 | case KVM_EXIT_S390_TSCH: |
1093 | ret = handle_tsch(cpu); | |
1094 | break; | |
8c012449 DH |
1095 | case KVM_EXIT_DEBUG: |
1096 | ret = kvm_arch_handle_debug_exit(cpu); | |
1097 | break; | |
0e60a699 AG |
1098 | default: |
1099 | fprintf(stderr, "Unknown KVM exit: %d\n", run->exit_reason); | |
1100 | break; | |
1101 | } | |
1102 | ||
bb4ea393 JK |
1103 | if (ret == 0) { |
1104 | ret = EXCP_INTERRUPT; | |
bb4ea393 | 1105 | } |
0e60a699 AG |
1106 | return ret; |
1107 | } | |
4513d923 | 1108 | |
20d695a9 | 1109 | bool kvm_arch_stop_on_emulation_error(CPUState *cpu) |
4513d923 GN |
1110 | { |
1111 | return true; | |
1112 | } | |
a1b87fe0 | 1113 | |
20d695a9 | 1114 | int kvm_arch_on_sigbus_vcpu(CPUState *cpu, int code, void *addr) |
a1b87fe0 JK |
1115 | { |
1116 | return 1; | |
1117 | } | |
1118 | ||
/* Process-wide SIGBUS hook: not handled on s390 (non-zero = unhandled). */
int kvm_arch_on_sigbus(int code, void *addr)
{
    return 1;
}
09b99878 CH |
1123 | |
/*
 * Inject an I/O interrupt into the guest.
 *
 * If the adapter-interrupt bit (IO_INT_WORD_AI) is set in io_int_word,
 * the generic adapter interrupt type is used; otherwise the KVM interrupt
 * type encodes bits of the subchannel id and the subchannel number.
 * The interrupt parameters are packed as: parm = (id << 16) | nr,
 * parm64 = (io_int_parm << 32) | io_int_word.
 */
void kvm_s390_io_interrupt(S390CPU *cpu, uint16_t subchannel_id,
                           uint16_t subchannel_nr, uint32_t io_int_parm,
                           uint32_t io_int_word)
{
    uint32_t type;

    if (io_int_word & IO_INT_WORD_AI) {
        type = KVM_S390_INT_IO(1, 0, 0, 0);
    } else {
        /* NOTE(review): bit packing mirrors the kernel's KVM_S390_INT_IO
         * layout — verify against the KVM uapi headers when changing. */
        type = ((subchannel_id & 0xff00) << 24) |
            ((subchannel_id & 0x00060) << 22) | (subchannel_nr << 16);
    }
    kvm_s390_interrupt_internal(cpu, type,
                                ((uint32_t)subchannel_id << 16) | subchannel_nr,
                                ((uint64_t)io_int_parm << 32) | io_int_word, 1);
}
1140 | ||
1141 | void kvm_s390_crw_mchk(S390CPU *cpu) | |
1142 | { | |
1143 | kvm_s390_interrupt_internal(cpu, KVM_S390_MCHK, 1 << 28, | |
1144 | 0x00400f1d40330000, 1); | |
1145 | } | |
1146 | ||
1147 | void kvm_s390_enable_css_support(S390CPU *cpu) | |
1148 | { | |
09b99878 CH |
1149 | int r; |
1150 | ||
1151 | /* Activate host kernel channel subsystem support. */ | |
e080f0fd | 1152 | r = kvm_vcpu_enable_cap(CPU(cpu), KVM_CAP_S390_CSS_SUPPORT, 0); |
09b99878 CH |
1153 | assert(r == 0); |
1154 | } | |
48475e14 AK |
1155 | |
/*
 * Arch hook run once at KVM init: advertise irqfd/GSI routing support
 * to the common code when the kernel has KVM_CAP_IRQ_ROUTING.
 */
void kvm_arch_init_irq_routing(KVMState *s)
{
    /*
     * Note that while irqchip capabilities generally imply that cpustates
     * are handled in-kernel, it is not true for s390 (yet); therefore, we
     * have to override the common code kvm_halt_in_kernel_allowed setting.
     */
    if (kvm_check_extension(s, KVM_CAP_IRQ_ROUTING)) {
        kvm_irqfds_allowed = true;
        kvm_gsi_routing_allowed = true;
        kvm_halt_in_kernel_allowed = false;
    }
}
b4436a0b | 1169 | |
cc3ac9c4 CH |
1170 | int kvm_s390_assign_subch_ioeventfd(EventNotifier *notifier, uint32_t sch, |
1171 | int vq, bool assign) | |
b4436a0b CH |
1172 | { |
1173 | struct kvm_ioeventfd kick = { | |
1174 | .flags = KVM_IOEVENTFD_FLAG_VIRTIO_CCW_NOTIFY | | |
1175 | KVM_IOEVENTFD_FLAG_DATAMATCH, | |
cc3ac9c4 | 1176 | .fd = event_notifier_get_fd(notifier), |
b4436a0b CH |
1177 | .datamatch = vq, |
1178 | .addr = sch, | |
1179 | .len = 8, | |
1180 | }; | |
1181 | if (!kvm_check_extension(kvm_state, KVM_CAP_IOEVENTFD)) { | |
1182 | return -ENOSYS; | |
1183 | } | |
1184 | if (!assign) { | |
1185 | kick.flags |= KVM_IOEVENTFD_FLAG_DEASSIGN; | |
1186 | } | |
1187 | return kvm_vm_ioctl(kvm_state, KVM_IOEVENTFD, &kick); | |
1188 | } |