/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

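/*
 * The helpers below test whether the respective interruption classes
 * are currently disabled by the guest PSW system mask.
 */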
int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

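/*
 * A clock comparator interrupt is pending once the guest's clock
 * comparator value lies in the past relative to the guest TOD clock
 * (host TOD plus the per-vcpu epoch offset).
 */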
static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	return (vcpu->arch.sie_block->cputm >> 63) &&
	       cpu_timer_interrupts_enabled(vcpu);
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_floating_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

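/*
 * Compute the set of interrupts that can be delivered right now: start
 * from all pending local and floating interrupts and mask out everything
 * that is disabled by the PSW system mask or by the relevant control
 * register subclass masks (cr0, cr6, cr14).
 */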
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_local_irqs(vcpu);
	active_mask |= pending_floating_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_floating_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

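/*
 * Instruction length code of the last guest instruction, needed when
 * delivering a program interrupt; it is only available for the intercept
 * codes listed below.
 */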
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}

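/*
 * The __deliver_* functions below present one pending interrupt to the
 * guest: they store the interruption parameters into the guest lowcore,
 * save the current PSW as the old PSW and load the new PSW for that
 * interruption class.
 */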
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			  (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	unsigned long adtl_status_addr;
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);

		rc = kvm_s390_vcpu_store_status(vcpu,
						KVM_S390_STORE_STATUS_PREFIXED);
		rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
				    &adtl_status_addr,
				    sizeof(unsigned long));
		rc |= kvm_s390_vcpu_store_adtl_status(vcpu,
						      adtl_status_addr);
		rc |= put_guest_lc(vcpu, mchk.mcic,
				   (u64 __user *) __LC_MCCK_CODE);
		rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
				   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
		rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
				     &mchk.fixed_logout,
				     sizeof(mchk.fixed_logout));
		rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc = write_guest_lc(vcpu,
			    offsetof(struct _lowcore, restart_old_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				KVM_S390_INT_PFAULT_DONE, 0,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "interrupt: virtio parm:%x,parm64:%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
				inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
				inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
				  (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

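/*
 * Dispatch table mapping each pending-irq bit to its delivery function;
 * I/O interrupts are handled separately via __deliver_io().
 */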
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return (sigp_ctrl & SIGP_CTRL_C) &&
	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	int rc;

	rc = !!deliverable_irqs(vcpu);

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	/* external call pending and deliverable */
	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		rc = 1;

	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		rc = 1;

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

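/*
 * Called when the guest performs an enabled wait: returns immediately
 * if something is already pending, arms the clock comparator hrtimer
 * when timer interrupts are enabled, and blocks the vcpu otherwise.
 */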
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 now, sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (vcpu->arch.sie_block->ckc > now &&
	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

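/*
 * Deliver all interrupts that are currently deliverable, highest
 * priority first, then request intercepts for anything that remains
 * pending but cannot be delivered yet.
 */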
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	li->irq.pgm = irq->u.pgm;
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;

	spin_lock(&li->lock);
	irq.u.pgm.code = code;
	__inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;
	int rc;

	spin_lock(&li->lock);
	irq.u.pgm = *pgm_info;
	rc = __inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

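/*
 * With the SIGP interpretation facility the external-call indication
 * lives in the SCA; cmpxchg is used so that a concurrent injection of
 * a second external call is detected and refused with -EBUSY.
 */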
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
	unsigned char new_val, old_val;
	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (src_id >= KVM_MAX_VCPUS ||
	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return __inject_extcall_sigpif(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

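/*
 * For service signals the ext_params field carries the (doubleword
 * aligned) SCCB address in the bits covered by SCCB_MASK and the
 * event-pending indication in the low bits.
 */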
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts one after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
	    (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

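/*
 * Queue a floating interrupt according to its type and kick a suitable
 * destination vcpu so it notices the newly pending interrupt.
 */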
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	u64 type = READ_ONCE(inti->type);
	int rc;

	fi = &kvm->arch.float_int;

	switch (type) {
	case KVM_S390_MCHK:
		rc = __inject_float_mchk(kvm, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __inject_virtio(kvm, inti);
		break;
	case KVM_S390_INT_SERVICE:
		rc = __inject_service(kvm, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __inject_pfault_done(kvm, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __inject_io(kvm, inti);
		break;
	default:
		rc = -EINVAL;
	}
	if (rc)
		return rc;

	__floating_irq_kick(kvm, type);
	return 0;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	int rc;

	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}

	return rc;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	rc = do_inject_vcpu(vcpu, irq);
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

static inline void clear_irq_list(struct list_head *_list)
{
	struct kvm_s390_interrupt_info *inti, *n;

	list_for_each_entry_safe(inti, n, _list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
}

static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
			struct kvm_s390_irq *irq)
{
	irq->type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		irq->u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq->u.io = inti->io;
		break;
	}
}

6d3da241
JF
1612void kvm_s390_clear_float_irqs(struct kvm *kvm)
1613{
1614 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1615 int i;
1616
1617 spin_lock(&fi->lock);
f2ae45ed
JF
1618 fi->pending_irqs = 0;
1619 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1620 memset(&fi->mchk, 0, sizeof(fi->mchk));
6d3da241
JF
1621 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1622 clear_irq_list(&fi->lists[i]);
1623 for (i = 0; i < FIRQ_MAX_COUNT; i++)
1624 fi->counters[i] = 0;
1625 spin_unlock(&fi->lock);
1626}
1627
94aa033e 1628static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
c05c4186
JF
1629{
1630 struct kvm_s390_interrupt_info *inti;
1631 struct kvm_s390_float_interrupt *fi;
94aa033e 1632 struct kvm_s390_irq *buf;
6d3da241 1633 struct kvm_s390_irq *irq;
94aa033e 1634 int max_irqs;
c05c4186
JF
1635 int ret = 0;
1636 int n = 0;
6d3da241 1637 int i;
c05c4186 1638
94aa033e
JF
1639 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1640 return -EINVAL;
1641
1642 /*
1643 * We are already using -ENOMEM to signal that
1644 * userspace may retry with a bigger buffer, so a
1645 * failed allocation here returns -ENOBUFS instead
1646 */
1647 buf = vzalloc(len);
1648 if (!buf)
1649 return -ENOBUFS;
1650
1651 max_irqs = len / sizeof(struct kvm_s390_irq);
1652
c05c4186
JF
1653 fi = &kvm->arch.float_int;
1654 spin_lock(&fi->lock);
6d3da241
JF
1655 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1656 list_for_each_entry(inti, &fi->lists[i], list) {
1657 if (n == max_irqs) {
1658 /* signal userspace to try again */
1659 ret = -ENOMEM;
1660 goto out;
1661 }
1662 inti_to_irq(inti, &buf[n]);
1663 n++;
1664 }
1665 }
1666 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
94aa033e 1667 if (n == max_irqs) {
c05c4186
JF
1668 /* signal userspace to try again */
1669 ret = -ENOMEM;
6d3da241 1670 goto out;
c05c4186 1671 }
6d3da241
JF
1672 irq = (struct kvm_s390_irq *) &buf[n];
1673 irq->type = KVM_S390_INT_SERVICE;
1674 irq->u.ext = fi->srv_signal;
c05c4186
JF
1675 n++;
1676 }
6d3da241
JF
1677 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1678 if (n == max_irqs) {
1679 /* signal userspace to try again */
1680 ret = -ENOMEM;
1681 goto out;
1682 }
1683 irq = (struct kvm_s390_irq *) &buf[n];
1684 irq->type = KVM_S390_MCHK;
1685 irq->u.mchk = fi->mchk;
1686 n++;
1687 }
1688
1689out:
c05c4186 1690 spin_unlock(&fi->lock);
94aa033e
JF
1691 if (!ret && n > 0) {
1692 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1693 ret = -EFAULT;
1694 }
1695 vfree(buf);
c05c4186
JF
1696
1697 return ret < 0 ? ret : n;
1698}
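Seen from userspace, the -ENOMEM convention above turns into a grow-and-retry loop around KVM_DEV_FLIC_GET_ALL_IRQS. A hedged sketch: flic_fd is assumed to be the device fd returned by KVM_CREATE_DEVICE, and the helper name and starting size are illustrative:

#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Fetch all floating interrupts, growing the buffer on -ENOMEM.
 * On success the ioctl returns the number of interrupts copied. */
static int get_float_irqs(int flic_fd, struct kvm_s390_irq **out)
{
	struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
	__u64 len = 64 * sizeof(struct kvm_s390_irq);
	void *buf = NULL;
	int r;

	do {
		free(buf);
		buf = malloc(len);
		if (!buf)
			return -1;
		attr.addr = (__u64)(unsigned long)buf;
		attr.attr = len;
		r = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		len *= 2;
	} while (r < 0 && errno == ENOMEM && len <= KVM_S390_FLIC_MAX_BUFFER);

	if (r < 0) {
		free(buf);
		buf = NULL;
	}
	*out = (struct kvm_s390_irq *)buf;
	return r;
}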
1699
1700static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1701{
1702 int r;
1703
1704 switch (attr->group) {
1705 case KVM_DEV_FLIC_GET_ALL_IRQS:
94aa033e 1706 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
c05c4186
JF
1707 attr->attr);
1708 break;
1709 default:
1710 r = -EINVAL;
1711 }
1712
1713 return r;
1714}
1715
1716static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1717 u64 addr)
1718{
1719 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1720 void *target = NULL;
1721 void __user *source;
1722 u64 size;
1723
1724 if (get_user(inti->type, (u64 __user *)addr))
1725 return -EFAULT;
1726
1727 switch (inti->type) {
3c038e6b
DD
1728 case KVM_S390_INT_PFAULT_INIT:
1729 case KVM_S390_INT_PFAULT_DONE:
c05c4186
JF
1730 case KVM_S390_INT_VIRTIO:
1731 case KVM_S390_INT_SERVICE:
1732 target = (void *) &inti->ext;
1733 source = &uptr->u.ext;
1734 size = sizeof(inti->ext);
1735 break;
1736 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1737 target = (void *) &inti->io;
1738 source = &uptr->u.io;
1739 size = sizeof(inti->io);
1740 break;
1741 case KVM_S390_MCHK:
1742 target = (void *) &inti->mchk;
1743 source = &uptr->u.mchk;
1744 size = sizeof(inti->mchk);
1745 break;
1746 default:
1747 return -EINVAL;
1748 }
1749
1750 if (copy_from_user(target, source, size))
1751 return -EFAULT;
1752
1753 return 0;
1754}
1755
1756static int enqueue_floating_irq(struct kvm_device *dev,
1757 struct kvm_device_attr *attr)
1758{
1759 struct kvm_s390_interrupt_info *inti = NULL;
1760 int r = 0;
1761 int len = attr->attr;
1762
1763 if (len % sizeof(struct kvm_s390_irq) != 0)
1764 return -EINVAL;
1765 else if (len > KVM_S390_FLIC_MAX_BUFFER)
1766 return -EINVAL;
1767
1768 while (len >= sizeof(struct kvm_s390_irq)) {
1769 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1770 if (!inti)
1771 return -ENOMEM;
1772
1773 r = copy_irq_from_user(inti, attr->addr);
1774 if (r) {
1775 kfree(inti);
1776 return r;
1777 }
a91b8ebe
JF
1778 r = __inject_vm(dev->kvm, inti);
1779 if (r) {
1780 kfree(inti);
1781 return r;
1782 }
c05c4186
JF
1783 len -= sizeof(struct kvm_s390_irq);
1784 attr->addr += sizeof(struct kvm_s390_irq);
1785 }
1786
1787 return r;
1788}
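The userspace counterpart passes an array of struct kvm_s390_irq with the byte length in attr.attr, matching the length checks above. A sketch queuing a single service-signal interrupt; the helper name is illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Queue one service-signal interrupt through KVM_DEV_FLIC_ENQUEUE. */
static int enqueue_service(int flic_fd, __u32 parm)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_INT_SERVICE,
		.u.ext.ext_params = parm,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_FLIC_ENQUEUE,
		.attr = sizeof(irq),
		.addr = (__u64)(unsigned long)&irq,
	};

	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}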
1789
841b91c5
CH
1790static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1791{
1792 if (id >= MAX_S390_IO_ADAPTERS)
1793 return NULL;
1794 return kvm->arch.adapters[id];
1795}
1796
1797static int register_io_adapter(struct kvm_device *dev,
1798 struct kvm_device_attr *attr)
1799{
1800 struct s390_io_adapter *adapter;
1801 struct kvm_s390_io_adapter adapter_info;
1802
1803 if (copy_from_user(&adapter_info,
1804 (void __user *)attr->addr, sizeof(adapter_info)))
1805 return -EFAULT;
1806
1807 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1808 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1809 return -EINVAL;
1810
1811 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1812 if (!adapter)
1813 return -ENOMEM;
1814
1815 INIT_LIST_HEAD(&adapter->maps);
1816 init_rwsem(&adapter->maps_lock);
1817 atomic_set(&adapter->nr_maps, 0);
1818 adapter->id = adapter_info.id;
1819 adapter->isc = adapter_info.isc;
1820 adapter->maskable = adapter_info.maskable;
1821 adapter->masked = false;
1822 adapter->swap = adapter_info.swap;
1823 dev->kvm->arch.adapters[adapter->id] = adapter;
1824
1825 return 0;
1826}
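Registration is driven from userspace with a struct kvm_s390_io_adapter. A sketch registering a maskable adapter on ISC 3; the id and ISC are arbitrary example values and the helper name is illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Register adapter 0 on ISC 3, maskable, without byte swapping. */
static int register_adapter(int flic_fd)
{
	struct kvm_s390_io_adapter info = {
		.id = 0,
		.isc = 3,
		.maskable = 1,
		.swap = 0,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
		.addr = (__u64)(unsigned long)&info,
	};

	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}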
1827
1828int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1829{
1830 int ret;
1831 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1832
1833 if (!adapter || !adapter->maskable)
1834 return -EINVAL;
1835 ret = adapter->masked;
1836 adapter->masked = masked;
1837 return ret;
1838}
1839
1840static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1841{
1842 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1843 struct s390_map_info *map;
1844 int ret;
1845
1846 if (!adapter || !addr)
1847 return -EINVAL;
1848
1849 map = kzalloc(sizeof(*map), GFP_KERNEL);
1850 if (!map) {
1851 ret = -ENOMEM;
1852 goto out;
1853 }
1854 INIT_LIST_HEAD(&map->list);
1855 map->guest_addr = addr;
6e0a0431 1856 map->addr = gmap_translate(kvm->arch.gmap, addr);
841b91c5
CH
1857 if (map->addr == -EFAULT) {
1858 ret = -EFAULT;
1859 goto out;
1860 }
1861 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1862 if (ret < 0)
1863 goto out;
1864 BUG_ON(ret != 1);
1865 down_write(&adapter->maps_lock);
1866 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1867 list_add_tail(&map->list, &adapter->maps);
1868 ret = 0;
1869 } else {
1870 put_page(map->page);
1871 ret = -EINVAL;
1872 }
1873 up_write(&adapter->maps_lock);
1874out:
1875 if (ret)
1876 kfree(map);
1877 return ret;
1878}
1879
1880static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1881{
1882 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1883 struct s390_map_info *map, *tmp;
1884 int found = 0;
1885
1886 if (!adapter || !addr)
1887 return -EINVAL;
1888
1889 down_write(&adapter->maps_lock);
1890 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1891 if (map->guest_addr == addr) {
1892 found = 1;
1893 atomic_dec(&adapter->nr_maps);
1894 list_del(&map->list);
1895 put_page(map->page);
1896 kfree(map);
1897 break;
1898 }
1899 }
1900 up_write(&adapter->maps_lock);
1901
1902 return found ? 0 : -EINVAL;
1903}
1904
1905void kvm_s390_destroy_adapters(struct kvm *kvm)
1906{
1907 int i;
1908 struct s390_map_info *map, *tmp;
1909
1910 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1911 if (!kvm->arch.adapters[i])
1912 continue;
1913 list_for_each_entry_safe(map, tmp,
1914 &kvm->arch.adapters[i]->maps, list) {
1915 list_del(&map->list);
1916 put_page(map->page);
1917 kfree(map);
1918 }
1919 kfree(kvm->arch.adapters[i]);
1920 }
1921}
1922
1923static int modify_io_adapter(struct kvm_device *dev,
1924 struct kvm_device_attr *attr)
1925{
1926 struct kvm_s390_io_adapter_req req;
1927 struct s390_io_adapter *adapter;
1928 int ret;
1929
1930 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1931 return -EFAULT;
1932
1933 adapter = get_io_adapter(dev->kvm, req.id);
1934 if (!adapter)
1935 return -EINVAL;
1936 switch (req.type) {
1937 case KVM_S390_IO_ADAPTER_MASK:
1938 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1939 if (ret > 0)
1940 ret = 0;
1941 break;
1942 case KVM_S390_IO_ADAPTER_MAP:
1943 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1944 break;
1945 case KVM_S390_IO_ADAPTER_UNMAP:
1946 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1947 break;
1948 default:
1949 ret = -EINVAL;
1950 }
1951
1952 return ret;
1953}
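The same attribute group multiplexes mask, map and unmap requests through struct kvm_s390_io_adapter_req. A sketch asking the kernel to pin one guest indicator page; the helper name and adapter id are illustrative:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Pin a guest indicator page for adapter 0 via ADAPTER_MODIFY. */
static int map_indicator_page(int flic_fd, __u64 guest_addr)
{
	struct kvm_s390_io_adapter_req req = {
		.id = 0,
		.type = KVM_S390_IO_ADAPTER_MAP,
		.addr = guest_addr,
	};
	struct kvm_device_attr attr = {
		.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
		.addr = (__u64)(unsigned long)&req,
	};

	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}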
1954
c05c4186
JF
1955static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1956{
1957 int r = 0;
3c038e6b
DD
1958 unsigned int i;
1959 struct kvm_vcpu *vcpu;
c05c4186
JF
1960
1961 switch (attr->group) {
1962 case KVM_DEV_FLIC_ENQUEUE:
1963 r = enqueue_floating_irq(dev, attr);
1964 break;
1965 case KVM_DEV_FLIC_CLEAR_IRQS:
67335e63 1966 kvm_s390_clear_float_irqs(dev->kvm);
c05c4186 1967 break;
3c038e6b
DD
1968 case KVM_DEV_FLIC_APF_ENABLE:
1969 dev->kvm->arch.gmap->pfault_enabled = 1;
1970 break;
1971 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1972 dev->kvm->arch.gmap->pfault_enabled = 0;
1973 /*
1974 * Make sure no async faults are in transition when
1975 * clearing the queues, so that we do not have to
1976 * worry about late-arriving completion workers.
1977 */
1978 synchronize_srcu(&dev->kvm->srcu);
1979 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1980 kvm_clear_async_pf_completion_queue(vcpu);
1981 break;
841b91c5
CH
1982 case KVM_DEV_FLIC_ADAPTER_REGISTER:
1983 r = register_io_adapter(dev, attr);
1984 break;
1985 case KVM_DEV_FLIC_ADAPTER_MODIFY:
1986 r = modify_io_adapter(dev, attr);
1987 break;
c05c4186
JF
1988 default:
1989 r = -EINVAL;
1990 }
1991
1992 return r;
1993}
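As one concrete set_attr use, enabling the async-page-fault path above needs no payload at all; the group alone selects the action. A sketch with an illustrative helper name:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Enable guest async page faults via the flic. */
static int enable_apf(int flic_fd)
{
	struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_APF_ENABLE };

	return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}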
1994
1995static int flic_create(struct kvm_device *dev, u32 type)
1996{
1997 if (!dev)
1998 return -EINVAL;
1999 if (dev->kvm->arch.flic)
2000 return -EINVAL;
2001 dev->kvm->arch.flic = dev;
2002 return 0;
2003}
2004
2005static void flic_destroy(struct kvm_device *dev)
2006{
2007 dev->kvm->arch.flic = NULL;
2008 kfree(dev);
2009}
2010
2011/* s390 floating irq controller (flic) */
2012struct kvm_device_ops kvm_flic_ops = {
2013 .name = "kvm-flic",
2014 .get_attr = flic_get_attr,
2015 .set_attr = flic_set_attr,
2016 .create = flic_create,
2017 .destroy = flic_destroy,
2018};
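These ops are reached once userspace instantiates the device. A sketch, assuming vm_fd is an open VM fd; as flic_create() enforces, at most one flic can exist per VM:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Create the flic; cd.fd receives the device fd on success. */
static int create_flic(int vm_fd)
{
	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };

	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
		return -1;
	return cd.fd;
}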
84223598
CH
2019
2020static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2021{
2022 unsigned long bit;
2023
2024 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2025
2026 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2027}
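A worked example may help here; the numbers below assume BITS_PER_LONG == 64 and are purely illustrative:

/*
 * For an indicator at page offset 0x10 with bit_nr 3:
 *   bit     = 3 + 0x10 * 8 = 131   (third 64-bit word, bit offset 3)
 *   swapped = 131 ^ 63     = 188   (same word, bit offset 60)
 * XOR with BITS_PER_LONG - 1 flips only the low six bits, so swap
 * mirrors the bit order within each 64-bit word (MSB-0 vs. LSB-0
 * numbering) while leaving the word index unchanged.
 */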
2028
2029static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2030 u64 addr)
2031{
2032 struct s390_map_info *map;
2033
2034 if (!adapter)
2035 return NULL;
2036
2037 list_for_each_entry(map, &adapter->maps, list) {
2038 if (map->guest_addr == addr)
2039 return map;
2040 }
2041 return NULL;
2042}
2043
2044static int adapter_indicators_set(struct kvm *kvm,
2045 struct s390_io_adapter *adapter,
2046 struct kvm_s390_adapter_int *adapter_int)
2047{
2048 unsigned long bit;
2049 int summary_set, idx;
2050 struct s390_map_info *info;
2051 void *map;
2052
2053 info = get_map_info(adapter, adapter_int->ind_addr);
2054 if (!info)
2055 return -1;
2056 map = page_address(info->page);
2057 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2058 set_bit(bit, map);
2059 idx = srcu_read_lock(&kvm->srcu);
2060 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2061 set_page_dirty_lock(info->page);
2062 info = get_map_info(adapter, adapter_int->summary_addr);
2063 if (!info) {
2064 srcu_read_unlock(&kvm->srcu, idx);
2065 return -1;
2066 }
2067 map = page_address(info->page);
2068 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2069 adapter->swap);
2070 summary_set = test_and_set_bit(bit, map);
2071 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2072 set_page_dirty_lock(info->page);
2073 srcu_read_unlock(&kvm->srcu, idx);
2074 return summary_set ? 0 : 1;
2075}
2076
2077/*
2078 * < 0 - not injected due to error
2079 * = 0 - coalesced, summary indicator already active
2080 * > 0 - injected interrupt
2081 */
2082static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2083 struct kvm *kvm, int irq_source_id, int level,
2084 bool line_status)
2085{
2086 int ret;
2087 struct s390_io_adapter *adapter;
2088
2089 /* We're only interested in the 0->1 transition. */
2090 if (!level)
2091 return 0;
2092 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2093 if (!adapter)
2094 return -1;
2095 down_read(&adapter->maps_lock);
2096 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2097 up_read(&adapter->maps_lock);
2098 if ((ret > 0) && !adapter->masked) {
2099 struct kvm_s390_interrupt s390int = {
2100 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2101 .parm = 0,
2102 .parm64 = (adapter->isc << 27) | 0x80000000,
2103 };
2104 ret = kvm_s390_inject_vm(kvm, &s390int);
2105 if (ret == 0)
2106 ret = 1;
2107 }
2108 return ret;
2109}
2110
8ba918d4 2111int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
84223598
CH
2112 const struct kvm_irq_routing_entry *ue)
2113{
2114 int ret;
2115
2116 switch (ue->type) {
2117 case KVM_IRQ_ROUTING_S390_ADAPTER:
2118 e->set = set_adapter_int;
2119 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2120 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2121 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2122 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2123 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2124 ret = 0;
2125 break;
2126 default:
2127 ret = -EINVAL;
2128 }
2129
2130 return ret;
2131}
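Userspace installs such an entry with KVM_SET_GSI_ROUTING on the VM fd. A sketch routing one GSI to an adapter; all addresses, offsets, ids and the helper name are placeholder values:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Route GSI 0 to adapter 0's indicator and summary bits. */
static int route_adapter_int(int vm_fd, __u64 ind_addr, __u64 summary_addr)
{
	struct {
		struct kvm_irq_routing hdr;
		struct kvm_irq_routing_entry e;
	} r;

	memset(&r, 0, sizeof(r));
	r.hdr.nr = 1;
	r.e.gsi = 0;
	r.e.type = KVM_IRQ_ROUTING_S390_ADAPTER;
	r.e.u.adapter.ind_addr = ind_addr;
	r.e.u.adapter.summary_addr = summary_addr;
	r.e.u.adapter.ind_offset = 0;
	r.e.u.adapter.summary_offset = 0;
	r.e.u.adapter.adapter_id = 0;

	return ioctl(vm_fd, KVM_SET_GSI_ROUTING, &r.hdr);
}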
2132
2133int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2134 int irq_source_id, int level, bool line_status)
2135{
2136 return -EINVAL;
2137}
816c7667
JF
2138
2139int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2140{
2141 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2142 struct kvm_s390_irq *buf;
2143 int r = 0;
2144 int n;
2145
2146 buf = vmalloc(len);
2147 if (!buf)
2148 return -ENOMEM;
2149
2150 if (copy_from_user((void *) buf, irqstate, len)) {
2151 r = -EFAULT;
2152 goto out_free;
2153 }
2154
2155 /*
2156 * Don't allow setting the interrupt state
2157 * when there are already interrupts pending
2158 */
2159 spin_lock(&li->lock);
2160 if (li->pending_irqs) {
2161 r = -EBUSY;
2162 goto out_unlock;
2163 }
2164
2165 for (n = 0; n < len / sizeof(*buf); n++) {
2166 r = do_inject_vcpu(vcpu, &buf[n]);
2167 if (r)
2168 break;
2169 }
2170
2171out_unlock:
2172 spin_unlock(&li->lock);
2173out_free:
2174 vfree(buf);
2175
2176 return r;
2177}
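The matching vcpu ioctl is KVM_S390_SET_IRQ_STATE, which wraps the buffer in struct kvm_s390_irq_state. A sketch with an illustrative helper name; note the -EBUSY result when interrupts are already pending:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Restore a saved interrupt state; fails with EBUSY if irqs are pending. */
static int set_irq_state(int vcpu_fd, struct kvm_s390_irq *irqs, int n)
{
	struct kvm_s390_irq_state state = {
		.buf = (__u64)(unsigned long)irqs,
		.len = n * sizeof(*irqs),
	};

	return ioctl(vcpu_fd, KVM_S390_SET_IRQ_STATE, &state);
}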
2178
2179static void store_local_irq(struct kvm_s390_local_interrupt *li,
2180 struct kvm_s390_irq *irq,
2181 unsigned long irq_type)
2182{
2183 switch (irq_type) {
2184 case IRQ_PEND_MCHK_EX:
2185 case IRQ_PEND_MCHK_REP:
2186 irq->type = KVM_S390_MCHK;
2187 irq->u.mchk = li->irq.mchk;
2188 break;
2189 case IRQ_PEND_PROG:
2190 irq->type = KVM_S390_PROGRAM_INT;
2191 irq->u.pgm = li->irq.pgm;
2192 break;
2193 case IRQ_PEND_PFAULT_INIT:
2194 irq->type = KVM_S390_INT_PFAULT_INIT;
2195 irq->u.ext = li->irq.ext;
2196 break;
2197 case IRQ_PEND_EXT_EXTERNAL:
2198 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2199 irq->u.extcall = li->irq.extcall;
2200 break;
2201 case IRQ_PEND_EXT_CLOCK_COMP:
2202 irq->type = KVM_S390_INT_CLOCK_COMP;
2203 break;
2204 case IRQ_PEND_EXT_CPU_TIMER:
2205 irq->type = KVM_S390_INT_CPU_TIMER;
2206 break;
2207 case IRQ_PEND_SIGP_STOP:
2208 irq->type = KVM_S390_SIGP_STOP;
2209 irq->u.stop = li->irq.stop;
2210 break;
2211 case IRQ_PEND_RESTART:
2212 irq->type = KVM_S390_RESTART;
2213 break;
2214 case IRQ_PEND_SET_PREFIX:
2215 irq->type = KVM_S390_SIGP_SET_PREFIX;
2216 irq->u.prefix = li->irq.prefix;
2217 break;
2218 }
2219}
2220
2221int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2222{
2223 uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;
2224 unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2225 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2226 unsigned long pending_irqs;
2227 struct kvm_s390_irq irq;
2228 unsigned long irq_type;
2229 int cpuaddr;
2230 int n = 0;
2231
2232 spin_lock(&li->lock);
2233 pending_irqs = li->pending_irqs;
2234 memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2235 sizeof(sigp_emerg_pending));
2236 spin_unlock(&li->lock);
2237
2238 for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2239 memset(&irq, 0, sizeof(irq));
2240 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2241 continue;
2242 if (n + sizeof(irq) > len)
2243 return -ENOBUFS;
2244 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2245 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2246 return -EFAULT;
2247 n += sizeof(irq);
2248 }
2249
2250 if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2251 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2252 memset(&irq, 0, sizeof(irq));
2253 if (n + sizeof(irq) > len)
2254 return -ENOBUFS;
2255 irq.type = KVM_S390_INT_EMERGENCY;
2256 irq.u.emerg.code = cpuaddr;
2257 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2258 return -EFAULT;
2259 n += sizeof(irq);
2260 }
2261 }
2262
2263 if ((sigp_ctrl & SIGP_CTRL_C) &&
2264 (atomic_read(&vcpu->arch.sie_block->cpuflags) &
2265 CPUSTAT_ECALL_PEND)) {
2266 if (n + sizeof(irq) > len)
2267 return -ENOBUFS;
2268 memset(&irq, 0, sizeof(irq));
2269 irq.type = KVM_S390_INT_EXTERNAL_CALL;
2270 irq.u.extcall.code = sigp_ctrl & SIGP_CTRL_SCN_MASK;
2271 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2272 return -EFAULT;
2273 n += sizeof(irq);
2274 }
2275
2276 return n;
2277}
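Saving goes through KVM_S390_GET_IRQ_STATE; the -ENOBUFS returns above translate into a retry with a larger buffer. A hedged sketch, with helper name and sizing policy purely illustrative:

#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Save a vcpu's local interrupt state; returns bytes stored. */
static int get_irq_state(int vcpu_fd, void **out)
{
	struct kvm_s390_irq_state state = {
		.len = 4 * sizeof(struct kvm_s390_irq),
	};
	void *buf = NULL;
	int r;

	do {
		free(buf);
		buf = malloc(state.len);
		if (!buf)
			return -1;
		state.buf = (__u64)(unsigned long)buf;
		r = ioctl(vcpu_fd, KVM_S390_GET_IRQ_STATE, &state);
		state.len *= 2;
	} while (r < 0 && errno == ENOBUFS);

	if (r < 0) {
		free(buf);
		buf = NULL;
	}
	*out = buf;
	return r;
}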