/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!(atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

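/*
 * Mark an external call pending in the target VCPU's (E)SCA entry. The
 * cmpxchg succeeds only if no other call was pending, so concurrent
 * senders race for the single SIGP control slot.
 */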
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_or(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	atomic_andnot(CPUSTAT_ECALL_PEND, li->cpuflags);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.sie_block->ckc >= kvm_s390_get_tod_clock_fast(vcpu->kvm))
		return 0;
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & 0x400ul);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

static inline int is_ioirq(unsigned long irq_type)
{
	return ((irq_type >= IRQ_PEND_IO_ISC_0) &&
		(irq_type <= IRQ_PEND_IO_ISC_7));
}

static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
	       vcpu->arch.local_int.pending_irqs;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (IRQ_PEND_IO_ISC_0 + i));

	return active_mask;
}

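/*
 * Reduce the set of pending interrupts to those that are currently
 * deliverable: start from all pending bits and clear everything masked
 * off by the guest PSW or by the subclass masks in CR0, CR6 (I/O
 * interruption subclasses) and CR14 (machine check subclasses).
 */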
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	if (!(vcpu->arch.sie_block->gcr[14] &
	      vcpu->kvm->arch.float_int.mchk.cr14))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
		      &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_or(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_IO_MASK))
		return;
	else if (psw_ioint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			  (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

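/*
 * Write the machine check interruption information to the guest lowcore
 * and, where the corresponding facilities are available, to the extended
 * save area (vector and guarded-storage registers), then load the
 * machine check new PSW.
 */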
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc = write_guest_lc(vcpu,
			    offsetof(struct lowcore, restart_old_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

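/*
 * Deliver a program interruption: store the fields that belong to the
 * interruption code (translation-exception code, access IDs, monitor and
 * PER data), rewind the PSW for nullifying conditions, and exchange the
 * program old/new PSWs.
 */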
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bit 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio_interrupt++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 inti->ext.ext_params,
						 inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				  (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

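/*
 * Deliver the oldest queued I/O interruption for the interruption
 * subclass selected by irq_type: dequeue it from the matching ISC list
 * and store the interruption parameters in the guest lowcore.
 */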
static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti = NULL;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc_list = &fi->lists[irq_type - IRQ_PEND_IO_ISC_0];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io_int++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 inti->type,
						 ((__u32)inti->io.subchannel_id << 16) |
						 inti->io.subchannel_nr,
						 ((__u64)inti->io.io_int_parm << 32) |
						 inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = put_guest_lc(vcpu, inti->io.subchannel_id,
				  (u16 *)__LC_SUBCHANNEL_ID);
		rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
				   (u16 *)__LC_SUBCHANNEL_NR);
		rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
				   (u32 *)__LC_IO_INT_PARM);
		rc |= put_guest_lc(vcpu, inti->io.io_int_word,
				   (u32 *)__LC_IO_INT_WORD);
		rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		kfree(inti);
	}

	return rc ? -EFAULT : 0;
}

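/*
 * Delivery functions for the non-I/O interrupt types, indexed by their
 * IRQ_PEND_* bit number. I/O interrupts have no entry here; they are
 * dispatched through __deliver_io() instead.
 */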
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_MCHK_REP]       = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
	[IRQ_PEND_EXT_SERVICE]    = __deliver_service,
	[IRQ_PEND_PFAULT_DONE]    = __deliver_pfault_done,
	[IRQ_PEND_VIRTIO]         = __deliver_virtio,
};

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

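/*
 * Compute how long the VCPU may sleep: the minimum of the time until
 * the clock comparator fires and the remaining CPU timer, considering
 * only the timer sources the guest is enabled for. Returns 0 if a timer
 * interrupt is already due.
 */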
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	u64 now, cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
		sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);
		/* already expired or overflow? */
		if (!sltime || vcpu->arch.sie_block->ckc <= now)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}

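/*
 * Handle an enabled wait: block the VCPU until an interrupt becomes
 * deliverable or the earliest enabled timer source fires (backed by an
 * hrtimer). A disabled wait (all interruption classes masked) is not
 * handled here and is reported with -EOPNOTSUPP.
 */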
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might be already
	 * in kvm_vcpu_block without having the waitqueue set (polling)
	 */
	vcpu->valid_wakeup = true;
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

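/*
 * Deliver all deliverable pending interrupts, highest priority first
 * (the IRQ_PEND_* bits are ordered by interruption priority), then set
 * interception requests for whatever remains undeliverable.
 */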
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	deliver_irq_t func;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&irqs, IRQ_PEND_COUNT);
		if (is_ioirq(irq_type)) {
			rc = __deliver_io(vcpu, irq_type);
		} else {
			func = deliver_irq_funcs[irq_type];
			if (!func) {
				WARN_ON_ONCE(func == NULL);
				clear_bit(irq_type, &li->pending_irqs);
				continue;
			}
			rc = func(vcpu);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

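/*
 * Dequeue and return the first queued I/O interruption on the list for
 * the given ISC whose subchannel matches schid; a schid of 0 matches
 * any subchannel.
 */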
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

/*
 * Dequeue and return an I/O interrupt matching any of the interruption
 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
 */
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

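/*
 * For a service signal, ext_params carries the SCCB address in its
 * upper bits (SCCB_MASK) and the event-pending indication in the low
 * bits (SCCB_EVENT_PENDING); only one SCCB address can be pending at a
 * time, see the serialization comment below.
 */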
#define SCCB_MASK 0xFFFFFFF8
#define SCCB_EVENT_PENDING 0x3

static int __inject_service(struct kvm *kvm,
			    struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
	/*
	 * Early versions of the QEMU s390 bios will inject several
	 * service interrupts after another without handling a
	 * condition code indicating busy.
	 * We will silently ignore those superfluous sccb values.
	 * A future version of QEMU will take care of serialization
	 * of servc requests
	 */
	if (fi->srv_signal.ext_params & SCCB_MASK)
		goto out;
	fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
	set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
out:
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_virtio(struct kvm *kvm,
			   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_VIRTIO] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
	set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

static int __inject_pfault_done(struct kvm *kvm,
				struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_PFAULT] >=
		(ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_PFAULT] += 1;
	list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
	set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

#define CR_PENDING_SUBCLASS 28
static int __inject_float_mchk(struct kvm *kvm,
			       struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;

	spin_lock(&fi->lock);
	fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
	fi->mchk.mcic |= inti->mchk.mcic;
	set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	kfree(inti);
	return 0;
}

static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_float_interrupt *fi;
	struct list_head *list;
	int isc;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
		spin_unlock(&fi->lock);
		return -EBUSY;
	}
	fi->counters[FIRQ_CNTR_IO] += 1;

	if (inti->type & KVM_S390_INT_IO_AI_MASK)
		VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
	else
		VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
			 inti->io.subchannel_id >> 8,
			 inti->io.subchannel_id >> 1 & 0x3,
			 inti->io.subchannel_nr);
	isc = int_word_to_isc(inti->io.io_int_word);
	list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	list_add_tail(&inti->list, list);
	set_bit(IRQ_PEND_IO_ISC_0 + isc, &fi->pending_irqs);
	spin_unlock(&fi->lock);
	return 0;
}

/*
 * Find a destination VCPU for a floating irq and kick it.
 */
static void __floating_irq_kick(struct kvm *kvm, u64 type)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li;
	struct kvm_vcpu *dst_vcpu;
	int sigcpu, online_vcpus, nr_tries = 0;

	online_vcpus = atomic_read(&kvm->online_vcpus);
	if (!online_vcpus)
		return;

	/* find idle VCPUs first, then round robin */
	sigcpu = find_first_bit(fi->idle_mask, online_vcpus);
	if (sigcpu == online_vcpus) {
		do {
			sigcpu = fi->next_rr_cpu;
			fi->next_rr_cpu = (fi->next_rr_cpu + 1) % online_vcpus;
			/* avoid endless loops if all vcpus are stopped */
			if (nr_tries++ >= online_vcpus)
				return;
		} while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);

	/* make the VCPU drop out of the SIE, or wake it up if sleeping */
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (type) {
	case KVM_S390_MCHK:
		atomic_or(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_or(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_or(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(dst_vcpu);
}

1568static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1569{
6d3da241
JF
1570 u64 type = READ_ONCE(inti->type);
1571 int rc;
ba5c1e9b 1572
6d3da241
JF
1573 switch (type) {
1574 case KVM_S390_MCHK:
1575 rc = __inject_float_mchk(kvm, inti);
1576 break;
1577 case KVM_S390_INT_VIRTIO:
1578 rc = __inject_virtio(kvm, inti);
1579 break;
1580 case KVM_S390_INT_SERVICE:
1581 rc = __inject_service(kvm, inti);
1582 break;
1583 case KVM_S390_INT_PFAULT_DONE:
1584 rc = __inject_pfault_done(kvm, inti);
1585 break;
1586 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1587 rc = __inject_io(kvm, inti);
1588 break;
1589 default:
a91b8ebe 1590 rc = -EINVAL;
c05c4186 1591 }
1592 if (rc)
1593 return rc;
1594
96e0ed23 1595 __floating_irq_kick(kvm, type);
6d3da241 1596 return 0;
1597}
1598
1599int kvm_s390_inject_vm(struct kvm *kvm,
1600 struct kvm_s390_interrupt *s390int)
1601{
1602 struct kvm_s390_interrupt_info *inti;
428d53be 1603 int rc;
c05c4186 1604
1605 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1606 if (!inti)
1607 return -ENOMEM;
1608
1609 inti->type = s390int->type;
1610 switch (inti->type) {
ba5c1e9b 1611 case KVM_S390_INT_VIRTIO:
33e19115 1612 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
ba5c1e9b 1613 s390int->parm, s390int->parm64);
1614 inti->ext.ext_params = s390int->parm;
1615 inti->ext.ext_params2 = s390int->parm64;
1616 break;
1617 case KVM_S390_INT_SERVICE:
3f24ba15 1618 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
1619 inti->ext.ext_params = s390int->parm;
1620 break;
3c038e6b 1621 case KVM_S390_INT_PFAULT_DONE:
1622 inti->ext.ext_params2 = s390int->parm64;
1623 break;
48a3e950 1624 case KVM_S390_MCHK:
3f24ba15 1625 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
48a3e950 1626 s390int->parm64);
1627 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1628 inti->mchk.mcic = s390int->parm64;
1629 break;
d8346b7d 1630 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1631 inti->io.subchannel_id = s390int->parm >> 16;
1632 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1633 inti->io.io_int_parm = s390int->parm64 >> 32;
1634 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1635 break;
1636 default:
1637 kfree(inti);
1638 return -EINVAL;
1639 }
1640 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1641 2);
ba5c1e9b 1642
1643 rc = __inject_vm(kvm, inti);
1644 if (rc)
1645 kfree(inti);
1646 return rc;
1647}
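
From userspace this function is reached through the KVM_S390_INTERRUPT VM ioctl, so parm/parm64 must be packed exactly as the switch above unpacks them. A minimal sketch, assuming vm_fd is an open VM file descriptor and intparm is the interruption parameter to deliver:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Inject a floating I/O interrupt for subchannel 1, ISC 3. */
static int inject_io(int vm_fd, __u32 intparm)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_INT_IO(0, 0, 0, 0x0001),
                /* subchannel_id in the upper, subchannel_nr in the lower 16 bits */
                .parm = (0x0001u << 16) | 0x0001u,
                /* io_int_parm in the upper, io_int_word in the lower 32 bits */
                .parm64 = ((__u64)intparm << 32) | 0x80000000u | (3u << 27),
        };

        return ioctl(vm_fd, KVM_S390_INTERRUPT, &s390int);
}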
1648
15462e37 1649int kvm_s390_reinject_io_int(struct kvm *kvm,
1650 struct kvm_s390_interrupt_info *inti)
1651{
15462e37 1652 return __inject_vm(kvm, inti);
1653}
1654
1655int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1656 struct kvm_s390_irq *irq)
1657{
1658 irq->type = s390int->type;
1659 switch (irq->type) {
1660 case KVM_S390_PROGRAM_INT:
1661 if (s390int->parm & 0xffff0000)
1662 return -EINVAL;
1663 irq->u.pgm.code = s390int->parm;
1664 break;
1665 case KVM_S390_SIGP_SET_PREFIX:
1666 irq->u.prefix.address = s390int->parm;
1667 break;
1668 case KVM_S390_SIGP_STOP:
1669 irq->u.stop.flags = s390int->parm;
1670 break;
383d0b05 1671 case KVM_S390_INT_EXTERNAL_CALL:
94d1f564 1672 if (s390int->parm & 0xffff0000)
1673 return -EINVAL;
1674 irq->u.extcall.code = s390int->parm;
1675 break;
1676 case KVM_S390_INT_EMERGENCY:
94d1f564 1677 if (s390int->parm & 0xffff0000)
1678 return -EINVAL;
1679 irq->u.emerg.code = s390int->parm;
1680 break;
1681 case KVM_S390_MCHK:
1682 irq->u.mchk.mcic = s390int->parm64;
1683 break;
1684 }
1685 return 0;
1686}
1687
1688int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1689{
1690 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1691
1692 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1693}
1694
1695void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1696{
1697 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1698
1699 spin_lock(&li->lock);
1700 li->irq.stop.flags = 0;
1701 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1702 spin_unlock(&li->lock);
1703}
1704
79e87a10 1705static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
ba5c1e9b 1706{
0146a7b0 1707 int rc;
ba5c1e9b 1708
383d0b05 1709 switch (irq->type) {
ba5c1e9b 1710 case KVM_S390_PROGRAM_INT:
383d0b05 1711 rc = __inject_prog(vcpu, irq);
ba5c1e9b 1712 break;
b7e6e4d3 1713 case KVM_S390_SIGP_SET_PREFIX:
383d0b05 1714 rc = __inject_set_prefix(vcpu, irq);
b7e6e4d3 1715 break;
ba5c1e9b 1716 case KVM_S390_SIGP_STOP:
383d0b05 1717 rc = __inject_sigp_stop(vcpu, irq);
0146a7b0 1718 break;
ba5c1e9b 1719 case KVM_S390_RESTART:
383d0b05 1720 rc = __inject_sigp_restart(vcpu, irq);
0146a7b0 1721 break;
e029ae5b 1722 case KVM_S390_INT_CLOCK_COMP:
383d0b05 1723 rc = __inject_ckc(vcpu);
0146a7b0 1724 break;
e029ae5b 1725 case KVM_S390_INT_CPU_TIMER:
383d0b05 1726 rc = __inject_cpu_timer(vcpu);
82a12737 1727 break;
7697e71f 1728 case KVM_S390_INT_EXTERNAL_CALL:
383d0b05 1729 rc = __inject_extcall(vcpu, irq);
82a12737 1730 break;
ba5c1e9b 1731 case KVM_S390_INT_EMERGENCY:
383d0b05 1732 rc = __inject_sigp_emergency(vcpu, irq);
ba5c1e9b 1733 break;
48a3e950 1734 case KVM_S390_MCHK:
383d0b05 1735 rc = __inject_mchk(vcpu, irq);
48a3e950 1736 break;
3c038e6b 1737 case KVM_S390_INT_PFAULT_INIT:
383d0b05 1738 rc = __inject_pfault_init(vcpu, irq);
3c038e6b 1739 break;
1740 case KVM_S390_INT_VIRTIO:
1741 case KVM_S390_INT_SERVICE:
d8346b7d 1742 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
ba5c1e9b 1743 default:
0146a7b0 1744 rc = -EINVAL;
ba5c1e9b 1745 }
1746
1747 return rc;
1748}
1749
1750int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1751{
1752 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1753 int rc;
1754
1755 spin_lock(&li->lock);
1756 rc = do_inject_vcpu(vcpu, irq);
4ae3c081 1757 spin_unlock(&li->lock);
1758 if (!rc)
1759 kvm_s390_vcpu_wakeup(vcpu);
0146a7b0 1760 return rc;
ba5c1e9b 1761}
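
The per-VCPU counterpart is the KVM_S390_IRQ vcpu ioctl, which hands a struct kvm_s390_irq to kvm_s390_inject_vcpu(). A minimal sketch, assuming vcpu_fd is the target VCPU's file descriptor:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Deliver a SIGP emergency signal, as if sent by CPU address 1. */
static int inject_emergency(int vcpu_fd)
{
        struct kvm_s390_irq irq;

        memset(&irq, 0, sizeof(irq));
        irq.type = KVM_S390_INT_EMERGENCY;
        irq.u.emerg.code = 1;   /* CPU address of the signalling CPU */

        return ioctl(vcpu_fd, KVM_S390_IRQ, &irq);
}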
c05c4186 1762
6d3da241 1763static inline void clear_irq_list(struct list_head *_list)
c05c4186 1764{
6d3da241 1765 struct kvm_s390_interrupt_info *inti, *n;
c05c4186 1766
6d3da241 1767 list_for_each_entry_safe(inti, n, _list, list) {
1768 list_del(&inti->list);
1769 kfree(inti);
1770 }
1771}
1772
1773static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1774 struct kvm_s390_irq *irq)
c05c4186 1775{
94aa033e 1776 irq->type = inti->type;
c05c4186 1777 switch (inti->type) {
1778 case KVM_S390_INT_PFAULT_INIT:
1779 case KVM_S390_INT_PFAULT_DONE:
c05c4186 1780 case KVM_S390_INT_VIRTIO:
94aa033e 1781 irq->u.ext = inti->ext;
1782 break;
1783 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
94aa033e 1784 irq->u.io = inti->io;
c05c4186 1785 break;
c05c4186 1786 }
1787}
1788
1789void kvm_s390_clear_float_irqs(struct kvm *kvm)
1790{
1791 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1792 int i;
1793
1794 spin_lock(&fi->lock);
1795 fi->pending_irqs = 0;
1796 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
1797 memset(&fi->mchk, 0, sizeof(fi->mchk));
1798 for (i = 0; i < FIRQ_LIST_COUNT; i++)
1799 clear_irq_list(&fi->lists[i]);
1800 for (i = 0; i < FIRQ_MAX_COUNT; i++)
1801 fi->counters[i] = 0;
1802 spin_unlock(&fi->lock);
 1803}
1804
94aa033e 1805static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
1806{
1807 struct kvm_s390_interrupt_info *inti;
1808 struct kvm_s390_float_interrupt *fi;
94aa033e 1809 struct kvm_s390_irq *buf;
6d3da241 1810 struct kvm_s390_irq *irq;
94aa033e 1811 int max_irqs;
1812 int ret = 0;
1813 int n = 0;
6d3da241 1814 int i;
c05c4186 1815
1816 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
1817 return -EINVAL;
1818
1819 /*
1820 * We are already using -ENOMEM to signal
 1821 * userspace that it may retry with a bigger buffer,
1822 * so we need to use something else for this case
1823 */
1824 buf = vzalloc(len);
1825 if (!buf)
1826 return -ENOBUFS;
1827
1828 max_irqs = len / sizeof(struct kvm_s390_irq);
1829
1830 fi = &kvm->arch.float_int;
1831 spin_lock(&fi->lock);
1832 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
1833 list_for_each_entry(inti, &fi->lists[i], list) {
1834 if (n == max_irqs) {
1835 /* signal userspace to try again */
1836 ret = -ENOMEM;
1837 goto out;
1838 }
1839 inti_to_irq(inti, &buf[n]);
1840 n++;
1841 }
1842 }
1843 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
94aa033e 1844 if (n == max_irqs) {
1845 /* signal userspace to try again */
1846 ret = -ENOMEM;
6d3da241 1847 goto out;
c05c4186 1848 }
1849 irq = (struct kvm_s390_irq *) &buf[n];
1850 irq->type = KVM_S390_INT_SERVICE;
1851 irq->u.ext = fi->srv_signal;
1852 n++;
1853 }
1854 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
1855 if (n == max_irqs) {
1856 /* signal userspace to try again */
1857 ret = -ENOMEM;
1858 goto out;
1859 }
1860 irq = (struct kvm_s390_irq *) &buf[n];
1861 irq->type = KVM_S390_MCHK;
1862 irq->u.mchk = fi->mchk;
1863 n++;
 1864 }
1865
1866out:
c05c4186 1867 spin_unlock(&fi->lock);
1868 if (!ret && n > 0) {
1869 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
1870 ret = -EFAULT;
1871 }
1872 vfree(buf);
1873
1874 return ret < 0 ? ret : n;
1875}
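
Userspace reaches this through KVM_GET_DEVICE_ATTR on the FLIC fd with group KVM_DEV_FLIC_GET_ALL_IRQS; on success the ioctl returns the number of interrupts copied, while ENOMEM asks for a retry with a bigger buffer (bounded by KVM_S390_FLIC_MAX_BUFFER from the uapi headers). A sketch of that retry loop, assuming flic_fd was obtained via KVM_CREATE_DEVICE:

#include <errno.h>
#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

/* Returns the number of pending floating irqs, filling *out. */
static int get_float_irqs(int flic_fd, struct kvm_s390_irq **out)
{
        struct kvm_device_attr attr = { .group = KVM_DEV_FLIC_GET_ALL_IRQS };
        __u64 len = 64 * sizeof(struct kvm_s390_irq);
        void *buf = NULL;
        int n;

        do {
                free(buf);
                buf = malloc(len);
                if (!buf)
                        return -1;
                attr.addr = (__u64)(unsigned long)buf;
                attr.attr = len;        /* buffer size in bytes */
                n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
                len *= 2;
        } while (n < 0 && errno == ENOMEM && len <= KVM_S390_FLIC_MAX_BUFFER);

        if (n < 0) {
                free(buf);
                return -1;
        }
        *out = buf;
        return n;
}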
1876
1877static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1878{
1879 int r;
1880
1881 switch (attr->group) {
1882 case KVM_DEV_FLIC_GET_ALL_IRQS:
94aa033e 1883 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
1884 attr->attr);
1885 break;
1886 default:
1887 r = -EINVAL;
1888 }
1889
1890 return r;
1891}
1892
1893static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
1894 u64 addr)
1895{
1896 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
1897 void *target = NULL;
1898 void __user *source;
1899 u64 size;
1900
1901 if (get_user(inti->type, (u64 __user *)addr))
1902 return -EFAULT;
1903
1904 switch (inti->type) {
1905 case KVM_S390_INT_PFAULT_INIT:
1906 case KVM_S390_INT_PFAULT_DONE:
1907 case KVM_S390_INT_VIRTIO:
1908 case KVM_S390_INT_SERVICE:
1909 target = (void *) &inti->ext;
1910 source = &uptr->u.ext;
1911 size = sizeof(inti->ext);
1912 break;
1913 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1914 target = (void *) &inti->io;
1915 source = &uptr->u.io;
1916 size = sizeof(inti->io);
1917 break;
1918 case KVM_S390_MCHK:
1919 target = (void *) &inti->mchk;
1920 source = &uptr->u.mchk;
1921 size = sizeof(inti->mchk);
1922 break;
1923 default:
1924 return -EINVAL;
1925 }
1926
1927 if (copy_from_user(target, source, size))
1928 return -EFAULT;
1929
1930 return 0;
1931}
1932
1933static int enqueue_floating_irq(struct kvm_device *dev,
1934 struct kvm_device_attr *attr)
1935{
1936 struct kvm_s390_interrupt_info *inti = NULL;
1937 int r = 0;
1938 int len = attr->attr;
1939
1940 if (len % sizeof(struct kvm_s390_irq) != 0)
1941 return -EINVAL;
 1942 if (len > KVM_S390_FLIC_MAX_BUFFER)
1943 return -EINVAL;
1944
1945 while (len >= sizeof(struct kvm_s390_irq)) {
1946 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1947 if (!inti)
1948 return -ENOMEM;
1949
1950 r = copy_irq_from_user(inti, attr->addr);
1951 if (r) {
1952 kfree(inti);
1953 return r;
1954 }
1955 r = __inject_vm(dev->kvm, inti);
1956 if (r) {
1957 kfree(inti);
1958 return r;
1959 }
1960 len -= sizeof(struct kvm_s390_irq);
1961 attr->addr += sizeof(struct kvm_s390_irq);
1962 }
1963
1964 return r;
1965}
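
The enqueue side is symmetric: userspace passes an array of struct kvm_s390_irq via KVM_SET_DEVICE_ATTR with group KVM_DEV_FLIC_ENQUEUE, attr->attr holding the total length. A minimal sketch for a single service-signal interrupt, with flic_fd assumed as before:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int flic_enqueue_service(int flic_fd, __u32 sccb_addr)
{
        struct kvm_s390_irq irq;
        struct kvm_device_attr attr;

        memset(&irq, 0, sizeof(irq));
        irq.type = KVM_S390_INT_SERVICE;
        irq.u.ext.ext_params = sccb_addr;

        memset(&attr, 0, sizeof(attr));
        attr.group = KVM_DEV_FLIC_ENQUEUE;
        attr.addr = (__u64)(unsigned long)&irq;
        attr.attr = sizeof(irq);        /* must be a multiple of sizeof(irq) */

        return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}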
1966
1967static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
1968{
1969 if (id >= MAX_S390_IO_ADAPTERS)
1970 return NULL;
1971 return kvm->arch.adapters[id];
1972}
1973
1974static int register_io_adapter(struct kvm_device *dev,
1975 struct kvm_device_attr *attr)
1976{
1977 struct s390_io_adapter *adapter;
1978 struct kvm_s390_io_adapter adapter_info;
1979
1980 if (copy_from_user(&adapter_info,
1981 (void __user *)attr->addr, sizeof(adapter_info)))
1982 return -EFAULT;
1983
1984 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1985 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1986 return -EINVAL;
1987
1988 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1989 if (!adapter)
1990 return -ENOMEM;
1991
1992 INIT_LIST_HEAD(&adapter->maps);
1993 init_rwsem(&adapter->maps_lock);
1994 atomic_set(&adapter->nr_maps, 0);
1995 adapter->id = adapter_info.id;
1996 adapter->isc = adapter_info.isc;
1997 adapter->maskable = adapter_info.maskable;
1998 adapter->masked = false;
1999 adapter->swap = adapter_info.swap;
2000 adapter->suppressible = (adapter_info.flags) &
2001 KVM_S390_ADAPTER_SUPPRESSIBLE;
2002 dev->kvm->arch.adapters[adapter->id] = adapter;
2003
2004 return 0;
2005}
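
Registration is driven by KVM_SET_DEVICE_ATTR with group KVM_DEV_FLIC_ADAPTER_REGISTER. A minimal sketch, assuming the uapi struct kvm_s390_io_adapter carries the id/isc/maskable/swap/flags fields consumed above:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

/* Register a maskable, suppressible adapter on ISC 3. */
static int register_adapter(int flic_fd, __u32 id)
{
        struct kvm_s390_io_adapter info;
        struct kvm_device_attr attr;

        memset(&info, 0, sizeof(info));
        info.id = id;
        info.isc = 3;
        info.maskable = 1;
        info.flags = KVM_S390_ADAPTER_SUPPRESSIBLE;

        memset(&attr, 0, sizeof(attr));
        attr.group = KVM_DEV_FLIC_ADAPTER_REGISTER;
        attr.addr = (__u64)(unsigned long)&info;

        return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}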
2006
2007int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2008{
2009 int ret;
2010 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2011
2012 if (!adapter || !adapter->maskable)
2013 return -EINVAL;
2014 ret = adapter->masked;
2015 adapter->masked = masked;
2016 return ret;
2017}
2018
2019static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2020{
2021 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2022 struct s390_map_info *map;
2023 int ret;
2024
2025 if (!adapter || !addr)
2026 return -EINVAL;
2027
2028 map = kzalloc(sizeof(*map), GFP_KERNEL);
2029 if (!map) {
2030 ret = -ENOMEM;
2031 goto out;
2032 }
2033 INIT_LIST_HEAD(&map->list);
2034 map->guest_addr = addr;
6e0a0431 2035 map->addr = gmap_translate(kvm->arch.gmap, addr);
2036 if (map->addr == -EFAULT) {
2037 ret = -EFAULT;
2038 goto out;
2039 }
2040 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2041 if (ret < 0)
2042 goto out;
2043 BUG_ON(ret != 1);
2044 down_write(&adapter->maps_lock);
2045 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2046 list_add_tail(&map->list, &adapter->maps);
2047 ret = 0;
2048 } else {
2049 put_page(map->page);
2050 ret = -EINVAL;
2051 }
2052 up_write(&adapter->maps_lock);
2053out:
2054 if (ret)
2055 kfree(map);
2056 return ret;
2057}
2058
2059static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2060{
2061 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2062 struct s390_map_info *map, *tmp;
2063 int found = 0;
2064
2065 if (!adapter || !addr)
2066 return -EINVAL;
2067
2068 down_write(&adapter->maps_lock);
2069 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2070 if (map->guest_addr == addr) {
2071 found = 1;
2072 atomic_dec(&adapter->nr_maps);
2073 list_del(&map->list);
2074 put_page(map->page);
2075 kfree(map);
2076 break;
2077 }
2078 }
2079 up_write(&adapter->maps_lock);
2080
2081 return found ? 0 : -EINVAL;
2082}
2083
2084void kvm_s390_destroy_adapters(struct kvm *kvm)
2085{
2086 int i;
2087 struct s390_map_info *map, *tmp;
2088
2089 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2090 if (!kvm->arch.adapters[i])
2091 continue;
2092 list_for_each_entry_safe(map, tmp,
2093 &kvm->arch.adapters[i]->maps, list) {
2094 list_del(&map->list);
2095 put_page(map->page);
2096 kfree(map);
2097 }
2098 kfree(kvm->arch.adapters[i]);
2099 }
2100}
2101
2102static int modify_io_adapter(struct kvm_device *dev,
2103 struct kvm_device_attr *attr)
2104{
2105 struct kvm_s390_io_adapter_req req;
2106 struct s390_io_adapter *adapter;
2107 int ret;
2108
2109 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2110 return -EFAULT;
2111
2112 adapter = get_io_adapter(dev->kvm, req.id);
2113 if (!adapter)
2114 return -EINVAL;
2115 switch (req.type) {
2116 case KVM_S390_IO_ADAPTER_MASK:
2117 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2118 if (ret > 0)
2119 ret = 0;
2120 break;
2121 case KVM_S390_IO_ADAPTER_MAP:
2122 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2123 break;
2124 case KVM_S390_IO_ADAPTER_UNMAP:
2125 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2126 break;
2127 default:
2128 ret = -EINVAL;
2129 }
2130
2131 return ret;
2132}
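
All three request types funnel through KVM_DEV_FLIC_ADAPTER_MODIFY with a struct kvm_s390_io_adapter_req. A sketch that pins an indicator page for the adapter registered above (flic_fd and id assumed as before):

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int adapter_map_page(int flic_fd, __u32 id, __u64 guest_addr)
{
        struct kvm_s390_io_adapter_req req;
        struct kvm_device_attr attr;

        memset(&req, 0, sizeof(req));
        req.id = id;
        req.type = KVM_S390_IO_ADAPTER_MAP;     /* or _MASK / _UNMAP */
        req.addr = guest_addr;

        memset(&attr, 0, sizeof(attr));
        attr.group = KVM_DEV_FLIC_ADAPTER_MODIFY;
        attr.addr = (__u64)(unsigned long)&req;

        return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}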
2133
2134static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2136{
2137 const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2138 u32 schid;
2139
2140 if (attr->flags)
2141 return -EINVAL;
2142 if (attr->attr != sizeof(schid))
2143 return -EINVAL;
2144 if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2145 return -EFAULT;
2146 kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2147 /*
2148 * If userspace is conforming to the architecture, we can have at most
2149 * one pending I/O interrupt per subchannel, so this is effectively a
2150 * clear all.
2151 */
2152 return 0;
2153}
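
Userspace triggers this with group KVM_DEV_FLIC_CLEAR_IO_IRQ; attr->attr must be exactly sizeof(u32) and attr->addr must point at the subchannel id. A minimal sketch:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int clear_pending_io(int flic_fd, __u32 schid)
{
        struct kvm_device_attr attr = {
                .group = KVM_DEV_FLIC_CLEAR_IO_IRQ,
                .attr = sizeof(schid),          /* must be exactly 4 */
                .addr = (__u64)(unsigned long)&schid,
        };

        return ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
}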
2154
2155static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2156{
2157 int r = 0;
2158 unsigned int i;
2159 struct kvm_vcpu *vcpu;
2160
2161 switch (attr->group) {
2162 case KVM_DEV_FLIC_ENQUEUE:
2163 r = enqueue_floating_irq(dev, attr);
2164 break;
2165 case KVM_DEV_FLIC_CLEAR_IRQS:
67335e63 2166 kvm_s390_clear_float_irqs(dev->kvm);
c05c4186 2167 break;
2168 case KVM_DEV_FLIC_APF_ENABLE:
2169 dev->kvm->arch.gmap->pfault_enabled = 1;
2170 break;
2171 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2172 dev->kvm->arch.gmap->pfault_enabled = 0;
2173 /*
 2174 * Make sure no async faults are in transition when
 2175 * clearing the queues, so we do not need to worry
 2176 * about late-arriving workers.
2177 */
2178 synchronize_srcu(&dev->kvm->srcu);
2179 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2180 kvm_clear_async_pf_completion_queue(vcpu);
2181 break;
2182 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2183 r = register_io_adapter(dev, attr);
2184 break;
2185 case KVM_DEV_FLIC_ADAPTER_MODIFY:
2186 r = modify_io_adapter(dev, attr);
2187 break;
2188 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2189 r = clear_io_irq(dev->kvm, attr);
2190 break;
2191 default:
2192 r = -EINVAL;
2193 }
2194
2195 return r;
2196}
2197
2198static int flic_has_attr(struct kvm_device *dev,
2199 struct kvm_device_attr *attr)
2200{
2201 switch (attr->group) {
2202 case KVM_DEV_FLIC_GET_ALL_IRQS:
2203 case KVM_DEV_FLIC_ENQUEUE:
2204 case KVM_DEV_FLIC_CLEAR_IRQS:
2205 case KVM_DEV_FLIC_APF_ENABLE:
2206 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2207 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2208 case KVM_DEV_FLIC_ADAPTER_MODIFY:
6d28f789 2209 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2210 return 0;
2211 }
2212 return -ENXIO;
2213}
2214
2215static int flic_create(struct kvm_device *dev, u32 type)
2216{
2217 if (!dev)
2218 return -EINVAL;
2219 if (dev->kvm->arch.flic)
2220 return -EINVAL;
2221 dev->kvm->arch.flic = dev;
2222 return 0;
2223}
2224
2225static void flic_destroy(struct kvm_device *dev)
2226{
2227 dev->kvm->arch.flic = NULL;
2228 kfree(dev);
2229}
2230
2231/* s390 floating irq controller (flic) */
2232struct kvm_device_ops kvm_flic_ops = {
2233 .name = "kvm-flic",
2234 .get_attr = flic_get_attr,
2235 .set_attr = flic_set_attr,
4f129858 2236 .has_attr = flic_has_attr,
2237 .create = flic_create,
2238 .destroy = flic_destroy,
2239};
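
This ops table is bound to a device fd that userspace obtains with KVM_CREATE_DEVICE; the resulting fd is the flic_fd the earlier sketches assumed. Per flic_create() above, only one FLIC may exist per VM:

#include <linux/kvm.h>
#include <sys/ioctl.h>

/* Returns the FLIC fd, or -1 on error. */
static int create_flic(int vm_fd)
{
        struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };

        if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) < 0)
                return -1;
        return cd.fd;
}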
2240
2241static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2242{
2243 unsigned long bit;
2244
2245 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2246
2247 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2248}
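
A worked example of the conversion above (an editorial reading of the code): with swap set, bit numbers are reflected within a 64-bit word so that the generic set_bit(), which counts from the least significant bit, lands on the architecturally numbered MSB-first bit.

/*
 * Example: indicator byte at page offset 0, architected bit 0, swap = 1:
 *
 *   bit = 0 + (addr % PAGE_SIZE) * 8 = 0
 *   0 ^ (BITS_PER_LONG - 1)          = 63
 *
 * set_bit(63, map) sets the most significant bit of the first 64-bit
 * word, i.e. the leftmost bit of the page - architected bit 0.
 */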
2249
2250static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2251 u64 addr)
2252{
2253 struct s390_map_info *map;
2254
2255 if (!adapter)
2256 return NULL;
2257
2258 list_for_each_entry(map, &adapter->maps, list) {
2259 if (map->guest_addr == addr)
2260 return map;
2261 }
2262 return NULL;
2263}
2264
2265static int adapter_indicators_set(struct kvm *kvm,
2266 struct s390_io_adapter *adapter,
2267 struct kvm_s390_adapter_int *adapter_int)
2268{
2269 unsigned long bit;
2270 int summary_set, idx;
2271 struct s390_map_info *info;
2272 void *map;
2273
2274 info = get_map_info(adapter, adapter_int->ind_addr);
2275 if (!info)
2276 return -1;
2277 map = page_address(info->page);
2278 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2279 set_bit(bit, map);
2280 idx = srcu_read_lock(&kvm->srcu);
2281 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2282 set_page_dirty_lock(info->page);
2283 info = get_map_info(adapter, adapter_int->summary_addr);
2284 if (!info) {
2285 srcu_read_unlock(&kvm->srcu, idx);
2286 return -1;
2287 }
2288 map = page_address(info->page);
2289 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2290 adapter->swap);
2291 summary_set = test_and_set_bit(bit, map);
2292 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2293 set_page_dirty_lock(info->page);
2294 srcu_read_unlock(&kvm->srcu, idx);
2295 return summary_set ? 0 : 1;
2296}
2297
2298/*
2299 * < 0 - not injected due to error
2300 * = 0 - coalesced, summary indicator already active
2301 * > 0 - injected interrupt
2302 */
2303static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2304 struct kvm *kvm, int irq_source_id, int level,
2305 bool line_status)
2306{
2307 int ret;
2308 struct s390_io_adapter *adapter;
2309
2310 /* We're only interested in the 0->1 transition. */
2311 if (!level)
2312 return 0;
2313 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2314 if (!adapter)
2315 return -1;
2316 down_read(&adapter->maps_lock);
2317 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2318 up_read(&adapter->maps_lock);
2319 if ((ret > 0) && !adapter->masked) {
2320 struct kvm_s390_interrupt s390int = {
2321 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2322 .parm = 0,
2323 .parm64 = (adapter->isc << 27) | 0x80000000,
2324 };
2325 ret = kvm_s390_inject_vm(kvm, &s390int);
2326 if (ret == 0)
2327 ret = 1;
2328 }
2329 return ret;
2330}
2331
2332int kvm_set_routing_entry(struct kvm *kvm,
2333 struct kvm_kernel_irq_routing_entry *e,
2334 const struct kvm_irq_routing_entry *ue)
2335{
2336 int ret;
2337
2338 switch (ue->type) {
2339 case KVM_IRQ_ROUTING_S390_ADAPTER:
2340 e->set = set_adapter_int;
2341 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2342 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2343 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2344 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2345 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2346 ret = 0;
2347 break;
2348 default:
2349 ret = -EINVAL;
2350 }
2351
2352 return ret;
2353}
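
Routing entries of this type are installed from userspace with KVM_SET_GSI_ROUTING; anything that later raises that GSI (e.g. an irqfd) ends up in set_adapter_int() above. A minimal sketch for one GSI, with field names per the uapi struct kvm_irq_routing_s390_adapter:

#include <linux/kvm.h>
#include <stdlib.h>
#include <sys/ioctl.h>

static int route_adapter(int vm_fd, __u32 gsi, __u32 adapter_id,
                         __u64 ind_addr, __u64 summary_addr)
{
        struct kvm_irq_routing *r;
        int ret;

        r = calloc(1, sizeof(*r) + sizeof(r->entries[0]));
        if (!r)
                return -1;
        r->nr = 1;
        r->entries[0].gsi = gsi;
        r->entries[0].type = KVM_IRQ_ROUTING_S390_ADAPTER;
        r->entries[0].u.adapter.adapter_id = adapter_id;
        r->entries[0].u.adapter.ind_addr = ind_addr;
        r->entries[0].u.adapter.summary_addr = summary_addr;
        r->entries[0].u.adapter.ind_offset = 0;         /* indicator bit */
        r->entries[0].u.adapter.summary_offset = 0;     /* summary bit */

        ret = ioctl(vm_fd, KVM_SET_GSI_ROUTING, r);
        free(r);
        return ret;
}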
2354
2355int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2356 int irq_source_id, int level, bool line_status)
2357{
2358 return -EINVAL;
2359}
2360
2361int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2362{
2363 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2364 struct kvm_s390_irq *buf;
2365 int r = 0;
2366 int n;
2367
2368 buf = vmalloc(len);
2369 if (!buf)
2370 return -ENOMEM;
2371
2372 if (copy_from_user((void *) buf, irqstate, len)) {
2373 r = -EFAULT;
2374 goto out_free;
2375 }
2376
2377 /*
2378 * Don't allow setting the interrupt state
2379 * when there are already interrupts pending
2380 */
2381 spin_lock(&li->lock);
2382 if (li->pending_irqs) {
2383 r = -EBUSY;
2384 goto out_unlock;
2385 }
2386
2387 for (n = 0; n < len / sizeof(*buf); n++) {
2388 r = do_inject_vcpu(vcpu, &buf[n]);
2389 if (r)
2390 break;
2391 }
2392
2393out_unlock:
2394 spin_unlock(&li->lock);
2395out_free:
2396 vfree(buf);
2397
2398 return r;
2399}
2400
2401static void store_local_irq(struct kvm_s390_local_interrupt *li,
2402 struct kvm_s390_irq *irq,
2403 unsigned long irq_type)
2404{
2405 switch (irq_type) {
2406 case IRQ_PEND_MCHK_EX:
2407 case IRQ_PEND_MCHK_REP:
2408 irq->type = KVM_S390_MCHK;
2409 irq->u.mchk = li->irq.mchk;
2410 break;
2411 case IRQ_PEND_PROG:
2412 irq->type = KVM_S390_PROGRAM_INT;
2413 irq->u.pgm = li->irq.pgm;
2414 break;
2415 case IRQ_PEND_PFAULT_INIT:
2416 irq->type = KVM_S390_INT_PFAULT_INIT;
2417 irq->u.ext = li->irq.ext;
2418 break;
2419 case IRQ_PEND_EXT_EXTERNAL:
2420 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2421 irq->u.extcall = li->irq.extcall;
2422 break;
2423 case IRQ_PEND_EXT_CLOCK_COMP:
2424 irq->type = KVM_S390_INT_CLOCK_COMP;
2425 break;
2426 case IRQ_PEND_EXT_CPU_TIMER:
2427 irq->type = KVM_S390_INT_CPU_TIMER;
2428 break;
2429 case IRQ_PEND_SIGP_STOP:
2430 irq->type = KVM_S390_SIGP_STOP;
2431 irq->u.stop = li->irq.stop;
2432 break;
2433 case IRQ_PEND_RESTART:
2434 irq->type = KVM_S390_RESTART;
2435 break;
2436 case IRQ_PEND_SET_PREFIX:
2437 irq->type = KVM_S390_SIGP_SET_PREFIX;
2438 irq->u.prefix = li->irq.prefix;
2439 break;
2440 }
2441}
2442
2443int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2444{
a5bd7647 2445 int scn;
2446 unsigned long sigp_emerg_pending[BITS_TO_LONGS(KVM_MAX_VCPUS)];
2447 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2448 unsigned long pending_irqs;
2449 struct kvm_s390_irq irq;
2450 unsigned long irq_type;
2451 int cpuaddr;
2452 int n = 0;
2453
2454 spin_lock(&li->lock);
2455 pending_irqs = li->pending_irqs;
2456 memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2457 sizeof(sigp_emerg_pending));
2458 spin_unlock(&li->lock);
2459
2460 for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2461 memset(&irq, 0, sizeof(irq));
2462 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2463 continue;
2464 if (n + sizeof(irq) > len)
2465 return -ENOBUFS;
2466 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2467 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2468 return -EFAULT;
2469 n += sizeof(irq);
2470 }
2471
2472 if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2473 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2474 memset(&irq, 0, sizeof(irq));
2475 if (n + sizeof(irq) > len)
2476 return -ENOBUFS;
2477 irq.type = KVM_S390_INT_EMERGENCY;
2478 irq.u.emerg.code = cpuaddr;
2479 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2480 return -EFAULT;
2481 n += sizeof(irq);
2482 }
2483 }
2484
a5bd7647 2485 if (sca_ext_call_pending(vcpu, &scn)) {
2486 if (n + sizeof(irq) > len)
2487 return -ENOBUFS;
2488 memset(&irq, 0, sizeof(irq));
2489 irq.type = KVM_S390_INT_EXTERNAL_CALL;
a5bd7647 2490 irq.u.extcall.code = scn;
2491 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2492 return -EFAULT;
2493 n += sizeof(irq);
2494 }
2495
2496 return n;
2497}
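
Both directions are exposed as VCPU ioctls taking a struct kvm_s390_irq_state; GET returns the number of bytes stored, which a migration target can feed straight back into SET. A save/restore sketch, assuming src_fd and dst_fd are the corresponding VCPU fds:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int transfer_irq_state(int src_fd, int dst_fd)
{
        __u8 buf[64 * sizeof(struct kvm_s390_irq)];
        struct kvm_s390_irq_state state;
        int n;

        memset(&state, 0, sizeof(state));
        state.buf = (__u64)(unsigned long)buf;
        state.len = sizeof(buf);

        n = ioctl(src_fd, KVM_S390_GET_IRQ_STATE, &state);
        if (n < 0)
                return n;       /* errno == ENOBUFS: buffer too small */

        state.len = n;          /* bytes actually stored */
        return ioctl(dst_fd, KVM_S390_SET_IRQ_STATE, &state);
}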