// SPDX-License-Identifier: GPL-2.0
/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#define KMSG_COMPONENT "kvm-s390"
#define pr_fmt(fmt) KMSG_COMPONENT ": " fmt

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/vmalloc.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <linux/uaccess.h>
#include <asm/sclp.h>
#include <asm/isc.h>
#include <asm/gmap.h>
#include <asm/switch_to.h>
#include <asm/nmi.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static struct kvm_s390_gib *gib;

/* handle external calls via sigp interpretation facility */
static int sca_ext_call_pending(struct kvm_vcpu *vcpu, int *src_id)
{
	int c, scn;

	if (!kvm_s390_test_cpuflags(vcpu, CPUSTAT_ECALL_PEND))
		return 0;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl sigp_ctrl =
			sca->cpu[vcpu->vcpu_id].sigp_ctrl;

		c = sigp_ctrl.c;
		scn = sigp_ctrl.scn;
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (src_id)
		*src_id = scn;

	return c;
}

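/*
 * Atomically set the SIGP external-call indication in this VCPU's SCA
 * entry; fails with -EBUSY if another external call is already pending.
 */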
static int sca_inject_ext_call(struct kvm_vcpu *vcpu, int src_id)
{
	int expect, rc;

	BUG_ON(!kvm_s390_use_sca_entries());
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl new_val = {0}, old_val = *sigp_ctrl;

		new_val.scn = src_id;
		new_val.c = 1;
		old_val.c = 0;

		expect = old_val.value;
		rc = cmpxchg(&sigp_ctrl->value, old_val.value, new_val.value);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);

	if (rc != expect) {
		/* another external call is pending */
		return -EBUSY;
	}
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	return 0;
}

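/* Clear any SIGP external-call indication from this VCPU's SCA entry. */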
static void sca_clear_ext_call(struct kvm_vcpu *vcpu)
{
	int rc, expect;

	if (!kvm_s390_use_sca_entries())
		return;
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_ECALL_PEND);
	read_lock(&vcpu->kvm->arch.sca_lock);
	if (vcpu->kvm->arch.use_esca) {
		struct esca_block *sca = vcpu->kvm->arch.sca;
		union esca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union esca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	} else {
		struct bsca_block *sca = vcpu->kvm->arch.sca;
		union bsca_sigp_ctrl *sigp_ctrl =
			&(sca->cpu[vcpu->vcpu_id].sigp_ctrl);
		union bsca_sigp_ctrl old = *sigp_ctrl;

		expect = old.value;
		rc = cmpxchg(&sigp_ctrl->value, old.value, 0);
	}
	read_unlock(&vcpu->kvm->arch.sca_lock);
	WARN_ON(rc != expect); /* cannot clear? */
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	return psw_extint_disabled(vcpu) &&
	       psw_ioint_disabled(vcpu) &&
	       psw_mchk_disabled(vcpu);
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static int ckc_irq_pending(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;

	if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
		if ((s64)ckc >= (s64)now)
			return 0;
	} else if (ckc >= now) {
		return 0;
	}
	return ckc_interrupts_enabled(vcpu);
}

static int cpu_timer_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	return !psw_extint_disabled(vcpu) &&
	       (vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK);
}

static int cpu_timer_irq_pending(struct kvm_vcpu *vcpu)
{
	if (!cpu_timer_interrupts_enabled(vcpu))
		return 0;
	return kvm_s390_get_cpu_timer(vcpu) >> 63;
}

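/*
 * The helpers below convert between an ISC number, the per-ISC bit in
 * a CR6-style interruption subclass mask (ISC 0 is the leftmost bit of
 * the mask byte) and the I/O interruption word used for adapter
 * interrupts (ISC in bits 2-4, MSB-0 bit numbering).
 */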
static uint64_t isc_to_isc_bits(int isc)
{
	return (0x80 >> isc) << 24;
}

static inline u32 isc_to_int_word(u8 isc)
{
	return ((u32)isc << 27) | 0x80000000;
}

static inline u8 int_word_to_isc(u32 int_word)
{
	return (int_word & 0x38000000) >> 27;
}

/*
 * To use atomic bitmap functions, we have to provide a bitmap address
 * that is u64 aligned. However, the ipm might be u32 aligned.
 * Therefore, we logically start the bitmap at the very beginning of the
 * struct and fixup the bit number.
 */
#define IPM_BIT_OFFSET (offsetof(struct kvm_s390_gisa, ipm) * BITS_PER_BYTE)

static inline void gisa_set_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	set_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline u8 gisa_get_ipm(struct kvm_s390_gisa *gisa)
{
	return READ_ONCE(gisa->ipm);
}

static inline void gisa_clear_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline int gisa_tac_ipm_gisc(struct kvm_s390_gisa *gisa, u32 gisc)
{
	return test_and_clear_bit_inv(IPM_BIT_OFFSET + gisc, (unsigned long *) gisa);
}

static inline unsigned long pending_irqs_no_gisa(struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.float_int.pending_irqs |
		vcpu->arch.local_int.pending_irqs;
}

static inline unsigned long pending_irqs(struct kvm_vcpu *vcpu)
{
	return pending_irqs_no_gisa(vcpu) |
		gisa_get_ipm(vcpu->kvm->arch.gisa_int.origin) <<
			IRQ_PEND_IO_ISC_7;
}

static inline int isc_to_irq_type(unsigned long isc)
{
	return IRQ_PEND_IO_ISC_0 - isc;
}

static inline int irq_type_to_isc(unsigned long irq_type)
{
	return IRQ_PEND_IO_ISC_0 - irq_type;
}

static unsigned long disable_iscs(struct kvm_vcpu *vcpu,
				  unsigned long active_mask)
{
	int i;

	for (i = 0; i <= MAX_ISC; i++)
		if (!(vcpu->arch.sie_block->gcr[6] & isc_to_isc_bits(i)))
			active_mask &= ~(1UL << (isc_to_irq_type(i)));

	return active_mask;
}

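/*
 * Return the currently pending interrupts that are not masked off by
 * the guest PSW and control registers, i.e. those we may deliver now.
 */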
static unsigned long deliverable_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask;

	active_mask = pending_irqs(vcpu);
	if (!active_mask)
		return 0;

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (psw_ioint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_IO_MASK;
	else
		active_mask = disable_iscs(vcpu, active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_EMERGENCY_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_CPU_TIMER_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & CR0_SERVICE_SIGNAL_SUBMASK))
		__clear_bit(IRQ_PEND_EXT_SERVICE, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;
	/*
	 * Check both the floating and the local interrupts' cr14 because
	 * bit IRQ_PEND_MCHK_REP could be set in both cases.
	 */
	if (!(vcpu->arch.sie_block->gcr[14] &
	      (vcpu->kvm->arch.float_int.mchk.cr14 |
	       vcpu->arch.local_int.irq.mchk.cr14)))
		__clear_bit(IRQ_PEND_MCHK_REP, &active_mask);

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_WAIT);
	set_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_WAIT);
	clear_bit(vcpu->vcpu_id, vcpu->kvm->arch.idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	kvm_s390_clear_cpuflags(vcpu, CPUSTAT_IO_INT | CPUSTAT_EXT_INT |
				CPUSTAT_STOP_INT);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void set_intercept_indicators_io(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_IO_MASK))
		return;
	if (psw_ioint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_IO_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR6;
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_irqs_no_gisa(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable interrupts */
static void set_intercept_indicators(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_io(vcpu);
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	vcpu->stat.deliver_cputm++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	vcpu->stat.deliver_ckc++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "deliver: pfault init token 0x%llx",
		   ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

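/*
 * Write the machine-check interruption context (register-save areas,
 * extended save area and MCIC) into the guest lowcore and exchange the
 * machine-check old/new PSWs.
 */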
static int __write_machine_check(struct kvm_vcpu *vcpu,
				 struct kvm_s390_mchk_info *mchk)
{
	unsigned long ext_sa_addr;
	unsigned long lc;
	freg_t fprs[NUM_FPRS];
	union mci mci;
	int rc;

	mci.val = mchk->mcic;
	/* take care of lazy register loading */
	save_fpu_regs();
	save_access_regs(vcpu->run->s.regs.acrs);
	if (MACHINE_HAS_GS && vcpu->arch.gs_enabled)
		save_gs_cb(current->thread.gs_cb);

	/* Extended save area */
	rc = read_guest_lc(vcpu, __LC_MCESAD, &ext_sa_addr,
			   sizeof(unsigned long));
	/* Only bits 0 through 63-LC are used for address formation */
	lc = ext_sa_addr & MCESA_LC_MASK;
	if (test_kvm_facility(vcpu->kvm, 133)) {
		switch (lc) {
		case 0:
		case 10:
			ext_sa_addr &= ~0x3ffUL;
			break;
		case 11:
			ext_sa_addr &= ~0x7ffUL;
			break;
		case 12:
			ext_sa_addr &= ~0xfffUL;
			break;
		default:
			ext_sa_addr = 0;
			break;
		}
	} else {
		ext_sa_addr &= ~0x3ffUL;
	}

	if (!rc && mci.vr && ext_sa_addr && test_kvm_facility(vcpu->kvm, 129)) {
		if (write_guest_abs(vcpu, ext_sa_addr, vcpu->run->s.regs.vrs,
				    512))
			mci.vr = 0;
	} else {
		mci.vr = 0;
	}
	if (!rc && mci.gs && ext_sa_addr && test_kvm_facility(vcpu->kvm, 133)
	    && (lc == 11 || lc == 12)) {
		if (write_guest_abs(vcpu, ext_sa_addr + 1024,
				    &vcpu->run->s.regs.gscb, 32))
			mci.gs = 0;
	} else {
		mci.gs = 0;
	}

	/* General interruption information */
	rc |= put_guest_lc(vcpu, 1, (u8 __user *) __LC_AR_MODE_ID);
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, mci.val, (u64 __user *) __LC_MCCK_CODE);

	/* Register-save areas */
	if (MACHINE_HAS_VX) {
		convert_vx_to_fp(fprs, (__vector128 *) vcpu->run->s.regs.vrs);
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA, fprs, 128);
	} else {
		rc |= write_guest_lc(vcpu, __LC_FPREGS_SAVE_AREA,
				     vcpu->run->s.regs.fprs, 128);
	}
	rc |= write_guest_lc(vcpu, __LC_GPREGS_SAVE_AREA,
			     vcpu->run->s.regs.gprs, 128);
	rc |= put_guest_lc(vcpu, current->thread.fpu.fpc,
			   (u32 __user *) __LC_FP_CREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->todpr,
			   (u32 __user *) __LC_TOD_PROGREG_SAVE_AREA);
	rc |= put_guest_lc(vcpu, kvm_s390_get_cpu_timer(vcpu),
			   (u64 __user *) __LC_CPU_TIMER_SAVE_AREA);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->ckc >> 8,
			   (u64 __user *) __LC_CLOCK_COMP_SAVE_AREA);
	rc |= write_guest_lc(vcpu, __LC_AREGS_SAVE_AREA,
			     &vcpu->run->s.regs.acrs, 64);
	rc |= write_guest_lc(vcpu, __LC_CREGS_SAVE_AREA,
			     &vcpu->arch.sie_block->gcr, 128);

	/* Extended interruption information */
	rc |= put_guest_lc(vcpu, mchk->ext_damage_code,
			   (u32 __user *) __LC_EXT_DAMAGE_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA, &mchk->fixed_logout,
			     sizeof(mchk->fixed_logout));
	return rc ? -EFAULT : 0;
}

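/*
 * Combine the pending local and floating machine-check conditions and,
 * if anything was found, deliver them as one machine check.
 */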
static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk = {};
	int deliver = 0;
	int rc = 0;

	spin_lock(&fi->lock);
	spin_lock(&li->lock);
	if (test_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs) ||
	    test_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs)) {
		/*
		 * If there was an exigent machine check pending, then any
		 * repressible machine checks that might have been pending
		 * are indicated along with it, so always clear bits for
		 * repressible and exigent interrupts.
		 */
		mchk = li->irq.mchk;
		clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
		clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
		memset(&li->irq.mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	/*
	 * We indicate floating repressible conditions along with
	 * other pending conditions. Channel Report Pending and Channel
	 * Subsystem damage are the only two and are indicated by
	 * bits in mcic and masked in cr14.
	 */
	if (test_and_clear_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
		mchk.mcic |= fi->mchk.mcic;
		mchk.cr14 |= fi->mchk.cr14;
		memset(&fi->mchk, 0, sizeof(mchk));
		deliver = 1;
	}
	spin_unlock(&li->lock);
	spin_unlock(&fi->lock);

	if (deliver) {
		VCPU_EVENT(vcpu, 3, "deliver: machine check mcic 0x%llx",
			   mchk.mcic);
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_MCHK,
						 mchk.cr14, mchk.mcic);
		vcpu->stat.deliver_machine_check++;
		rc = __write_machine_check(vcpu, &mchk);
	}
	return rc;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 3, "%s", "deliver: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "deliver: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

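/*
 * Deliver the pending program interrupt, filling the lowcore fields
 * that are specific to the given program-interruption code.
 */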
static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilen;

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	ilen = pgm_info.flags & KVM_S390_PGM_FLAGS_ILC_MASK;
	VCPU_EVENT(vcpu, 3, "deliver: program irq code 0x%x, ilen:%d",
		   pgm_info.code, ilen);
	vcpu->stat.deliver_program++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && !(pgm_info.flags & KVM_S390_PGM_FLAGS_NO_REWIND))
		kvm_s390_rewind_psw(vcpu, ilen);

	/* bits 1+2 of the target are the ilc, so we can directly use ilen */
	rc |= put_guest_lc(vcpu, ilen, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, vcpu->arch.sie_block->gbea,
			   (u64 *) __LC_LAST_BREAK);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_ext_info ext;
	int rc = 0;

	spin_lock(&fi->lock);
	if (!(test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs))) {
		spin_unlock(&fi->lock);
		return 0;
	}
	ext = fi->srv_signal;
	memset(&fi->srv_signal, 0, sizeof(ext));
	clear_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	VCPU_EVENT(vcpu, 4, "deliver: sclp parameter 0x%x",
		   ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_SERVICE,
					 ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);

	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_PFAULT],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_PFAULT] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_PFAULT]))
		clear_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
						 KVM_S390_INT_PFAULT_DONE, 0,
						 inti->ext.ext_params2);
		VCPU_EVENT(vcpu, 4, "deliver: pfault done token 0x%llx",
			   inti->ext.ext_params2);

		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, PFAULT_DONE,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc = 0;

	spin_lock(&fi->lock);
	inti = list_first_entry_or_null(&fi->lists[FIRQ_LIST_VIRTIO],
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		VCPU_EVENT(vcpu, 4,
			   "deliver: virtio parm: 0x%x,parm64: 0x%llx",
			   inti->ext.ext_params, inti->ext.ext_params2);
		vcpu->stat.deliver_virtio++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				inti->ext.ext_params,
				inti->ext.ext_params2);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_VIRTIO] -= 1;
	}
	if (list_empty(&fi->lists[FIRQ_LIST_VIRTIO]))
		clear_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE,
				   (u16 *)__LC_EXT_INT_CODE);
		rc |= put_guest_lc(vcpu, VIRTIO_PARAM,
				   (u16 *)__LC_EXT_CPU_ADDR);
		rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
				     &vcpu->arch.sie_block->gpsw,
				     sizeof(psw_t));
		rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
				    &vcpu->arch.sie_block->gpsw,
				    sizeof(psw_t));
		rc |= put_guest_lc(vcpu, inti->ext.ext_params,
				   (u32 *)__LC_EXT_PARAMS);
		rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
				   (u64 *)__LC_EXT_PARAMS2);
		kfree(inti);
	}
	return rc ? -EFAULT : 0;
}

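/*
 * Copy the I/O interruption parameters into the guest lowcore and
 * exchange the I/O old/new PSWs.
 */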
static int __do_deliver_io(struct kvm_vcpu *vcpu, struct kvm_s390_io_info *io)
{
	int rc;

	rc  = put_guest_lc(vcpu, io->subchannel_id, (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, io->subchannel_nr, (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, io->io_int_parm, (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, io->io_int_word, (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw,
			     sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     unsigned long irq_type)
{
	struct list_head *isc_list;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_gisa_interrupt *gi = &vcpu->kvm->arch.gisa_int;
	struct kvm_s390_interrupt_info *inti = NULL;
	struct kvm_s390_io_info io;
	u32 isc;
	int rc = 0;

	fi = &vcpu->kvm->arch.float_int;

	spin_lock(&fi->lock);
	isc = irq_type_to_isc(irq_type);
	isc_list = &fi->lists[isc];
	inti = list_first_entry_or_null(isc_list,
					struct kvm_s390_interrupt_info,
					list);
	if (inti) {
		if (inti->type & KVM_S390_INT_IO_AI_MASK)
			VCPU_EVENT(vcpu, 4, "%s", "deliver: I/O (AI)");
		else
			VCPU_EVENT(vcpu, 4, "deliver: I/O %x ss %x schid %04x",
				   inti->io.subchannel_id >> 8,
				   inti->io.subchannel_id >> 1 & 0x3,
				   inti->io.subchannel_nr);

		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
				inti->type,
				((__u32)inti->io.subchannel_id << 16) |
					inti->io.subchannel_nr,
				((__u64)inti->io.io_int_parm << 32) |
					inti->io.io_int_word);
		list_del(&inti->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
	}
	if (list_empty(isc_list))
		clear_bit(irq_type, &fi->pending_irqs);
	spin_unlock(&fi->lock);

	if (inti) {
		rc = __do_deliver_io(vcpu, &(inti->io));
		kfree(inti);
		goto out;
	}

	if (gi->origin && gisa_tac_ipm_gisc(gi->origin, isc)) {
		/*
		 * In case an adapter interrupt was not delivered
		 * in SIE context, KVM will handle the delivery.
		 */
		VCPU_EVENT(vcpu, 4, "%s isc %u", "deliver: I/O (AI/gisa)", isc);
		memset(&io, 0, sizeof(io));
		io.io_int_word = isc_to_int_word(isc);
		vcpu->stat.deliver_io++;
		trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
			KVM_S390_INT_IO(1, 0, 0, 0),
			((__u32)io.subchannel_id << 16) |
				io.subchannel_nr,
			((__u64)io.io_int_parm << 32) |
				io.io_int_word);
		rc = __do_deliver_io(vcpu, &io);
	}
out:
	return rc;
}

/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	if (!sclp.has_sigpif)
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return sca_ext_call_pending(vcpu, NULL);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	if (deliverable_irqs(vcpu))
		return 1;

	if (kvm_cpu_has_pending_timer(vcpu))
		return 1;

	/* external call pending and deliverable */
	if (kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & CR0_EXTERNAL_CALL_SUBMASK))
		return 1;

	if (!exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		return 1;
	return 0;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	return ckc_irq_pending(vcpu) || cpu_timer_irq_pending(vcpu);
}

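/*
 * Number of nanoseconds the VCPU may sleep before the clock comparator
 * or the CPU timer (whichever is enabled and due first) fires; 0 means
 * a wakeup condition is already due.
 */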
static u64 __calculate_sltime(struct kvm_vcpu *vcpu)
{
	const u64 now = kvm_s390_get_tod_clock_fast(vcpu->kvm);
	const u64 ckc = vcpu->arch.sie_block->ckc;
	u64 cputm, sltime = 0;

	if (ckc_interrupts_enabled(vcpu)) {
		if (vcpu->arch.sie_block->gcr[0] & CR0_CLOCK_COMPARATOR_SIGN) {
			if ((s64)now < (s64)ckc)
				sltime = tod_to_ns((s64)ckc - (s64)now);
		} else if (now < ckc) {
			sltime = tod_to_ns(ckc - now);
		}
		/* already expired */
		if (!sltime)
			return 0;
		if (cpu_timer_interrupts_enabled(vcpu)) {
			cputm = kvm_s390_get_cpu_timer(vcpu);
			/* already expired? */
			if (cputm >> 63)
				return 0;
			return min(sltime, tod_to_ns(cputm));
		}
	} else if (cpu_timer_interrupts_enabled(vcpu)) {
		sltime = kvm_s390_get_cpu_timer(vcpu);
		/* already expired? */
		if (sltime >> 63)
			return 0;
	}
	return sltime;
}

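/*
 * Handle a wait-state exit: return at once if a wakeup condition
 * already exists, otherwise block the VCPU, arming the ckc hrtimer
 * when a timer interruption is enabled.
 */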
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu) &&
	    !cpu_timer_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	sltime = __calculate_sltime(vcpu);
	if (!sltime)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, sltime, HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 4, "enabled wait: %llu ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	/*
	 * We cannot move this into the if, as the CPU might already be
	 * in kvm_vcpu_block without having the waitqueue set (polling).
	 */
	vcpu->valid_wakeup = true;
	/*
	 * This is mostly to document that the read in swait_active could
	 * be moved before other stores, leading to subtle races.
	 * All current users do not store or use an atomic-like update.
	 */
	smp_mb__after_atomic();
	if (swait_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		swake_up_one(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
	/*
	 * The VCPU might not be sleeping but is executing the VSIE. Let's
	 * kick it, so it leaves the SIE to process the request.
	 */
	kvm_s390_vsie_kick(vcpu);
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	sltime = __calculate_sltime(vcpu);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (sltime && hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	sca_clear_ext_call(vcpu);
}

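/* Deliver all pending deliverable interrupts in order of their priority. */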
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc = 0;
	unsigned long irq_type;
	unsigned long irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (ckc_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	/* pending cpu timer conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	if (cpu_timer_irq_pending(vcpu))
		set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);

	while ((irqs = deliverable_irqs(vcpu)) && !rc) {
		/* bits are in the reverse order of interrupt priority */
		irq_type = find_last_bit(&irqs, IRQ_PEND_COUNT);
		switch (irq_type) {
		case IRQ_PEND_IO_ISC_0:
		case IRQ_PEND_IO_ISC_1:
		case IRQ_PEND_IO_ISC_2:
		case IRQ_PEND_IO_ISC_3:
		case IRQ_PEND_IO_ISC_4:
		case IRQ_PEND_IO_ISC_5:
		case IRQ_PEND_IO_ISC_6:
		case IRQ_PEND_IO_ISC_7:
			rc = __deliver_io(vcpu, irq_type);
			break;
		case IRQ_PEND_MCHK_EX:
		case IRQ_PEND_MCHK_REP:
			rc = __deliver_machine_check(vcpu);
			break;
		case IRQ_PEND_PROG:
			rc = __deliver_prog(vcpu);
			break;
		case IRQ_PEND_EXT_EMERGENCY:
			rc = __deliver_emergency_signal(vcpu);
			break;
		case IRQ_PEND_EXT_EXTERNAL:
			rc = __deliver_external_call(vcpu);
			break;
		case IRQ_PEND_EXT_CLOCK_COMP:
			rc = __deliver_ckc(vcpu);
			break;
		case IRQ_PEND_EXT_CPU_TIMER:
			rc = __deliver_cpu_timer(vcpu);
			break;
		case IRQ_PEND_RESTART:
			rc = __deliver_restart(vcpu);
			break;
		case IRQ_PEND_SET_PREFIX:
			rc = __deliver_set_prefix(vcpu);
			break;
		case IRQ_PEND_PFAULT_INIT:
			rc = __deliver_pfault_init(vcpu);
			break;
		case IRQ_PEND_EXT_SERVICE:
			rc = __deliver_service(vcpu);
			break;
		case IRQ_PEND_PFAULT_DONE:
			rc = __deliver_pfault_done(vcpu);
			break;
		case IRQ_PEND_VIRTIO:
			rc = __deliver_virtio(vcpu);
			break;
		default:
			WARN_ONCE(1, "Unknown pending irq type %ld", irq_type);
			clear_bit(irq_type, &li->pending_irqs);
		}
	}

	set_intercept_indicators(vcpu);

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_program++;
	VCPU_EVENT(vcpu, 3, "inject: program irq code 0x%x", irq->u.pgm.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   irq->u.pgm.code, 0);

	if (!(irq->u.pgm.flags & KVM_S390_PGM_FLAGS_ILC_VALID)) {
		/* auto detection if no valid ILC was given */
		irq->u.pgm.flags &= ~KVM_S390_PGM_FLAGS_ILC_MASK;
		irq->u.pgm.flags |= kvm_s390_get_ilen(vcpu);
		irq->u.pgm.flags |= KVM_S390_PGM_FLAGS_ILC_VALID;
	}

	if (irq->u.pgm.code == PGM_PER) {
		li->irq.pgm.code |= PGM_PER;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify PER related information */
		li->irq.pgm.per_address = irq->u.pgm.per_address;
		li->irq.pgm.per_code = irq->u.pgm.per_code;
		li->irq.pgm.per_atmid = irq->u.pgm.per_atmid;
		li->irq.pgm.per_access_id = irq->u.pgm.per_access_id;
	} else if (!(irq->u.pgm.code & PGM_PER)) {
		li->irq.pgm.code = (li->irq.pgm.code & PGM_PER) |
				   irq->u.pgm.code;
		li->irq.pgm.flags = irq->u.pgm.flags;
		/* only modify non-PER information */
		li->irq.pgm.trans_exc_code = irq->u.pgm.trans_exc_code;
		li->irq.pgm.mon_code = irq->u.pgm.mon_code;
		li->irq.pgm.data_exc_code = irq->u.pgm.data_exc_code;
		li->irq.pgm.mon_class_nr = irq->u.pgm.mon_class_nr;
		li->irq.pgm.exc_access_id = irq->u.pgm.exc_access_id;
		li->irq.pgm.op_access_id = irq->u.pgm.op_access_id;
	} else {
		li->irq.pgm = irq->u.pgm;
	}
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_pfault_init++;
	VCPU_EVENT(vcpu, 4, "inject: pfault init parameter block at 0x%llx",
		   irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	vcpu->stat.inject_external_call++;
	VCPU_EVENT(vcpu, 4, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp.has_sigpif)
		return sca_inject_ext_call(vcpu, src_id);

	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	vcpu->stat.inject_set_prefix++;
	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	vcpu->stat.inject_stop_signal++;
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_restart++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: restart int");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_emergency_signal++;
	VCPU_EVENT(vcpu, 4, "inject: emergency from cpu %u",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0);

	/* sending vcpu invalid */
	if (kvm_get_vcpu_by_id(vcpu->kvm, irq->u.emerg.code) == NULL)
		return -EINVAL;

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	vcpu->stat.inject_mchk++;
	VCPU_EVENT(vcpu, 3, "inject: machine check mcic 0x%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together; we just indicate the last occurrence of the
	 * corresponding machine check.
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_ckc++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: clock comparator external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	vcpu->stat.inject_cputm++;
	VCPU_EVENT(vcpu, 3, "%s", "inject: cpu timer external");
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	kvm_s390_set_cpuflags(vcpu, CPUSTAT_EXT_INT);
	return 0;
}

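/*
 * Dequeue the first I/O interrupt queued for the given ISC, optionally
 * restricted to a specific subchannel id/nr.
 */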
static struct kvm_s390_interrupt_info *get_io_int(struct kvm *kvm,
						  int isc, u32 schid)
{
	struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
	struct list_head *isc_list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
	struct kvm_s390_interrupt_info *iter;
	u16 id = (schid & 0xffff0000U) >> 16;
	u16 nr = schid & 0x0000ffffU;

	spin_lock(&fi->lock);
	list_for_each_entry(iter, isc_list, list) {
		if (schid && (id != iter->io.subchannel_id ||
			      nr != iter->io.subchannel_nr))
			continue;
		/* found an appropriate entry */
		list_del_init(&iter->list);
		fi->counters[FIRQ_CNTR_IO] -= 1;
		if (list_empty(isc_list))
			clear_bit(isc_to_irq_type(isc), &fi->pending_irqs);
		spin_unlock(&fi->lock);
		return iter;
	}
	spin_unlock(&fi->lock);
	return NULL;
}

static struct kvm_s390_interrupt_info *get_top_io_int(struct kvm *kvm,
						      u64 isc_mask, u32 schid)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int isc;

	for (isc = 0; isc <= MAX_ISC && !inti; isc++) {
		if (isc_mask & isc_to_isc_bits(isc))
			inti = get_io_int(kvm, isc, schid);
	}
	return inti;
}

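/*
 * Return (and clear from the IPM) the ISC of the highest-priority
 * adapter interrupt pending in the GISA, or -EINVAL if there is none
 * or a specific subchannel was requested (which the GISA cannot match).
 */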
1540static int get_top_gisa_isc(struct kvm *kvm, u64 isc_mask, u32 schid)
1541{
982cff42 1542 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
4b35f65e
MM
1543 unsigned long active_mask;
1544 int isc;
1545
1546 if (schid)
1547 goto out;
982cff42 1548 if (!gi->origin)
4b35f65e
MM
1549 goto out;
1550
982cff42 1551 active_mask = (isc_mask & gisa_get_ipm(gi->origin) << 24) << 32;
4b35f65e
MM
1552 while (active_mask) {
1553 isc = __fls(active_mask) ^ (BITS_PER_LONG - 1);
982cff42 1554 if (gisa_tac_ipm_gisc(gi->origin, isc))
4b35f65e
MM
1555 return isc;
1556 clear_bit_inv(isc, &active_mask);
1557 }
1558out:
1559 return -EINVAL;
1560}
1561
6d3da241
JF
1562/*
1563 * Dequeue and return an I/O interrupt matching any of the interruption
1564 * subclasses as designated by the isc mask in cr6 and the schid (if != 0).
4b35f65e
MM
1565 * Take into account the interrupts pending in the interrupt list and in GISA.
1566 *
1567 * Note that for a guest that does not enable I/O interrupts
1568 * but relies on TPI, a flood of classical interrupts may starve
1569 * out adapter interrupts on the same isc. Linux guests do not
1570 * behave that way, and the issue can be worked around by configuring
1571 * different iscs for classical and adapter interrupts in the guest,
1572 * but we may want to revisit this in the future.
6d3da241 1573 */
fa6b7fe9 1574struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
6d3da241
JF
1575 u64 isc_mask, u32 schid)
1576{
982cff42 1577 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
4b35f65e 1578 struct kvm_s390_interrupt_info *inti, *tmp_inti;
6d3da241
JF
1579 int isc;
1580
4b35f65e
MM
1581 inti = get_top_io_int(kvm, isc_mask, schid);
1582
1583 isc = get_top_gisa_isc(kvm, isc_mask, schid);
1584 if (isc < 0)
1585 /* no AI in GISA */
1586 goto out;
1587
1588 if (!inti)
1589 /* AI in GISA but no classical IO int */
1590 goto gisa_out;
1591
1592 /* both types of interrupts present */
1593 if (int_word_to_isc(inti->io.io_int_word) <= isc) {
1594 /* classical IO int with higher priority */
982cff42 1595 gisa_set_ipm_gisc(gi->origin, isc);
4b35f65e 1596 goto out;
6d3da241 1597 }
4b35f65e
MM
1598gisa_out:
1599 tmp_inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1600 if (tmp_inti) {
1601 tmp_inti->type = KVM_S390_INT_IO(1, 0, 0, 0);
1602 tmp_inti->io.io_int_word = isc_to_int_word(isc);
1603 if (inti)
1604 kvm_s390_reinject_io_int(kvm, inti);
1605 inti = tmp_inti;
1606 } else
982cff42 1607 gisa_set_ipm_gisc(gi->origin, isc);
4b35f65e 1608out:
6d3da241
JF
1609 return inti;
1610}
1611
1612#define SCCB_MASK 0xFFFFFFF8
1613#define SCCB_EVENT_PENDING 0x3
1614
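/*
 * Editorial note: the ext_params word of a service signal carries the
 * SCCB address in the bits covered by SCCB_MASK and the event-pending
 * indication in its low bits. __inject_service() below accumulates
 * the event-pending bits unconditionally but accepts only one SCCB
 * address at a time; further addresses are silently dropped until the
 * pending one has been delivered.
 */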
1615static int __inject_service(struct kvm *kvm,
1616 struct kvm_s390_interrupt_info *inti)
1617{
1618 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1619
ccc40c53 1620 kvm->stat.inject_service_signal++;
6d3da241
JF
1621 spin_lock(&fi->lock);
1622 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_EVENT_PENDING;
1623 /*
1624 * Early versions of the QEMU s390 bios inject several service
1625 * interrupts one after another without handling the busy
1626 * condition code.
1627 * We silently ignore those superfluous sccb values.
1628 * A future version of QEMU will take care of serializing
1629 * its servc requests.
1630 */
1631 if (fi->srv_signal.ext_params & SCCB_MASK)
1632 goto out;
1633 fi->srv_signal.ext_params |= inti->ext.ext_params & SCCB_MASK;
1634 set_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs);
1635out:
1636 spin_unlock(&fi->lock);
1637 kfree(inti);
1638 return 0;
1639}
1640
1641static int __inject_virtio(struct kvm *kvm,
1642 struct kvm_s390_interrupt_info *inti)
1643{
1644 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1645
ccc40c53 1646 kvm->stat.inject_virtio++;
6d3da241
JF
1647 spin_lock(&fi->lock);
1648 if (fi->counters[FIRQ_CNTR_VIRTIO] >= KVM_S390_MAX_VIRTIO_IRQS) {
1649 spin_unlock(&fi->lock);
1650 return -EBUSY;
1651 }
1652 fi->counters[FIRQ_CNTR_VIRTIO] += 1;
1653 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_VIRTIO]);
1654 set_bit(IRQ_PEND_VIRTIO, &fi->pending_irqs);
1655 spin_unlock(&fi->lock);
1656 return 0;
1657}
1658
1659static int __inject_pfault_done(struct kvm *kvm,
1660 struct kvm_s390_interrupt_info *inti)
1661{
1662 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1663
ccc40c53 1664 kvm->stat.inject_pfault_done++;
6d3da241
JF
1665 spin_lock(&fi->lock);
1666 if (fi->counters[FIRQ_CNTR_PFAULT] >=
1667 (ASYNC_PF_PER_VCPU * KVM_MAX_VCPUS)) {
1668 spin_unlock(&fi->lock);
1669 return -EBUSY;
1670 }
1671 fi->counters[FIRQ_CNTR_PFAULT] += 1;
1672 list_add_tail(&inti->list, &fi->lists[FIRQ_LIST_PFAULT]);
1673 set_bit(IRQ_PEND_PFAULT_DONE, &fi->pending_irqs);
1674 spin_unlock(&fi->lock);
1675 return 0;
1676}
1677
1678#define CR_PENDING_SUBCLASS 28
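/*
 * Editorial note: floating machine checks are accumulated rather than
 * queued. The mcic bits of a new request are OR-ed into the pending
 * record and only the pending subclass bit of cr14 is taken over, so
 * any number of repressible reports collapses into a single
 * IRQ_PEND_MCHK_REP indication.
 */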
1679static int __inject_float_mchk(struct kvm *kvm,
1680 struct kvm_s390_interrupt_info *inti)
1681{
1682 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1683
ccc40c53 1684 kvm->stat.inject_float_mchk++;
6d3da241
JF
1685 spin_lock(&fi->lock);
1686 fi->mchk.cr14 |= inti->mchk.cr14 & (1UL << CR_PENDING_SUBCLASS);
1687 fi->mchk.mcic |= inti->mchk.mcic;
1688 set_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs);
1689 spin_unlock(&fi->lock);
1690 kfree(inti);
1691 return 0;
1692}
1693
1694static int __inject_io(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
fa6b7fe9 1695{
982cff42 1696 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
fa6b7fe9 1697 struct kvm_s390_float_interrupt *fi;
6d3da241
JF
1698 struct list_head *list;
1699 int isc;
fa6b7fe9 1700
ccc40c53 1701 kvm->stat.inject_io++;
d7c5cb01
MM
1702 isc = int_word_to_isc(inti->io.io_int_word);
1703
982cff42 1704 if (gi->origin && inti->type & KVM_S390_INT_IO_AI_MASK) {
d7c5cb01 1705 VM_EVENT(kvm, 4, "%s isc %1u", "inject: I/O (AI/gisa)", isc);
982cff42 1706 gisa_set_ipm_gisc(gi->origin, isc);
d7c5cb01
MM
1707 kfree(inti);
1708 return 0;
1709 }
1710
fa6b7fe9
CH
1711 fi = &kvm->arch.float_int;
1712 spin_lock(&fi->lock);
6d3da241
JF
1713 if (fi->counters[FIRQ_CNTR_IO] >= KVM_S390_MAX_FLOAT_IRQS) {
1714 spin_unlock(&fi->lock);
1715 return -EBUSY;
a91b8ebe 1716 }
6d3da241
JF
1717 fi->counters[FIRQ_CNTR_IO] += 1;
1718
dcc98ea6
CB
1719 if (inti->type & KVM_S390_INT_IO_AI_MASK)
1720 VM_EVENT(kvm, 4, "%s", "inject: I/O (AI)");
1721 else
1722 VM_EVENT(kvm, 4, "inject: I/O %x ss %x schid %04x",
1723 inti->io.subchannel_id >> 8,
1724 inti->io.subchannel_id >> 1 & 0x3,
1725 inti->io.subchannel_nr);
6d3da241
JF
1726 list = &fi->lists[FIRQ_LIST_IO_ISC_0 + isc];
1727 list_add_tail(&inti->list, list);
ee739f4b 1728 set_bit(isc_to_irq_type(isc), &fi->pending_irqs);
fa6b7fe9 1729 spin_unlock(&fi->lock);
6d3da241 1730 return 0;
fa6b7fe9 1731}
ba5c1e9b 1732
96e0ed23
DH
1733/*
1734 * Find a destination VCPU for a floating irq and kick it.
1735 */
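/*
 * Selection policy (editorial summary): an idle VCPU is preferred so
 * delivery does not preempt a running one; otherwise VCPUs are picked
 * round-robin via next_rr_cpu, skipping stopped VCPUs and giving up
 * after one full pass so a guest with all VCPUs stopped cannot make
 * this loop forever.
 */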
1736static void __floating_irq_kick(struct kvm *kvm, u64 type)
ba5c1e9b 1737{
96e0ed23
DH
1738 struct kvm_vcpu *dst_vcpu;
1739 int sigcpu, online_vcpus, nr_tries = 0;
1740
1741 online_vcpus = atomic_read(&kvm->online_vcpus);
1742 if (!online_vcpus)
1743 return;
1744
1745 /* find idle VCPUs first, then round robin */
246b7218 1746 sigcpu = find_first_bit(kvm->arch.idle_mask, online_vcpus);
96e0ed23
DH
1747 if (sigcpu == online_vcpus) {
1748 do {
246b7218
MM
1749 sigcpu = kvm->arch.float_int.next_rr_cpu++;
1750 kvm->arch.float_int.next_rr_cpu %= online_vcpus;
96e0ed23
DH
1751 /* avoid endless loops if all vcpus are stopped */
1752 if (nr_tries++ >= online_vcpus)
1753 return;
1754 } while (is_vcpu_stopped(kvm_get_vcpu(kvm, sigcpu)));
1755 }
1756 dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
1757
1758 /* make the VCPU drop out of the SIE, or wake it up if sleeping */
96e0ed23
DH
1759 switch (type) {
1760 case KVM_S390_MCHK:
2018224d 1761 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_STOP_INT);
96e0ed23
DH
1762 break;
1763 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
982cff42
MM
1764 if (!(type & KVM_S390_INT_IO_AI_MASK &&
1765 kvm->arch.gisa_int.origin))
a9810327 1766 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_IO_INT);
96e0ed23
DH
1767 break;
1768 default:
2018224d 1769 kvm_s390_set_cpuflags(dst_vcpu, CPUSTAT_EXT_INT);
96e0ed23
DH
1770 break;
1771 }
96e0ed23
DH
1772 kvm_s390_vcpu_wakeup(dst_vcpu);
1773}
1774
1775static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
1776{
6d3da241
JF
1777 u64 type = READ_ONCE(inti->type);
1778 int rc;
ba5c1e9b 1779
6d3da241
JF
1780 switch (type) {
1781 case KVM_S390_MCHK:
1782 rc = __inject_float_mchk(kvm, inti);
1783 break;
1784 case KVM_S390_INT_VIRTIO:
1785 rc = __inject_virtio(kvm, inti);
1786 break;
1787 case KVM_S390_INT_SERVICE:
1788 rc = __inject_service(kvm, inti);
1789 break;
1790 case KVM_S390_INT_PFAULT_DONE:
1791 rc = __inject_pfault_done(kvm, inti);
1792 break;
1793 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
1794 rc = __inject_io(kvm, inti);
1795 break;
1796 default:
a91b8ebe 1797 rc = -EINVAL;
c05c4186 1798 }
6d3da241
JF
1799 if (rc)
1800 return rc;
1801
96e0ed23 1802 __floating_irq_kick(kvm, type);
6d3da241 1803 return 0;
c05c4186
JF
1804}
1805
1806int kvm_s390_inject_vm(struct kvm *kvm,
1807 struct kvm_s390_interrupt *s390int)
1808{
1809 struct kvm_s390_interrupt_info *inti;
428d53be 1810 int rc;
c05c4186 1811
ba5c1e9b
CO
1812 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
1813 if (!inti)
1814 return -ENOMEM;
1815
c05c4186
JF
1816 inti->type = s390int->type;
1817 switch (inti->type) {
ba5c1e9b 1818 case KVM_S390_INT_VIRTIO:
33e19115 1819 VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
ba5c1e9b 1820 s390int->parm, s390int->parm64);
ba5c1e9b
CO
1821 inti->ext.ext_params = s390int->parm;
1822 inti->ext.ext_params2 = s390int->parm64;
1823 break;
1824 case KVM_S390_INT_SERVICE:
3f24ba15 1825 VM_EVENT(kvm, 4, "inject: sclp parm:%x", s390int->parm);
ba5c1e9b
CO
1826 inti->ext.ext_params = s390int->parm;
1827 break;
3c038e6b 1828 case KVM_S390_INT_PFAULT_DONE:
3c038e6b
DD
1829 inti->ext.ext_params2 = s390int->parm64;
1830 break;
48a3e950 1831 case KVM_S390_MCHK:
3f24ba15 1832 VM_EVENT(kvm, 3, "inject: machine check mcic 0x%llx",
48a3e950 1833 s390int->parm64);
48a3e950
CH
1834 inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
1835 inti->mchk.mcic = s390int->parm64;
1836 break;
d8346b7d 1837 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
d8346b7d
CH
1838 inti->io.subchannel_id = s390int->parm >> 16;
1839 inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
1840 inti->io.io_int_parm = s390int->parm64 >> 32;
1841 inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
1842 break;
ba5c1e9b
CO
1843 default:
1844 kfree(inti);
1845 return -EINVAL;
1846 }
ade38c31
CH
1847 trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
1848 2);
ba5c1e9b 1849
428d53be
DH
1850 rc = __inject_vm(kvm, inti);
1851 if (rc)
1852 kfree(inti);
1853 return rc;
ba5c1e9b
CO
1854}
1855
15462e37 1856int kvm_s390_reinject_io_int(struct kvm *kvm,
2f32d4ea
CH
1857 struct kvm_s390_interrupt_info *inti)
1858{
15462e37 1859 return __inject_vm(kvm, inti);
2f32d4ea
CH
1860}
1861
383d0b05
JF
1862int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
1863 struct kvm_s390_irq *irq)
1864{
1865 irq->type = s390int->type;
1866 switch (irq->type) {
1867 case KVM_S390_PROGRAM_INT:
1868 if (s390int->parm & 0xffff0000)
1869 return -EINVAL;
1870 irq->u.pgm.code = s390int->parm;
1871 break;
1872 case KVM_S390_SIGP_SET_PREFIX:
1873 irq->u.prefix.address = s390int->parm;
1874 break;
2822545f
DH
1875 case KVM_S390_SIGP_STOP:
1876 irq->u.stop.flags = s390int->parm;
1877 break;
383d0b05 1878 case KVM_S390_INT_EXTERNAL_CALL:
94d1f564 1879 if (s390int->parm & 0xffff0000)
383d0b05
JF
1880 return -EINVAL;
1881 irq->u.extcall.code = s390int->parm;
1882 break;
1883 case KVM_S390_INT_EMERGENCY:
94d1f564 1884 if (s390int->parm & 0xffff0000)
383d0b05
JF
1885 return -EINVAL;
1886 irq->u.emerg.code = s390int->parm;
1887 break;
1888 case KVM_S390_MCHK:
1889 irq->u.mchk.mcic = s390int->parm64;
1890 break;
1891 }
1892 return 0;
1893}
1894
6cddd432
DH
1895int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
1896{
1897 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1898
1899 return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1900}
1901
1902void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
1903{
1904 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1905
1906 spin_lock(&li->lock);
1907 li->irq.stop.flags = 0;
1908 clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
1909 spin_unlock(&li->lock);
1910}
1911
79e87a10 1912static int do_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
ba5c1e9b 1913{
0146a7b0 1914 int rc;
ba5c1e9b 1915
383d0b05 1916 switch (irq->type) {
ba5c1e9b 1917 case KVM_S390_PROGRAM_INT:
383d0b05 1918 rc = __inject_prog(vcpu, irq);
ba5c1e9b 1919 break;
b7e6e4d3 1920 case KVM_S390_SIGP_SET_PREFIX:
383d0b05 1921 rc = __inject_set_prefix(vcpu, irq);
b7e6e4d3 1922 break;
ba5c1e9b 1923 case KVM_S390_SIGP_STOP:
383d0b05 1924 rc = __inject_sigp_stop(vcpu, irq);
0146a7b0 1925 break;
ba5c1e9b 1926 case KVM_S390_RESTART:
383d0b05 1927 rc = __inject_sigp_restart(vcpu, irq);
0146a7b0 1928 break;
e029ae5b 1929 case KVM_S390_INT_CLOCK_COMP:
383d0b05 1930 rc = __inject_ckc(vcpu);
0146a7b0 1931 break;
e029ae5b 1932 case KVM_S390_INT_CPU_TIMER:
383d0b05 1933 rc = __inject_cpu_timer(vcpu);
82a12737 1934 break;
7697e71f 1935 case KVM_S390_INT_EXTERNAL_CALL:
383d0b05 1936 rc = __inject_extcall(vcpu, irq);
82a12737 1937 break;
ba5c1e9b 1938 case KVM_S390_INT_EMERGENCY:
383d0b05 1939 rc = __inject_sigp_emergency(vcpu, irq);
ba5c1e9b 1940 break;
48a3e950 1941 case KVM_S390_MCHK:
383d0b05 1942 rc = __inject_mchk(vcpu, irq);
48a3e950 1943 break;
3c038e6b 1944 case KVM_S390_INT_PFAULT_INIT:
383d0b05 1945 rc = __inject_pfault_init(vcpu, irq);
3c038e6b 1946 break;
ba5c1e9b
CO
1947 case KVM_S390_INT_VIRTIO:
1948 case KVM_S390_INT_SERVICE:
d8346b7d 1949 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
ba5c1e9b 1950 default:
0146a7b0 1951 rc = -EINVAL;
ba5c1e9b 1952 }
79e87a10
JF
1953
1954 return rc;
1955}
1956
1957int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
1958{
1959 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
1960 int rc;
1961
1962 spin_lock(&li->lock);
1963 rc = do_inject_vcpu(vcpu, irq);
4ae3c081 1964 spin_unlock(&li->lock);
0146a7b0
JF
1965 if (!rc)
1966 kvm_s390_vcpu_wakeup(vcpu);
0146a7b0 1967 return rc;
ba5c1e9b 1968}
c05c4186 1969
6d3da241 1970static inline void clear_irq_list(struct list_head *_list)
c05c4186 1971{
6d3da241 1972 struct kvm_s390_interrupt_info *inti, *n;
c05c4186 1973
6d3da241 1974 list_for_each_entry_safe(inti, n, _list, list) {
c05c4186
JF
1975 list_del(&inti->list);
1976 kfree(inti);
1977 }
c05c4186
JF
1978}
1979
94aa033e
JF
1980static void inti_to_irq(struct kvm_s390_interrupt_info *inti,
1981 struct kvm_s390_irq *irq)
c05c4186 1982{
94aa033e 1983 irq->type = inti->type;
c05c4186 1984 switch (inti->type) {
3c038e6b
DD
1985 case KVM_S390_INT_PFAULT_INIT:
1986 case KVM_S390_INT_PFAULT_DONE:
c05c4186 1987 case KVM_S390_INT_VIRTIO:
94aa033e 1988 irq->u.ext = inti->ext;
c05c4186
JF
1989 break;
1990 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
94aa033e 1991 irq->u.io = inti->io;
c05c4186 1992 break;
c05c4186 1993 }
c05c4186
JF
1994}
1995
6d3da241
JF
1996void kvm_s390_clear_float_irqs(struct kvm *kvm)
1997{
1998 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
1999 int i;
2000
2001 spin_lock(&fi->lock);
f2ae45ed
JF
2002 fi->pending_irqs = 0;
2003 memset(&fi->srv_signal, 0, sizeof(fi->srv_signal));
2004 memset(&fi->mchk, 0, sizeof(fi->mchk));
6d3da241
JF
2005 for (i = 0; i < FIRQ_LIST_COUNT; i++)
2006 clear_irq_list(&fi->lists[i]);
2007 for (i = 0; i < FIRQ_MAX_COUNT; i++)
2008 fi->counters[i] = 0;
2009 spin_unlock(&fi->lock);
24160af6 2010 kvm_s390_gisa_clear(kvm);
6d3da241
JF
2011}
2012
94aa033e 2013static int get_all_floating_irqs(struct kvm *kvm, u8 __user *usrbuf, u64 len)
c05c4186 2014{
982cff42 2015 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
c05c4186
JF
2016 struct kvm_s390_interrupt_info *inti;
2017 struct kvm_s390_float_interrupt *fi;
94aa033e 2018 struct kvm_s390_irq *buf;
6d3da241 2019 struct kvm_s390_irq *irq;
94aa033e 2020 int max_irqs;
c05c4186
JF
2021 int ret = 0;
2022 int n = 0;
6d3da241 2023 int i;
c05c4186 2024
94aa033e
JF
2025 if (len > KVM_S390_FLIC_MAX_BUFFER || len == 0)
2026 return -EINVAL;
2027
2028 /*
2029 * -ENOMEM already signals userspace that it may retry
2030 * with a bigger buffer, so a different error code is
2031 * needed when the allocation itself fails.
2032 */
2033 buf = vzalloc(len);
2034 if (!buf)
2035 return -ENOBUFS;
2036
2037 max_irqs = len / sizeof(struct kvm_s390_irq);
2038
982cff42 2039 if (gi->origin && gisa_get_ipm(gi->origin)) {
24160af6
MM
2040 for (i = 0; i <= MAX_ISC; i++) {
2041 if (n == max_irqs) {
2042 /* signal userspace to try again */
2043 ret = -ENOMEM;
2044 goto out_nolock;
2045 }
982cff42 2046 if (gisa_tac_ipm_gisc(gi->origin, i)) {
24160af6
MM
2047 irq = (struct kvm_s390_irq *) &buf[n];
2048 irq->type = KVM_S390_INT_IO(1, 0, 0, 0);
2049 irq->u.io.io_int_word = isc_to_int_word(i);
2050 n++;
2051 }
2052 }
2053 }
c05c4186
JF
2054 fi = &kvm->arch.float_int;
2055 spin_lock(&fi->lock);
6d3da241
JF
2056 for (i = 0; i < FIRQ_LIST_COUNT; i++) {
2057 list_for_each_entry(inti, &fi->lists[i], list) {
2058 if (n == max_irqs) {
2059 /* signal userspace to try again */
2060 ret = -ENOMEM;
2061 goto out;
2062 }
2063 inti_to_irq(inti, &buf[n]);
2064 n++;
2065 }
2066 }
2067 if (test_bit(IRQ_PEND_EXT_SERVICE, &fi->pending_irqs)) {
94aa033e 2068 if (n == max_irqs) {
c05c4186
JF
2069 /* signal userspace to try again */
2070 ret = -ENOMEM;
6d3da241 2071 goto out;
c05c4186 2072 }
6d3da241
JF
2073 irq = (struct kvm_s390_irq *) &buf[n];
2074 irq->type = KVM_S390_INT_SERVICE;
2075 irq->u.ext = fi->srv_signal;
c05c4186
JF
2076 n++;
2077 }
6d3da241
JF
2078 if (test_bit(IRQ_PEND_MCHK_REP, &fi->pending_irqs)) {
2079 if (n == max_irqs) {
2080 /* signal userspace to try again */
2081 ret = -ENOMEM;
2082 goto out;
2083 }
2084 irq = (struct kvm_s390_irq *) &buf[n];
2085 irq->type = KVM_S390_MCHK;
2086 irq->u.mchk = fi->mchk;
2087 n++;
2088 }
2089
2090out:
c05c4186 2091 spin_unlock(&fi->lock);
24160af6 2092out_nolock:
94aa033e
JF
2093 if (!ret && n > 0) {
2094 if (copy_to_user(usrbuf, buf, sizeof(struct kvm_s390_irq) * n))
2095 ret = -EFAULT;
2096 }
2097 vfree(buf);
c05c4186
JF
2098
2099 return ret < 0 ? ret : n;
2100}
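/*
 * Illustrative userspace sketch (editorial, not part of this file):
 * draining the floating IRQ list through the FLIC device, growing the
 * buffer on -ENOMEM as the comment above describes. Assumes flic_fd
 * was obtained via KVM_CREATE_DEVICE with KVM_DEV_TYPE_FLIC and that
 * <linux/kvm.h>, <sys/ioctl.h>, <stdlib.h> and <errno.h> are included.
 */
static int fetch_all_floating_irqs(int flic_fd, struct kvm_s390_irq **out)
{
	__u64 len = 64 * sizeof(struct kvm_s390_irq);
	struct kvm_device_attr attr = {
		.group = KVM_DEV_FLIC_GET_ALL_IRQS,
	};
	int n;

	for (;;) {
		void *buf = malloc(len);

		if (!buf)
			return -1;
		attr.attr = len;
		attr.addr = (__u64)(unsigned long)buf;
		n = ioctl(flic_fd, KVM_GET_DEVICE_ATTR, &attr);
		if (n >= 0) {		/* n = number of IRQs copied */
			*out = buf;
			return n;
		}
		free(buf);
		if (errno != ENOMEM || len >= KVM_S390_FLIC_MAX_BUFFER)
			return -1;	/* hard error, not "buffer too small" */
		len *= 2;		/* retry with a bigger buffer */
	}
}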
2101
2c1a48f2
YMZ
2102static int flic_ais_mode_get_all(struct kvm *kvm, struct kvm_device_attr *attr)
2103{
2104 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2105 struct kvm_s390_ais_all ais;
2106
2107 if (attr->attr < sizeof(ais))
2108 return -EINVAL;
2109
2110 if (!test_kvm_facility(kvm, 72))
2111 return -ENOTSUPP;
2112
2113 mutex_lock(&fi->ais_lock);
2114 ais.simm = fi->simm;
2115 ais.nimm = fi->nimm;
2116 mutex_unlock(&fi->ais_lock);
2117
2118 if (copy_to_user((void __user *)attr->addr, &ais, sizeof(ais)))
2119 return -EFAULT;
2120
2121 return 0;
2122}
2123
c05c4186
JF
2124static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2125{
2126 int r;
2127
2128 switch (attr->group) {
2129 case KVM_DEV_FLIC_GET_ALL_IRQS:
94aa033e 2130 r = get_all_floating_irqs(dev->kvm, (u8 __user *) attr->addr,
c05c4186
JF
2131 attr->attr);
2132 break;
2c1a48f2
YMZ
2133 case KVM_DEV_FLIC_AISM_ALL:
2134 r = flic_ais_mode_get_all(dev->kvm, attr);
2135 break;
c05c4186
JF
2136 default:
2137 r = -EINVAL;
2138 }
2139
2140 return r;
2141}
2142
2143static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
2144 u64 addr)
2145{
2146 struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
2147 void *target = NULL;
2148 void __user *source;
2149 u64 size;
2150
2151 if (get_user(inti->type, (u64 __user *)addr))
2152 return -EFAULT;
2153
2154 switch (inti->type) {
3c038e6b
DD
2155 case KVM_S390_INT_PFAULT_INIT:
2156 case KVM_S390_INT_PFAULT_DONE:
c05c4186
JF
2157 case KVM_S390_INT_VIRTIO:
2158 case KVM_S390_INT_SERVICE:
2159 target = (void *) &inti->ext;
2160 source = &uptr->u.ext;
2161 size = sizeof(inti->ext);
2162 break;
2163 case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
2164 target = (void *) &inti->io;
2165 source = &uptr->u.io;
2166 size = sizeof(inti->io);
2167 break;
2168 case KVM_S390_MCHK:
2169 target = (void *) &inti->mchk;
2170 source = &uptr->u.mchk;
2171 size = sizeof(inti->mchk);
2172 break;
2173 default:
2174 return -EINVAL;
2175 }
2176
2177 if (copy_from_user(target, source, size))
2178 return -EFAULT;
2179
2180 return 0;
2181}
2182
2183static int enqueue_floating_irq(struct kvm_device *dev,
2184 struct kvm_device_attr *attr)
2185{
2186 struct kvm_s390_interrupt_info *inti = NULL;
2187 int r = 0;
2188 int len = attr->attr;
2189
2190 if (len % sizeof(struct kvm_s390_irq) != 0)
2191 return -EINVAL;
2192 else if (len > KVM_S390_FLIC_MAX_BUFFER)
2193 return -EINVAL;
2194
2195 while (len >= sizeof(struct kvm_s390_irq)) {
2196 inti = kzalloc(sizeof(*inti), GFP_KERNEL);
2197 if (!inti)
2198 return -ENOMEM;
2199
2200 r = copy_irq_from_user(inti, attr->addr);
2201 if (r) {
2202 kfree(inti);
2203 return r;
2204 }
a91b8ebe
JF
2205 r = __inject_vm(dev->kvm, inti);
2206 if (r) {
2207 kfree(inti);
2208 return r;
2209 }
c05c4186
JF
2210 len -= sizeof(struct kvm_s390_irq);
2211 attr->addr += sizeof(struct kvm_s390_irq);
2212 }
2213
2214 return r;
2215}
2216
841b91c5
CH
2217static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
2218{
2219 if (id >= MAX_S390_IO_ADAPTERS)
2220 return NULL;
2221 return kvm->arch.adapters[id];
2222}
2223
2224static int register_io_adapter(struct kvm_device *dev,
2225 struct kvm_device_attr *attr)
2226{
2227 struct s390_io_adapter *adapter;
2228 struct kvm_s390_io_adapter adapter_info;
2229
2230 if (copy_from_user(&adapter_info,
2231 (void __user *)attr->addr, sizeof(adapter_info)))
2232 return -EFAULT;
2233
2234 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
2235 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
2236 return -EINVAL;
2237
2238 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
2239 if (!adapter)
2240 return -ENOMEM;
2241
2242 INIT_LIST_HEAD(&adapter->maps);
2243 init_rwsem(&adapter->maps_lock);
2244 atomic_set(&adapter->nr_maps, 0);
2245 adapter->id = adapter_info.id;
2246 adapter->isc = adapter_info.isc;
2247 adapter->maskable = adapter_info.maskable;
2248 adapter->masked = false;
2249 adapter->swap = adapter_info.swap;
08fab50d
FL
2250 adapter->suppressible = (adapter_info.flags) &
2251 KVM_S390_ADAPTER_SUPPRESSIBLE;
841b91c5
CH
2252 dev->kvm->arch.adapters[adapter->id] = adapter;
2253
2254 return 0;
2255}
2256
2257int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
2258{
2259 int ret;
2260 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2261
2262 if (!adapter || !adapter->maskable)
2263 return -EINVAL;
2264 ret = adapter->masked;
2265 adapter->masked = masked;
2266 return ret;
2267}
2268
2269static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
2270{
2271 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2272 struct s390_map_info *map;
2273 int ret;
2274
2275 if (!adapter || !addr)
2276 return -EINVAL;
2277
2278 map = kzalloc(sizeof(*map), GFP_KERNEL);
2279 if (!map) {
2280 ret = -ENOMEM;
2281 goto out;
2282 }
2283 INIT_LIST_HEAD(&map->list);
2284 map->guest_addr = addr;
6e0a0431 2285 map->addr = gmap_translate(kvm->arch.gmap, addr);
841b91c5
CH
2286 if (map->addr == -EFAULT) {
2287 ret = -EFAULT;
2288 goto out;
2289 }
2290 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
2291 if (ret < 0)
2292 goto out;
2293 BUG_ON(ret != 1);
2294 down_write(&adapter->maps_lock);
2295 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
2296 list_add_tail(&map->list, &adapter->maps);
2297 ret = 0;
2298 } else {
2299 put_page(map->page);
2300 ret = -EINVAL;
2301 }
2302 up_write(&adapter->maps_lock);
2303out:
2304 if (ret)
2305 kfree(map);
2306 return ret;
2307}
2308
2309static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
2310{
2311 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2312 struct s390_map_info *map, *tmp;
2313 int found = 0;
2314
2315 if (!adapter || !addr)
2316 return -EINVAL;
2317
2318 down_write(&adapter->maps_lock);
2319 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
2320 if (map->guest_addr == addr) {
2321 found = 1;
2322 atomic_dec(&adapter->nr_maps);
2323 list_del(&map->list);
2324 put_page(map->page);
2325 kfree(map);
2326 break;
2327 }
2328 }
2329 up_write(&adapter->maps_lock);
2330
2331 return found ? 0 : -EINVAL;
2332}
2333
2334void kvm_s390_destroy_adapters(struct kvm *kvm)
2335{
2336 int i;
2337 struct s390_map_info *map, *tmp;
2338
2339 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
2340 if (!kvm->arch.adapters[i])
2341 continue;
2342 list_for_each_entry_safe(map, tmp,
2343 &kvm->arch.adapters[i]->maps, list) {
2344 list_del(&map->list);
2345 put_page(map->page);
2346 kfree(map);
2347 }
2348 kfree(kvm->arch.adapters[i]);
2349 }
2350}
2351
2352static int modify_io_adapter(struct kvm_device *dev,
2353 struct kvm_device_attr *attr)
2354{
2355 struct kvm_s390_io_adapter_req req;
2356 struct s390_io_adapter *adapter;
2357 int ret;
2358
2359 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2360 return -EFAULT;
2361
2362 adapter = get_io_adapter(dev->kvm, req.id);
2363 if (!adapter)
2364 return -EINVAL;
2365 switch (req.type) {
2366 case KVM_S390_IO_ADAPTER_MASK:
2367 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
2368 if (ret > 0)
2369 ret = 0;
2370 break;
2371 case KVM_S390_IO_ADAPTER_MAP:
2372 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
2373 break;
2374 case KVM_S390_IO_ADAPTER_UNMAP:
2375 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
2376 break;
2377 default:
2378 ret = -EINVAL;
2379 }
2380
2381 return ret;
2382}
2383
6d28f789
HP
2384static int clear_io_irq(struct kvm *kvm, struct kvm_device_attr *attr)
2386{
2387 const u64 isc_mask = 0xffUL << 24; /* all iscs set */
2388 u32 schid;
2389
2390 if (attr->flags)
2391 return -EINVAL;
2392 if (attr->attr != sizeof(schid))
2393 return -EINVAL;
2394 if (copy_from_user(&schid, (void __user *) attr->addr, sizeof(schid)))
2395 return -EFAULT;
4dd6f17e
MM
2396 if (!schid)
2397 return -EINVAL;
6d28f789
HP
2398 kfree(kvm_s390_get_io_int(kvm, isc_mask, schid));
2399 /*
2400 * If userspace is conforming to the architecture, we can have at most
2401 * one pending I/O interrupt per subchannel, so this is effectively a
2402 * clear all.
2403 */
2404 return 0;
2405}
2406
51978393
FL
2407static int modify_ais_mode(struct kvm *kvm, struct kvm_device_attr *attr)
2408{
2409 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2410 struct kvm_s390_ais_req req;
2411 int ret = 0;
2412
1ba15b24 2413 if (!test_kvm_facility(kvm, 72))
51978393
FL
2414 return -ENOTSUPP;
2415
2416 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
2417 return -EFAULT;
2418
2419 if (req.isc > MAX_ISC)
2420 return -EINVAL;
2421
2422 trace_kvm_s390_modify_ais_mode(req.isc,
2423 (fi->simm & AIS_MODE_MASK(req.isc)) ?
2424 (fi->nimm & AIS_MODE_MASK(req.isc)) ?
2425 2 : KVM_S390_AIS_MODE_SINGLE :
2426 KVM_S390_AIS_MODE_ALL, req.mode);
2427
2428 mutex_lock(&fi->ais_lock);
2429 switch (req.mode) {
2430 case KVM_S390_AIS_MODE_ALL:
2431 fi->simm &= ~AIS_MODE_MASK(req.isc);
2432 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2433 break;
2434 case KVM_S390_AIS_MODE_SINGLE:
2435 fi->simm |= AIS_MODE_MASK(req.isc);
2436 fi->nimm &= ~AIS_MODE_MASK(req.isc);
2437 break;
2438 default:
2439 ret = -EINVAL;
2440 }
2441 mutex_unlock(&fi->ais_lock);
2442
2443 return ret;
2444}
2445
a8920950
YMZ
2446static int kvm_s390_inject_airq(struct kvm *kvm,
2447 struct s390_io_adapter *adapter)
2448{
2449 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2450 struct kvm_s390_interrupt s390int = {
2451 .type = KVM_S390_INT_IO(1, 0, 0, 0),
2452 .parm = 0,
2496c8e7 2453 .parm64 = isc_to_int_word(adapter->isc),
a8920950
YMZ
2454 };
2455 int ret = 0;
2456
1ba15b24 2457 if (!test_kvm_facility(kvm, 72) || !adapter->suppressible)
a8920950
YMZ
2458 return kvm_s390_inject_vm(kvm, &s390int);
2459
2460 mutex_lock(&fi->ais_lock);
2461 if (fi->nimm & AIS_MODE_MASK(adapter->isc)) {
2462 trace_kvm_s390_airq_suppressed(adapter->id, adapter->isc);
2463 goto out;
2464 }
2465
2466 ret = kvm_s390_inject_vm(kvm, &s390int);
2467 if (!ret && (fi->simm & AIS_MODE_MASK(adapter->isc))) {
2468 fi->nimm |= AIS_MODE_MASK(adapter->isc);
2469 trace_kvm_s390_modify_ais_mode(adapter->isc,
2470 KVM_S390_AIS_MODE_SINGLE, 2);
2471 }
2472out:
2473 mutex_unlock(&fi->ais_lock);
2474 return ret;
2475}
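/*
 * Editorial summary of the adapter-interruption-suppression (AIS)
 * logic above: in ALL mode (simm and nimm bits for the ISC clear)
 * every adapter interrupt is injected; in SINGLE mode (simm bit set)
 * the first successful injection also sets the nimm bit, so further
 * interrupts on that ISC are suppressed until userspace re-arms the
 * ISC with another KVM_DEV_FLIC_AISM request.
 */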
2476
2477static int flic_inject_airq(struct kvm *kvm, struct kvm_device_attr *attr)
2478{
2479 unsigned int id = attr->attr;
2480 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
2481
2482 if (!adapter)
2483 return -EINVAL;
2484
2485 return kvm_s390_inject_airq(kvm, adapter);
2486}
2487
2c1a48f2
YMZ
2488static int flic_ais_mode_set_all(struct kvm *kvm, struct kvm_device_attr *attr)
2489{
2490 struct kvm_s390_float_interrupt *fi = &kvm->arch.float_int;
2491 struct kvm_s390_ais_all ais;
2492
2493 if (!test_kvm_facility(kvm, 72))
2494 return -ENOTSUPP;
2495
2496 if (copy_from_user(&ais, (void __user *)attr->addr, sizeof(ais)))
2497 return -EFAULT;
2498
2499 mutex_lock(&fi->ais_lock);
2500 fi->simm = ais.simm;
2501 fi->nimm = ais.nimm;
2502 mutex_unlock(&fi->ais_lock);
2503
2504 return 0;
2505}
2506
c05c4186
JF
2507static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
2508{
2509 int r = 0;
3c038e6b
DD
2510 unsigned int i;
2511 struct kvm_vcpu *vcpu;
c05c4186
JF
2512
2513 switch (attr->group) {
2514 case KVM_DEV_FLIC_ENQUEUE:
2515 r = enqueue_floating_irq(dev, attr);
2516 break;
2517 case KVM_DEV_FLIC_CLEAR_IRQS:
67335e63 2518 kvm_s390_clear_float_irqs(dev->kvm);
c05c4186 2519 break;
3c038e6b
DD
2520 case KVM_DEV_FLIC_APF_ENABLE:
2521 dev->kvm->arch.gmap->pfault_enabled = 1;
2522 break;
2523 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2524 dev->kvm->arch.gmap->pfault_enabled = 0;
2525 /*
2526 * Make sure no async faults are in transition when
2527 * clearing the queues, so we do not need to worry
2528 * about workers arriving late.
2529 */
2530 synchronize_srcu(&dev->kvm->srcu);
2531 kvm_for_each_vcpu(i, vcpu, dev->kvm)
2532 kvm_clear_async_pf_completion_queue(vcpu);
2533 break;
841b91c5
CH
2534 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2535 r = register_io_adapter(dev, attr);
2536 break;
2537 case KVM_DEV_FLIC_ADAPTER_MODIFY:
2538 r = modify_io_adapter(dev, attr);
2539 break;
6d28f789
HP
2540 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
2541 r = clear_io_irq(dev->kvm, attr);
2542 break;
51978393
FL
2543 case KVM_DEV_FLIC_AISM:
2544 r = modify_ais_mode(dev->kvm, attr);
2545 break;
a8920950
YMZ
2546 case KVM_DEV_FLIC_AIRQ_INJECT:
2547 r = flic_inject_airq(dev->kvm, attr);
2548 break;
2c1a48f2
YMZ
2549 case KVM_DEV_FLIC_AISM_ALL:
2550 r = flic_ais_mode_set_all(dev->kvm, attr);
2551 break;
c05c4186
JF
2552 default:
2553 r = -EINVAL;
2554 }
2555
2556 return r;
2557}
2558
4f129858
HP
2559static int flic_has_attr(struct kvm_device *dev,
2560 struct kvm_device_attr *attr)
2561{
2562 switch (attr->group) {
2563 case KVM_DEV_FLIC_GET_ALL_IRQS:
2564 case KVM_DEV_FLIC_ENQUEUE:
2565 case KVM_DEV_FLIC_CLEAR_IRQS:
2566 case KVM_DEV_FLIC_APF_ENABLE:
2567 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
2568 case KVM_DEV_FLIC_ADAPTER_REGISTER:
2569 case KVM_DEV_FLIC_ADAPTER_MODIFY:
6d28f789 2570 case KVM_DEV_FLIC_CLEAR_IO_IRQ:
51978393 2571 case KVM_DEV_FLIC_AISM:
a8920950 2572 case KVM_DEV_FLIC_AIRQ_INJECT:
2c1a48f2 2573 case KVM_DEV_FLIC_AISM_ALL:
4f129858
HP
2574 return 0;
2575 }
2576 return -ENXIO;
2577}
2578
c05c4186
JF
2579static int flic_create(struct kvm_device *dev, u32 type)
2580{
2581 if (!dev)
2582 return -EINVAL;
2583 if (dev->kvm->arch.flic)
2584 return -EINVAL;
2585 dev->kvm->arch.flic = dev;
2586 return 0;
2587}
2588
2589static void flic_destroy(struct kvm_device *dev)
2590{
2591 dev->kvm->arch.flic = NULL;
2592 kfree(dev);
2593}
2594
2595/* s390 floating irq controller (flic) */
2596struct kvm_device_ops kvm_flic_ops = {
2597 .name = "kvm-flic",
2598 .get_attr = flic_get_attr,
2599 .set_attr = flic_set_attr,
4f129858 2600 .has_attr = flic_has_attr,
c05c4186
JF
2601 .create = flic_create,
2602 .destroy = flic_destroy,
2603};
84223598
CH
2604
2605static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
2606{
2607 unsigned long bit;
2608
2609 bit = bit_nr + (addr % PAGE_SIZE) * 8;
2610
2611 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
2612}
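/*
 * Editorial note: when adapter->swap is set, the computed bit number
 * is XOR-ed with BITS_PER_LONG - 1, which mirrors the position within
 * the word and thereby converts between LSB-0 and MSB-0 bit numbering
 * for the guest's indicator area, e.g. bit 0 becomes bit 63 on a
 * 64-bit host.
 */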
2613
2614static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
2615 u64 addr)
2616{
2617 struct s390_map_info *map;
2618
2619 if (!adapter)
2620 return NULL;
2621
2622 list_for_each_entry(map, &adapter->maps, list) {
2623 if (map->guest_addr == addr)
2624 return map;
2625 }
2626 return NULL;
2627}
2628
2629static int adapter_indicators_set(struct kvm *kvm,
2630 struct s390_io_adapter *adapter,
2631 struct kvm_s390_adapter_int *adapter_int)
2632{
2633 unsigned long bit;
2634 int summary_set, idx;
2635 struct s390_map_info *info;
2636 void *map;
2637
2638 info = get_map_info(adapter, adapter_int->ind_addr);
2639 if (!info)
2640 return -1;
2641 map = page_address(info->page);
2642 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
2643 set_bit(bit, map);
2644 idx = srcu_read_lock(&kvm->srcu);
2645 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2646 set_page_dirty_lock(info->page);
2647 info = get_map_info(adapter, adapter_int->summary_addr);
2648 if (!info) {
2649 srcu_read_unlock(&kvm->srcu, idx);
2650 return -1;
2651 }
2652 map = page_address(info->page);
2653 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
2654 adapter->swap);
2655 summary_set = test_and_set_bit(bit, map);
2656 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
2657 set_page_dirty_lock(info->page);
2658 srcu_read_unlock(&kvm->srcu, idx);
2659 return summary_set ? 0 : 1;
2660}
2661
2662/*
2663 * < 0 - not injected due to error
2664 * = 0 - coalesced, summary indicator already active
2665 * > 0 - injected interrupt
2666 */
2667static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
2668 struct kvm *kvm, int irq_source_id, int level,
2669 bool line_status)
2670{
2671 int ret;
2672 struct s390_io_adapter *adapter;
2673
2674 /* We're only interested in the 0->1 transition. */
2675 if (!level)
2676 return 0;
2677 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
2678 if (!adapter)
2679 return -1;
2680 down_read(&adapter->maps_lock);
2681 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
2682 up_read(&adapter->maps_lock);
2683 if ((ret > 0) && !adapter->masked) {
a8920950 2684 ret = kvm_s390_inject_airq(kvm, adapter);
84223598
CH
2685 if (ret == 0)
2686 ret = 1;
2687 }
2688 return ret;
2689}
2690
4d62fcc0
QH
2691/*
2692 * Inject the machine check to the guest.
2693 */
2694void kvm_s390_reinject_machine_check(struct kvm_vcpu *vcpu,
2695 struct mcck_volatile_info *mcck_info)
2696{
2697 struct kvm_s390_interrupt_info inti;
2698 struct kvm_s390_irq irq;
2699 struct kvm_s390_mchk_info *mchk;
2700 union mci mci;
2701 __u64 cr14 = 0; /* upper bits are not used */
3dbf0205 2702 int rc;
4d62fcc0
QH
2703
2704 mci.val = mcck_info->mcic;
2705 if (mci.sr)
cc65450c 2706 cr14 |= CR14_RECOVERY_SUBMASK;
4d62fcc0 2707 if (mci.dg)
cc65450c 2708 cr14 |= CR14_DEGRADATION_SUBMASK;
4d62fcc0 2709 if (mci.w)
cc65450c 2710 cr14 |= CR14_WARNING_SUBMASK;
4d62fcc0
QH
2711
2712 mchk = mci.ck ? &inti.mchk : &irq.u.mchk;
2713 mchk->cr14 = cr14;
2714 mchk->mcic = mcck_info->mcic;
2715 mchk->ext_damage_code = mcck_info->ext_damage_code;
2716 mchk->failing_storage_address = mcck_info->failing_storage_address;
2717 if (mci.ck) {
2718 /* Inject the floating machine check */
2719 inti.type = KVM_S390_MCHK;
3dbf0205 2720 rc = __inject_vm(vcpu->kvm, &inti);
4d62fcc0
QH
2721 } else {
2722 /* Inject the machine check to the specified vcpu */
2723 irq.type = KVM_S390_MCHK;
3dbf0205 2724 rc = kvm_s390_inject_vcpu(vcpu, &irq);
4d62fcc0 2725 }
3dbf0205 2726 WARN_ON_ONCE(rc);
4d62fcc0
QH
2727}
2728
c63cf538
RK
2729int kvm_set_routing_entry(struct kvm *kvm,
2730 struct kvm_kernel_irq_routing_entry *e,
84223598
CH
2731 const struct kvm_irq_routing_entry *ue)
2732{
2733 int ret;
2734
2735 switch (ue->type) {
2736 case KVM_IRQ_ROUTING_S390_ADAPTER:
2737 e->set = set_adapter_int;
2738 e->adapter.summary_addr = ue->u.adapter.summary_addr;
2739 e->adapter.ind_addr = ue->u.adapter.ind_addr;
2740 e->adapter.summary_offset = ue->u.adapter.summary_offset;
2741 e->adapter.ind_offset = ue->u.adapter.ind_offset;
2742 e->adapter.adapter_id = ue->u.adapter.adapter_id;
2743 ret = 0;
2744 break;
2745 default:
2746 ret = -EINVAL;
2747 }
2748
2749 return ret;
2750}
2751
2752int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
2753 int irq_source_id, int level, bool line_status)
2754{
2755 return -EINVAL;
2756}
816c7667
JF
2757
2758int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu, void __user *irqstate, int len)
2759{
2760 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2761 struct kvm_s390_irq *buf;
2762 int r = 0;
2763 int n;
2764
2765 buf = vmalloc(len);
2766 if (!buf)
2767 return -ENOMEM;
2768
2769 if (copy_from_user((void *) buf, irqstate, len)) {
2770 r = -EFAULT;
2771 goto out_free;
2772 }
2773
2774 /*
2775 * Don't allow setting the interrupt state
2776 * when there are already interrupts pending
2777 */
2778 spin_lock(&li->lock);
2779 if (li->pending_irqs) {
2780 r = -EBUSY;
2781 goto out_unlock;
2782 }
2783
2784 for (n = 0; n < len / sizeof(*buf); n++) {
2785 r = do_inject_vcpu(vcpu, &buf[n]);
2786 if (r)
2787 break;
2788 }
2789
2790out_unlock:
2791 spin_unlock(&li->lock);
2792out_free:
2793 vfree(buf);
2794
2795 return r;
2796}
2797
2798static void store_local_irq(struct kvm_s390_local_interrupt *li,
2799 struct kvm_s390_irq *irq,
2800 unsigned long irq_type)
2801{
2802 switch (irq_type) {
2803 case IRQ_PEND_MCHK_EX:
2804 case IRQ_PEND_MCHK_REP:
2805 irq->type = KVM_S390_MCHK;
2806 irq->u.mchk = li->irq.mchk;
2807 break;
2808 case IRQ_PEND_PROG:
2809 irq->type = KVM_S390_PROGRAM_INT;
2810 irq->u.pgm = li->irq.pgm;
2811 break;
2812 case IRQ_PEND_PFAULT_INIT:
2813 irq->type = KVM_S390_INT_PFAULT_INIT;
2814 irq->u.ext = li->irq.ext;
2815 break;
2816 case IRQ_PEND_EXT_EXTERNAL:
2817 irq->type = KVM_S390_INT_EXTERNAL_CALL;
2818 irq->u.extcall = li->irq.extcall;
2819 break;
2820 case IRQ_PEND_EXT_CLOCK_COMP:
2821 irq->type = KVM_S390_INT_CLOCK_COMP;
2822 break;
2823 case IRQ_PEND_EXT_CPU_TIMER:
2824 irq->type = KVM_S390_INT_CPU_TIMER;
2825 break;
2826 case IRQ_PEND_SIGP_STOP:
2827 irq->type = KVM_S390_SIGP_STOP;
2828 irq->u.stop = li->irq.stop;
2829 break;
2830 case IRQ_PEND_RESTART:
2831 irq->type = KVM_S390_RESTART;
2832 break;
2833 case IRQ_PEND_SET_PREFIX:
2834 irq->type = KVM_S390_SIGP_SET_PREFIX;
2835 irq->u.prefix = li->irq.prefix;
2836 break;
2837 }
2838}
2839
2840int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu, __u8 __user *buf, int len)
2841{
a5bd7647 2842 int scn;
689bdf9e 2843 DECLARE_BITMAP(sigp_emerg_pending, KVM_MAX_VCPUS);
816c7667
JF
2844 struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
2845 unsigned long pending_irqs;
2846 struct kvm_s390_irq irq;
2847 unsigned long irq_type;
2848 int cpuaddr;
2849 int n = 0;
2850
2851 spin_lock(&li->lock);
2852 pending_irqs = li->pending_irqs;
2853 memcpy(&sigp_emerg_pending, &li->sigp_emerg_pending,
2854 sizeof(sigp_emerg_pending));
2855 spin_unlock(&li->lock);
2856
2857 for_each_set_bit(irq_type, &pending_irqs, IRQ_PEND_COUNT) {
2858 memset(&irq, 0, sizeof(irq));
2859 if (irq_type == IRQ_PEND_EXT_EMERGENCY)
2860 continue;
2861 if (n + sizeof(irq) > len)
2862 return -ENOBUFS;
2863 store_local_irq(&vcpu->arch.local_int, &irq, irq_type);
2864 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2865 return -EFAULT;
2866 n += sizeof(irq);
2867 }
2868
2869 if (test_bit(IRQ_PEND_EXT_EMERGENCY, &pending_irqs)) {
2870 for_each_set_bit(cpuaddr, sigp_emerg_pending, KVM_MAX_VCPUS) {
2871 memset(&irq, 0, sizeof(irq));
2872 if (n + sizeof(irq) > len)
2873 return -ENOBUFS;
2874 irq.type = KVM_S390_INT_EMERGENCY;
2875 irq.u.emerg.code = cpuaddr;
2876 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2877 return -EFAULT;
2878 n += sizeof(irq);
2879 }
2880 }
2881
a5bd7647 2882 if (sca_ext_call_pending(vcpu, &scn)) {
816c7667
JF
2883 if (n + sizeof(irq) > len)
2884 return -ENOBUFS;
2885 memset(&irq, 0, sizeof(irq));
2886 irq.type = KVM_S390_INT_EXTERNAL_CALL;
a5bd7647 2887 irq.u.extcall.code = scn;
816c7667
JF
2888 if (copy_to_user(&buf[n], &irq, sizeof(irq)))
2889 return -EFAULT;
2890 n += sizeof(irq);
2891 }
2892
2893 return n;
2894}
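/*
 * Editorial note: the return value of kvm_s390_get_irq_state() is the
 * number of bytes written to the user buffer, not a count of IRQs. A
 * pending IRQ_PEND_EXT_EMERGENCY is expanded into one entry per
 * signalling CPU address, and a pending SIGP external call is read
 * from the SCA entry instead of the local interrupt state.
 */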
d7c5cb01
MM
2895
2896void kvm_s390_gisa_clear(struct kvm *kvm)
2897{
982cff42
MM
2898 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2899
2900 if (!gi->origin)
672128bf 2901 return;
982cff42
MM
2902 memset(gi->origin, 0, sizeof(struct kvm_s390_gisa));
2903 gi->origin->next_alert = (u32)(u64)gi->origin;
2904 VM_EVENT(kvm, 3, "gisa 0x%pK cleared", gi->origin);
d7c5cb01
MM
2905}
2906
2907void kvm_s390_gisa_init(struct kvm *kvm)
2908{
982cff42
MM
2909 struct kvm_s390_gisa_interrupt *gi = &kvm->arch.gisa_int;
2910
672128bf
MM
2911 if (!css_general_characteristics.aiv)
2912 return;
982cff42 2913 gi->origin = &kvm->arch.sie_page2->gisa;
672128bf 2914 kvm_s390_gisa_clear(kvm);
982cff42 2915 VM_EVENT(kvm, 3, "gisa 0x%pK initialized", gi->origin);
d7c5cb01
MM
2916}
2917
2918void kvm_s390_gisa_destroy(struct kvm *kvm)
2919{
982cff42 2920 kvm->arch.gisa_int.origin = NULL;
d7c5cb01 2921}
1282c21e
MM
2922
2923void kvm_s390_gib_destroy(void)
2924{
2925 if (!gib)
2926 return;
2927 chsc_sgib(0);
2928 free_page((unsigned long)gib);
2929 gib = NULL;
2930}
2931
2932int kvm_s390_gib_init(u8 nisc)
2933{
2934 int rc = 0;
2935
2936 if (!css_general_characteristics.aiv) {
2937 KVM_EVENT(3, "%s", "gib not initialized, no AIV facility");
2938 goto out;
2939 }
2940
2941 gib = (struct kvm_s390_gib *)get_zeroed_page(GFP_KERNEL | GFP_DMA);
2942 if (!gib) {
2943 rc = -ENOMEM;
2944 goto out;
2945 }
2946
2947 gib->nisc = nisc;
2948 if (chsc_sgib((u32)(u64)gib)) {
2949 pr_err("Associating the GIB with the AIV facility failed\n");
2950 free_page((unsigned long)gib);
2951 gib = NULL;
2952 rc = -EIO;
2953 goto out;
2954 }
2955
2956 KVM_EVENT(3, "gib 0x%pK (nisc=%d) initialized", gib, gib->nisc);
2957out:
2958 return rc;
2959}
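/*
 * Illustrative init-path sketch (editorial): how a module init/exit
 * pair would typically drive the GIB helpers above. GAL_ISC (from
 * <asm/isc.h>) is assumed to be the ISC the KVM module init path
 * passes in; error handling is reduced to the minimum.
 */
static int __init example_gib_setup(void)
{
	int rc = kvm_s390_gib_init(GAL_ISC);	/* associate GIB via CHSC */

	if (rc)
		return rc;
	/* ... register the rest of the module ... */
	return 0;
}

static void __exit example_gib_teardown(void)
{
	kvm_s390_gib_destroy();			/* disassociate and free */
}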