/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008, 2015
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/dis.h>
#include <asm/uaccess.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

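/*
 * I/O interruption types encode the source (adapter flag, cssid, ssid,
 * schid) in the type value itself, using the IOINT_* masks above; all
 * non-I/O interrupt types live in the range starting at 0xfffe0000u,
 * which is what is_ioint() below tests for.
 */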
static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
						   struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.local_int.pending_irqs;
}

static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask = pending_local_irqs(vcpu);

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;

	/*
	 * STOP irqs will never be actively delivered. They are triggered via
	 * intercept requests and cleared when the stop intercept is performed.
	 */
	__clear_bit(IRQ_PEND_SIGP_STOP, &active_mask);

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

static void set_intercept_indicators_stop(struct kvm_vcpu *vcpu)
{
	if (kvm_s390_is_stop_irq_pending(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
	set_intercept_indicators_stop(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return insn_length(vcpu->arch.sie_block->ipa >> 8);
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}

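/*
 * The __deliver_* helpers below all follow the architected interruption
 * sequence: store the interruption code/parameters and the old PSW into
 * the guest lowcore, then load the new PSW from the lowcore so that the
 * next SIE entry resumes in the guest's interrupt handler. A failing
 * lowcore access makes the helper return -EFAULT.
 */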
static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			  (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk;
	unsigned long adtl_status_addr;
	int rc;

	spin_lock(&li->lock);
	mchk = li->irq.mchk;
	/*
	 * If there was an exigent machine check pending, then any repressible
	 * machine checks that might have been pending are indicated along
	 * with it, so always clear both bits
	 */
	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	memset(&li->irq.mchk, 0, sizeof(mchk));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk.mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk.cr14, mchk.mcic);

	rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= read_guest_lc(vcpu, __LC_VX_SAVE_AREA_ADDR,
			    &adtl_status_addr, sizeof(unsigned long));
	rc |= kvm_s390_vcpu_store_adtl_status(vcpu, adtl_status_addr);
	rc |= put_guest_lc(vcpu, mchk.mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc = write_guest_lc(vcpu,
			    offsetof(struct _lowcore, restart_old_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;
	int cpu_addr;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			  (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0, nullifying = false;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
		nullifying = true;
		/* fall through */
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		nullifying = true;
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_VECTOR_PROCESSING:
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_STACK_FULL:
	case PGM_STACK_EMPTY:
	case PGM_STACK_SPECIFICATION:
	case PGM_STACK_TYPE:
	case PGM_STACK_OPERATION:
	case PGM_TRACE_TABEL:
	case PGM_CRYPTO_OPERATION:
		nullifying = true;
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	if (nullifying && vcpu->arch.sie_block->icptcode == ICPT_INST)
		kvm_s390_rewind_psw(vcpu, ilc);

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					      struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);

	rc = put_guest_lc(vcpu, inti->io.subchannel_id,
			  (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

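/*
 * deliver_irq_funcs below is indexed by IRQ_PEND_* bit number. Because
 * kvm_s390_deliver_pending_interrupts() picks work with find_first_bit(),
 * the bit order of the IRQ_PEND_* enum doubles as the delivery priority:
 * lower bit numbers are delivered first.
 */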
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
					   struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_mchk_floating(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}

	return rc;
}

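/*
 * With the SIGP interpretation facility (sigpif) the hardware injects
 * external calls itself, so the pending state lives in the SIGP control
 * byte of the SCA rather than in pending_irqs; the check below covers
 * both representations.
 */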
/* Check whether an external call is pending (deliverable or not) */
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	uint8_t sigp_ctrl = vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	if (!sclp_has_sigpif())
		return test_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);

	return (sigp_ctrl & SIGP_CTRL_C) &&
	       (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND);
}

int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop)
{
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	rc = !!deliverable_local_irqs(vcpu);

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	/* external call pending and deliverable */
	if (!rc && kvm_s390_ext_call_pending(vcpu) &&
	    !psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		rc = 1;

	if (!rc && !exclude_stop && kvm_s390_is_stop_irq_pending(vcpu))
		rc = 1;

	return rc;
}

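/*
 * The clock comparator is matched against the guest's view of the TOD
 * clock, i.e. the host TOD plus the per-vcpu epoch offset; a ckc irq is
 * only considered pending once that guest TOD value has passed the
 * programmed comparator and ckc interrupts are enabled.
 */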
int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}

int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set(0, sltime), HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

855
0e9c85a5
DH
856void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
857{
858 if (waitqueue_active(&vcpu->wq)) {
859 /*
860 * The vcpu gave up the cpu voluntarily, mark it as a good
861 * yield-candidate.
862 */
863 vcpu->preempted = true;
864 wake_up_interruptible(&vcpu->wq);
ce2e4f0b 865 vcpu->stat.halt_wakeup++;
0e9c85a5
DH
866 }
867}
868
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;
	u64 now, sltime;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/*
	 * If the monotonic clock runs faster than the tod clock we might be
	 * woken up too early and have to go back to sleep to avoid deadlocks.
	 */
	if (vcpu->arch.sie_block->ckc > now &&
	    hrtimer_forward_now(timer, ns_to_ktime(sltime)))
		return HRTIMER_RESTART;
	kvm_s390_vcpu_wakeup(vcpu);
	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl = 0;
}

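/*
 * Deliver the deliverable local interrupts in priority order, then walk
 * the floating interrupt list and deliver whatever the current PSW and
 * control registers allow; anything left pending only raises the
 * matching interception indicators for a later exit.
 */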
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	deliver_irq_t func;
	int deliver;
	int rc = 0;
	unsigned long irq_type;
	unsigned long deliverable_irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (kvm_cpu_has_pending_timer(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	do {
		deliverable_irqs = deliverable_local_irqs(vcpu);
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
		if (irq_type == IRQ_PEND_COUNT)
			break;
		func = deliver_irq_funcs[irq_type];
		if (!func) {
			WARN_ON_ONCE(func == NULL);
			clear_bit(irq_type, &li->pending_irqs);
			continue;
		}
		rc = func(vcpu);
	} while (!rc && irq_type != IRQ_PEND_COUNT);

	set_intercept_indicators_local(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __deliver_floating_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	li->irq.pgm = irq->u.pgm;
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
				   0, 1);
	spin_lock(&li->lock);
	irq.u.pgm.code = code;
	__inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;
	int rc;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);
	spin_lock(&li->lock);
	irq.u.pgm = *pgm_info;
	rc = __inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2, 2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

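/*
 * For the sigpif case below, the cmpxchg() only succeeds if SIGP_CTRL_C
 * was still clear, which atomically rejects a second external call
 * while one is pending in the SIGP control byte.
 */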
static int __inject_extcall_sigpif(struct kvm_vcpu *vcpu, uint16_t src_id)
{
	unsigned char new_val, old_val;
	uint8_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sigp_ctrl;

	new_val = SIGP_CTRL_C | (src_id & SIGP_CTRL_SCN_MASK);
	old_val = *sigp_ctrl & ~SIGP_CTRL_C;
	if (cmpxchg(sigp_ctrl, old_val, new_val) != old_val) {
		/* another external call is pending */
		return -EBUSY;
	}
	atomic_set_mask(CPUSTAT_ECALL_PEND, &vcpu->arch.sie_block->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;
	uint16_t src_id = irq->u.extcall.code;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   src_id);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   src_id, 0, 2);

	/* sending vcpu invalid */
	if (src_id >= KVM_MAX_VCPUS ||
	    kvm_get_vcpu(vcpu->kvm, src_id) == NULL)
		return -EINVAL;

	if (sclp_has_sigpif())
		return __inject_extcall_sigpif(vcpu, src_id);

	/* another external call is already pending */
	if (test_and_set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs))
		return -EBUSY;
	*extcall = irq->u.extcall;
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   irq->u.prefix.address, 0, 2);

	if (!is_vcpu_stopped(vcpu))
		return -EBUSY;

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

#define KVM_S390_STOP_SUPP_FLAGS (KVM_S390_STOP_FLAG_STORE_STATUS)
static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_stop_info *stop = &li->irq.stop;
	int rc = 0;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

	if (irq->u.stop.flags & ~KVM_S390_STOP_SUPP_FLAGS)
		return -EINVAL;

	if (is_vcpu_stopped(vcpu)) {
		if (irq->u.stop.flags & KVM_S390_STOP_FLAG_STORE_STATUS)
			rc = kvm_s390_store_status_unloaded(vcpu,
						KVM_S390_STORE_STATUS_NOADDR);
		return rc;
	}

	if (test_and_set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs))
		return -EBUSY;
	stop->flags = irq->u.stop.flags;
	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0, 2);

	set_bit(irq->u.emerg.code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic, 2);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

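/*
 * Dequeue an I/O interrupt either by interruption subclass (cr6 set,
 * schid == 0) or for one specific subchannel (schid set, cr6 == 0);
 * exactly one of the two selectors must be supplied, otherwise NULL is
 * returned.
 */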
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	return inti;
}

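/*
 * Enqueue a floating interrupt (I/O interrupts are kept sorted by ISC)
 * and kick a vcpu to deliver it: preferably an idle one, otherwise one
 * chosen round-robin via fi->next_rr_cpu.
 */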
static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
	if (atomic_read(&kvm->online_vcpus) == 0)
		goto unlock_fi;
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;
	int rc;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	rc = __inject_vm(kvm, inti);
	if (rc)
		kfree(inti);
	return rc;
}

int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti)
{
	return __inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_SIGP_STOP:
		irq->u.stop.flags = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	return test_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
}

void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->irq.stop.flags = 0;
	clear_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	spin_unlock(&li->lock);
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   irq->u.pgm.code);
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
}

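/*
 * The helpers below back the FLIC (floating interrupt controller)
 * device attributes: they translate between the in-kernel
 * kvm_s390_interrupt_info representation and the kvm_s390_irq layout
 * exchanged with userspace.
 */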
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u8 *addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

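/*
 * I/O adapter bookkeeping, also driven through the FLIC device: each
 * registered adapter keeps a list of pinned guest pages (used for its
 * interruption indicators), protected by maps_lock and capped at
 * MAX_S390_ADAPTER_MAPS entries.
 */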
static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

1634static int register_io_adapter(struct kvm_device *dev,
1635 struct kvm_device_attr *attr)
1636{
1637 struct s390_io_adapter *adapter;
1638 struct kvm_s390_io_adapter adapter_info;
1639
1640 if (copy_from_user(&adapter_info,
1641 (void __user *)attr->addr, sizeof(adapter_info)))
1642 return -EFAULT;
1643
1644 if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
1645 (dev->kvm->arch.adapters[adapter_info.id] != NULL))
1646 return -EINVAL;
1647
1648 adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
1649 if (!adapter)
1650 return -ENOMEM;
1651
1652 INIT_LIST_HEAD(&adapter->maps);
1653 init_rwsem(&adapter->maps_lock);
1654 atomic_set(&adapter->nr_maps, 0);
1655 adapter->id = adapter_info.id;
1656 adapter->isc = adapter_info.isc;
1657 adapter->maskable = adapter_info.maskable;
1658 adapter->masked = false;
1659 adapter->swap = adapter_info.swap;
1660 dev->kvm->arch.adapters[adapter->id] = adapter;
1661
1662 return 0;
1663}
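/*
 * Registration is reached via KVM_SET_DEVICE_ATTR with group
 * KVM_DEV_FLIC_ADAPTER_REGISTER; a sketch with illustrative field
 * values (adapter 0 on ISC 3, maskable, no byte swapping):
 *
 *	struct kvm_s390_io_adapter info = {
 *		.id = 0,
 *		.isc = 3,
 *		.maskable = 1,
 *		.swap = 0,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_REGISTER,
 *		.addr = (__u64) &info,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */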
1664
1665int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
1666{
1667 int ret;
1668 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1669
1670 if (!adapter || !adapter->maskable)
1671 return -EINVAL;
1672 ret = adapter->masked;
1673 adapter->masked = masked;
1674 return ret;
1675}
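/*
 * Note the return convention above: on success the *previous* masked
 * state (0 or 1) is returned, not 0. modify_io_adapter() below folds
 * positive values back to 0 before the result reaches userspace.
 */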
1676
1677static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
1678{
1679 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1680 struct s390_map_info *map;
1681 int ret;
1682
1683 if (!adapter || !addr)
1684 return -EINVAL;
1685
1686 map = kzalloc(sizeof(*map), GFP_KERNEL);
1687 if (!map) {
1688 ret = -ENOMEM;
1689 goto out;
1690 }
1691 INIT_LIST_HEAD(&map->list);
1692 map->guest_addr = addr;
1693 	map->addr = gmap_translate(kvm->arch.gmap, addr);
1694 if (map->addr == -EFAULT) {
1695 ret = -EFAULT;
1696 goto out;
1697 }
1698 ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
1699 if (ret < 0)
1700 goto out;
1701 BUG_ON(ret != 1);
1702 down_write(&adapter->maps_lock);
1703 if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
1704 list_add_tail(&map->list, &adapter->maps);
1705 ret = 0;
1706 } else {
1707 put_page(map->page);
1708 ret = -EINVAL;
1709 }
1710 up_write(&adapter->maps_lock);
1711out:
1712 if (ret)
1713 kfree(map);
1714 return ret;
1715}
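/*
 * The map path above pins the indicator page: gmap_translate() resolves
 * the guest address to a userspace address, and get_user_pages_fast()
 * takes a page reference so the page cannot go away while adapter
 * interrupts may still set bits in it. The entry only becomes visible on
 * the adapter's list under maps_lock, and the number of maps is capped
 * at MAX_S390_ADAPTER_MAPS.
 */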
1716
1717static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
1718{
1719 struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
1720 struct s390_map_info *map, *tmp;
1721 int found = 0;
1722
1723 if (!adapter || !addr)
1724 return -EINVAL;
1725
1726 down_write(&adapter->maps_lock);
1727 list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
1728 if (map->guest_addr == addr) {
1729 found = 1;
1730 atomic_dec(&adapter->nr_maps);
1731 list_del(&map->list);
1732 put_page(map->page);
1733 kfree(map);
1734 break;
1735 }
1736 }
1737 up_write(&adapter->maps_lock);
1738
1739 return found ? 0 : -EINVAL;
1740}
1741
1742void kvm_s390_destroy_adapters(struct kvm *kvm)
1743{
1744 int i;
1745 struct s390_map_info *map, *tmp;
1746
1747 for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
1748 if (!kvm->arch.adapters[i])
1749 continue;
1750 list_for_each_entry_safe(map, tmp,
1751 &kvm->arch.adapters[i]->maps, list) {
1752 list_del(&map->list);
1753 put_page(map->page);
1754 kfree(map);
1755 }
1756 kfree(kvm->arch.adapters[i]);
1757 }
1758}
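/*
 * VM teardown: any indicator pages that the guest never unmapped are
 * released here, so page references cannot leak past the lifetime of
 * the VM.
 */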
1759
1760static int modify_io_adapter(struct kvm_device *dev,
1761 struct kvm_device_attr *attr)
1762{
1763 struct kvm_s390_io_adapter_req req;
1764 struct s390_io_adapter *adapter;
1765 int ret;
1766
1767 if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
1768 return -EFAULT;
1769
1770 adapter = get_io_adapter(dev->kvm, req.id);
1771 if (!adapter)
1772 return -EINVAL;
1773 switch (req.type) {
1774 case KVM_S390_IO_ADAPTER_MASK:
1775 ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
1776 if (ret > 0)
1777 ret = 0;
1778 break;
1779 case KVM_S390_IO_ADAPTER_MAP:
1780 ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
1781 break;
1782 case KVM_S390_IO_ADAPTER_UNMAP:
1783 ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
1784 break;
1785 default:
1786 ret = -EINVAL;
1787 }
1788
1789 return ret;
1790}
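/*
 * A sketch of masking adapter 0 through the dispatcher above (field
 * values illustrative; map/unmap requests fill in .addr instead of
 * .mask):
 *
 *	struct kvm_s390_io_adapter_req req = {
 *		.id = 0,
 *		.type = KVM_S390_IO_ADAPTER_MASK,
 *		.mask = 1,
 *	};
 *	struct kvm_device_attr attr = {
 *		.group = KVM_DEV_FLIC_ADAPTER_MODIFY,
 *		.addr = (__u64) &req,
 *	};
 *	ioctl(flic_fd, KVM_SET_DEVICE_ATTR, &attr);
 */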
1791
1792static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
1793{
1794 int r = 0;
1795 unsigned int i;
1796 struct kvm_vcpu *vcpu;
1797
1798 switch (attr->group) {
1799 case KVM_DEV_FLIC_ENQUEUE:
1800 r = enqueue_floating_irq(dev, attr);
1801 break;
1802 case KVM_DEV_FLIC_CLEAR_IRQS:
1803 		kvm_s390_clear_float_irqs(dev->kvm);
1804 		break;
1805 case KVM_DEV_FLIC_APF_ENABLE:
1806 dev->kvm->arch.gmap->pfault_enabled = 1;
1807 break;
1808 case KVM_DEV_FLIC_APF_DISABLE_WAIT:
1809 dev->kvm->arch.gmap->pfault_enabled = 0;
1810 		/*
1811 		 * Make sure no async faults are in transition when
1812 		 * clearing the queues, so that we don't need to worry
1813 		 * about late-coming workers.
1814 		 */
1815 synchronize_srcu(&dev->kvm->srcu);
1816 kvm_for_each_vcpu(i, vcpu, dev->kvm)
1817 kvm_clear_async_pf_completion_queue(vcpu);
1818 break;
1819 case KVM_DEV_FLIC_ADAPTER_REGISTER:
1820 r = register_io_adapter(dev, attr);
1821 break;
1822 case KVM_DEV_FLIC_ADAPTER_MODIFY:
1823 r = modify_io_adapter(dev, attr);
1824 break;
1825 default:
1826 r = -EINVAL;
1827 }
1828
1829 return r;
1830}
1831
1832static int flic_create(struct kvm_device *dev, u32 type)
1833{
1834 if (!dev)
1835 return -EINVAL;
1836 if (dev->kvm->arch.flic)
1837 return -EINVAL;
1838 dev->kvm->arch.flic = dev;
1839 return 0;
1840}
1841
1842static void flic_destroy(struct kvm_device *dev)
1843{
1844 dev->kvm->arch.flic = NULL;
1845 kfree(dev);
1846}
1847
1848/* s390 floating irq controller (flic) */
1849struct kvm_device_ops kvm_flic_ops = {
1850 .name = "kvm-flic",
1851 .get_attr = flic_get_attr,
1852 .set_attr = flic_set_attr,
1853 .create = flic_create,
1854 .destroy = flic_destroy,
1855};
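/*
 * Userspace instantiates this device with KVM_CREATE_DEVICE on the VM fd
 * (sketch; vm_fd is hypothetical). The fd returned in cd.fd then accepts
 * the KVM_DEV_FLIC_* attribute groups handled above:
 *
 *	struct kvm_create_device cd = { .type = KVM_DEV_TYPE_FLIC };
 *	if (ioctl(vm_fd, KVM_CREATE_DEVICE, &cd) == 0)
 *		flic_fd = cd.fd;
 */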
1856
1857static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
1858{
1859 unsigned long bit;
1860
1861 bit = bit_nr + (addr % PAGE_SIZE) * 8;
1862
1863 return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
1864}
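/*
 * Worked example for the helper above: with addr == 0x1003 and
 * bit_nr == 2, the bit is 2 + 3 * 8 == 26 into the page. If the guest
 * uses swapped (little-endian) indicators, the bit is mirrored within
 * its 64-bit word: 26 ^ 63 == 37.
 */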
1865
1866static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
1867 u64 addr)
1868{
1869 struct s390_map_info *map;
1870
1871 if (!adapter)
1872 return NULL;
1873
1874 list_for_each_entry(map, &adapter->maps, list) {
1875 if (map->guest_addr == addr)
1876 return map;
1877 }
1878 return NULL;
1879}
1880
1881static int adapter_indicators_set(struct kvm *kvm,
1882 struct s390_io_adapter *adapter,
1883 struct kvm_s390_adapter_int *adapter_int)
1884{
1885 unsigned long bit;
1886 int summary_set, idx;
1887 struct s390_map_info *info;
1888 void *map;
1889
1890 info = get_map_info(adapter, adapter_int->ind_addr);
1891 if (!info)
1892 return -1;
1893 map = page_address(info->page);
1894 bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
1895 set_bit(bit, map);
1896 idx = srcu_read_lock(&kvm->srcu);
1897 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1898 set_page_dirty_lock(info->page);
1899 info = get_map_info(adapter, adapter_int->summary_addr);
1900 if (!info) {
1901 srcu_read_unlock(&kvm->srcu, idx);
1902 return -1;
1903 }
1904 map = page_address(info->page);
1905 bit = get_ind_bit(info->addr, adapter_int->summary_offset,
1906 adapter->swap);
1907 summary_set = test_and_set_bit(bit, map);
1908 mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
1909 set_page_dirty_lock(info->page);
1910 srcu_read_unlock(&kvm->srcu, idx);
1911 return summary_set ? 0 : 1;
1912}
1913
1914/*
1915 * < 0 - not injected due to error
1916 * = 0 - coalesced, summary indicator already active
1917 * > 0 - injected interrupt
1918 */
1919static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
1920 struct kvm *kvm, int irq_source_id, int level,
1921 bool line_status)
1922{
1923 int ret;
1924 struct s390_io_adapter *adapter;
1925
1926 /* We're only interested in the 0->1 transition. */
1927 if (!level)
1928 return 0;
1929 adapter = get_io_adapter(kvm, e->adapter.adapter_id);
1930 if (!adapter)
1931 return -1;
1932 down_read(&adapter->maps_lock);
1933 ret = adapter_indicators_set(kvm, adapter, &e->adapter);
1934 up_read(&adapter->maps_lock);
1935 if ((ret > 0) && !adapter->masked) {
1936 struct kvm_s390_interrupt s390int = {
1937 .type = KVM_S390_INT_IO(1, 0, 0, 0),
1938 .parm = 0,
1939 .parm64 = (adapter->isc << 27) | 0x80000000,
1940 };
1941 ret = kvm_s390_inject_vm(kvm, &s390int);
1942 if (ret == 0)
1943 ret = 1;
1944 }
1945 return ret;
1946}
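/*
 * Taken together with adapter_indicators_set(), this is the usual
 * two-level indicator scheme: the device bit is always set, but a fresh
 * I/O interrupt (with the ISC encoded in parm64 as for a real adapter
 * interruption) is only injected when the summary bit made a 0->1
 * transition and the adapter is not masked.
 */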
1947
1948 int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
1949 const struct kvm_irq_routing_entry *ue)
1950{
1951 int ret;
1952
1953 switch (ue->type) {
1954 case KVM_IRQ_ROUTING_S390_ADAPTER:
1955 e->set = set_adapter_int;
1956 e->adapter.summary_addr = ue->u.adapter.summary_addr;
1957 e->adapter.ind_addr = ue->u.adapter.ind_addr;
1958 e->adapter.summary_offset = ue->u.adapter.summary_offset;
1959 e->adapter.ind_offset = ue->u.adapter.ind_offset;
1960 e->adapter.adapter_id = ue->u.adapter.adapter_id;
1961 ret = 0;
1962 break;
1963 default:
1964 ret = -EINVAL;
1965 }
1966
1967 return ret;
1968}
1969
1970int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
1971 int irq_source_id, int level, bool line_status)
1972{
1973 return -EINVAL;
1974}
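/*
 * s390 has no MSI routing; kvm_set_msi() exists only because the generic
 * irq routing code expects the callback. Userspace installs
 * KVM_IRQ_ROUTING_S390_ADAPTER entries with KVM_SET_GSI_ROUTING, and an
 * irqfd attached to the routed GSI ultimately lands in set_adapter_int().
 */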