/*
 * handling kvm guest interrupts
 *
 * Copyright IBM Corp. 2008,2014
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 */

#include <linux/interrupt.h>
#include <linux/kvm_host.h>
#include <linux/hrtimer.h>
#include <linux/mmu_context.h>
#include <linux/signal.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <asm/asm-offsets.h>
#include <asm/uaccess.h>
#include "kvm-s390.h"
#include "gaccess.h"
#include "trace-s390.h"

#define IOINT_SCHID_MASK 0x0000ffff
#define IOINT_SSID_MASK 0x00030000
#define IOINT_CSSID_MASK 0x03fc0000
#define IOINT_AI_MASK 0x04000000
#define PFAULT_INIT 0x0600
#define PFAULT_DONE 0x0680
#define VIRTIO_PARAM 0x0d00

static int is_ioint(u64 type)
{
	return ((type & 0xfffe0000u) != 0xfffe0000u);
}

int psw_extint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT);
}

static int psw_ioint_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO);
}

static int psw_mchk_disabled(struct kvm_vcpu *vcpu)
{
	return !(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK);
}

static int psw_interrupts_disabled(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PER) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_IO) ||
	    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_EXT))
		return 0;
	return 1;
}

static int ckc_interrupts_enabled(struct kvm_vcpu *vcpu)
{
	if (psw_extint_disabled(vcpu) ||
	    !(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		return 0;
	if (guestdbg_enabled(vcpu) && guestdbg_sstep_enabled(vcpu))
		/* No timer interrupts when single stepping */
		return 0;
	return 1;
}

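/*
 * The interruption subclass (ISC) is held in bits 2-4 of the I/O
 * interruption word. Convert it to the single-bit mask format used in
 * bits 32-39 of CR6, so callers can test the result directly against
 * gcr[6].
 */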
static u64 int_word_to_isc_bits(u32 int_word)
{
	u8 isc = (int_word & 0x38000000) >> 27;

	return (0x80 >> isc) << 24;
}

static int __must_check __interrupt_is_deliverable(struct kvm_vcpu *vcpu,
					struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_EXTERNAL_CALL:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x2000ul)
			return 1;
		return 0;
	case KVM_S390_INT_EMERGENCY:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x4000ul)
			return 1;
		return 0;
	case KVM_S390_INT_CLOCK_COMP:
		return ckc_interrupts_enabled(vcpu);
	case KVM_S390_INT_CPU_TIMER:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x400ul)
			return 1;
		return 0;
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[0] & 0x200ul)
			return 1;
		return 0;
	case KVM_S390_PROGRAM_INT:
	case KVM_S390_SIGP_STOP:
	case KVM_S390_SIGP_SET_PREFIX:
	case KVM_S390_RESTART:
		return 1;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[14] & inti->mchk.cr14)
			return 1;
		return 0;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			return 0;
		if (vcpu->arch.sie_block->gcr[6] &
		    int_word_to_isc_bits(inti->io.io_int_word))
			return 1;
		return 0;
	default:
		printk(KERN_WARNING "illegal interrupt type %llx\n",
		       inti->type);
		BUG();
	}
	return 0;
}

static inline unsigned long pending_local_irqs(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.local_int.pending_irqs;
}

static unsigned long deliverable_local_irqs(struct kvm_vcpu *vcpu)
{
	unsigned long active_mask = pending_local_irqs(vcpu);

	if (psw_extint_disabled(vcpu))
		active_mask &= ~IRQ_PEND_EXT_MASK;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x2000ul))
		__clear_bit(IRQ_PEND_EXT_EXTERNAL, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x4000ul))
		__clear_bit(IRQ_PEND_EXT_EMERGENCY, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x800ul))
		__clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &active_mask);
	if (!(vcpu->arch.sie_block->gcr[0] & 0x400ul))
		__clear_bit(IRQ_PEND_EXT_CPU_TIMER, &active_mask);
	if (psw_mchk_disabled(vcpu))
		active_mask &= ~IRQ_PEND_MCHK_MASK;

	return active_mask;
}

static void __set_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	set_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __unset_cpu_idle(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	clear_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static void __reset_intercept_indicators(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_IO_INT | CPUSTAT_EXT_INT | CPUSTAT_STOP_INT,
			  &vcpu->arch.sie_block->cpuflags);
	vcpu->arch.sie_block->lctl = 0x0000;
	vcpu->arch.sie_block->ictl &= ~(ICTL_LPSW | ICTL_STCTL | ICTL_PINT);

	if (guestdbg_enabled(vcpu)) {
		vcpu->arch.sie_block->lctl |= (LCTL_CR0 | LCTL_CR9 |
					       LCTL_CR10 | LCTL_CR11);
		vcpu->arch.sie_block->ictl |= (ICTL_STCTL | ICTL_PINT);
	}

	if (vcpu->arch.local_int.action_bits & ACTION_STOP_ON_STOP)
		atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
}

static void __set_cpuflag(struct kvm_vcpu *vcpu, u32 flag)
{
	atomic_set_mask(flag, &vcpu->arch.sie_block->cpuflags);
}

static void set_intercept_indicators_ext(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_EXT_MASK))
		return;
	if (psw_extint_disabled(vcpu))
		__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR0;
}

static void set_intercept_indicators_mchk(struct kvm_vcpu *vcpu)
{
	if (!(pending_local_irqs(vcpu) & IRQ_PEND_MCHK_MASK))
		return;
	if (psw_mchk_disabled(vcpu))
		vcpu->arch.sie_block->ictl |= ICTL_LPSW;
	else
		vcpu->arch.sie_block->lctl |= LCTL_CR14;
}

/* Set interception request for non-deliverable local interrupts */
static void set_intercept_indicators_local(struct kvm_vcpu *vcpu)
{
	set_intercept_indicators_ext(vcpu);
	set_intercept_indicators_mchk(vcpu);
}

static void __set_intercept_indicator(struct kvm_vcpu *vcpu,
				      struct kvm_s390_interrupt_info *inti)
{
	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
		if (psw_extint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_EXT_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR0;
		break;
	case KVM_S390_MCHK:
		if (psw_mchk_disabled(vcpu))
			vcpu->arch.sie_block->ictl |= ICTL_LPSW;
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR14;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (psw_ioint_disabled(vcpu))
			__set_cpuflag(vcpu, CPUSTAT_IO_INT);
		else
			vcpu->arch.sie_block->lctl |= LCTL_CR6;
		break;
	default:
		BUG();
	}
}

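/*
 * Derive the instruction length (in bytes) for the program interrupt being
 * delivered: for intercepts that store the last instruction, the length is
 * encoded in the two leftmost bits of the IPA; otherwise the ILC reported
 * by the hardware in pgmilc is used.
 */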
static u16 get_ilc(struct kvm_vcpu *vcpu)
{
	const unsigned short table[] = { 2, 4, 4, 6 };

	switch (vcpu->arch.sie_block->icptcode) {
	case ICPT_INST:
	case ICPT_INSTPROGI:
	case ICPT_OPEREXC:
	case ICPT_PARTEXEC:
	case ICPT_IOINST:
		/* last instruction only stored for these icptcodes */
		return table[vcpu->arch.sie_block->ipa >> 14];
	case ICPT_PROGI:
		return vcpu->arch.sie_block->pgmilc;
	default:
		return 0;
	}
}

static int __must_check __deliver_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CPU_TIMER,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
					 0, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CLK_COMP,
			   (u16 __user *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_init(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_ext_info ext;
	int rc;

	spin_lock(&li->lock);
	ext = li->irq.ext;
	clear_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	li->irq.ext.ext_params2 = 0;
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pfault init parm:%x,parm64:%llx",
		   0, ext.ext_params2);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_INIT,
					 0, ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *) __LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_INIT, (u16 *) __LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, ext.ext_params2, (u64 *) __LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_machine_check(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info mchk;
	int rc;

	spin_lock(&li->lock);
	mchk = li->irq.mchk;
	/*
	 * If there was an exigent machine check pending, then any repressible
	 * machine checks that might have been pending are indicated along
	 * with it, so always clear both bits
	 */
	clear_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	clear_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	memset(&li->irq.mchk, 0, sizeof(mchk));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk.mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk.cr14, mchk.mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk.mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk.failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk.fixed_logout, sizeof(mchk.fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_restart(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu restart");
	vcpu->stat.deliver_restart_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0);

	rc  = write_guest_lc(vcpu,
			     offsetof(struct _lowcore, restart_old_psw),
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, offsetof(struct _lowcore, restart_psw),
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	clear_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_stop(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "interrupt: cpu stop");
	vcpu->stat.deliver_stop_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_SIGP_STOP,
					 0, 0);

	__set_cpuflag(vcpu, CPUSTAT_STOP_INT);
	clear_bit(IRQ_PEND_SIGP_STOP, &vcpu->arch.local_int.pending_irqs);
	return 0;
}

static int __must_check __deliver_set_prefix(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info prefix;

	spin_lock(&li->lock);
	prefix = li->irq.prefix;
	li->irq.prefix.address = 0;
	clear_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: set prefix to %x", prefix.address);
	vcpu->stat.deliver_prefix_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_SIGP_SET_PREFIX,
					 prefix.address, 0);

	kvm_s390_set_prefix(vcpu, prefix.address);
	return 0;
}

static int __must_check __deliver_emergency_signal(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int cpu_addr;
	int rc;

	spin_lock(&li->lock);
	cpu_addr = find_first_bit(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	clear_bit(cpu_addr, li->sigp_emerg_pending);
	if (bitmap_empty(li->sigp_emerg_pending, KVM_MAX_VCPUS))
		clear_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp emerg");
	vcpu->stat.deliver_emergency_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
					 cpu_addr, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EMERGENCY_SIG,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, cpu_addr, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_external_call(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info extcall;
	int rc;

	spin_lock(&li->lock);
	extcall = li->irq.extcall;
	li->irq.extcall.code = 0;
	clear_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "%s", "interrupt: sigp ext call");
	vcpu->stat.deliver_external_call++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_EXTERNAL_CALL,
					 extcall.code, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_EXTERNAL_CALL,
			   (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, extcall.code, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW, &vcpu->arch.sie_block->gpsw,
			    sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_prog(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_pgm_info pgm_info;
	int rc = 0;
	u16 ilc = get_ilc(vcpu);

	spin_lock(&li->lock);
	pgm_info = li->irq.pgm;
	clear_bit(IRQ_PEND_PROG, &li->pending_irqs);
	memset(&li->irq.pgm, 0, sizeof(pgm_info));
	spin_unlock(&li->lock);

	VCPU_EVENT(vcpu, 4, "interrupt: pgm check code:%x, ilc:%x",
		   pgm_info.code, ilc);
	vcpu->stat.deliver_program_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
					 pgm_info.code, 0);

	switch (pgm_info.code & ~PGM_PER) {
	case PGM_AFX_TRANSLATION:
	case PGM_ASX_TRANSLATION:
	case PGM_EX_TRANSLATION:
	case PGM_LFX_TRANSLATION:
	case PGM_LSTE_SEQUENCE:
	case PGM_LSX_TRANSLATION:
	case PGM_LX_TRANSLATION:
	case PGM_PRIMARY_AUTHORITY:
	case PGM_SECONDARY_AUTHORITY:
	case PGM_SPACE_SWITCH:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		break;
	case PGM_ALEN_TRANSLATION:
	case PGM_ALE_SEQUENCE:
	case PGM_ASTE_INSTANCE:
	case PGM_ASTE_SEQUENCE:
	case PGM_ASTE_VALIDITY:
	case PGM_EXTENDED_AUTHORITY:
		rc = put_guest_lc(vcpu, pgm_info.exc_access_id,
				  (u8 *)__LC_EXC_ACCESS_ID);
		break;
	case PGM_ASCE_TYPE:
	case PGM_PAGE_TRANSLATION:
	case PGM_REGION_FIRST_TRANS:
	case PGM_REGION_SECOND_TRANS:
	case PGM_REGION_THIRD_TRANS:
	case PGM_SEGMENT_TRANSLATION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		rc |= put_guest_lc(vcpu, pgm_info.op_access_id,
				   (u8 *)__LC_OP_ACCESS_ID);
		break;
	case PGM_MONITOR:
		rc = put_guest_lc(vcpu, pgm_info.mon_class_nr,
				  (u16 *)__LC_MON_CLASS_NR);
		rc |= put_guest_lc(vcpu, pgm_info.mon_code,
				   (u64 *)__LC_MON_CODE);
		break;
	case PGM_DATA:
		rc = put_guest_lc(vcpu, pgm_info.data_exc_code,
				  (u32 *)__LC_DATA_EXC_CODE);
		break;
	case PGM_PROTECTION:
		rc = put_guest_lc(vcpu, pgm_info.trans_exc_code,
				  (u64 *)__LC_TRANS_EXC_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.exc_access_id,
				   (u8 *)__LC_EXC_ACCESS_ID);
		break;
	}

	if (pgm_info.code & PGM_PER) {
		rc |= put_guest_lc(vcpu, pgm_info.per_code,
				   (u8 *) __LC_PER_CODE);
		rc |= put_guest_lc(vcpu, pgm_info.per_atmid,
				   (u8 *)__LC_PER_ATMID);
		rc |= put_guest_lc(vcpu, pgm_info.per_address,
				   (u64 *) __LC_PER_ADDRESS);
		rc |= put_guest_lc(vcpu, pgm_info.per_access_id,
				   (u8 *) __LC_PER_ACCESS_ID);
	}

	rc |= put_guest_lc(vcpu, ilc, (u16 *) __LC_PGM_ILC);
	rc |= put_guest_lc(vcpu, pgm_info.code,
			   (u16 *)__LC_PGM_INT_CODE);
	rc |= write_guest_lc(vcpu, __LC_PGM_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_PGM_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_service(struct kvm_vcpu *vcpu,
					  struct kvm_s390_interrupt_info *inti)
{
	int rc = 0;

	VCPU_EVENT(vcpu, 4, "interrupt: sclp parm:%x",
		   inti->ext.ext_params);
	vcpu->stat.deliver_service_signal++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params, 0);

	rc  = put_guest_lc(vcpu, EXT_IRQ_SERVICE_SIG, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, 0, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_pfault_done(struct kvm_vcpu *vcpu,
					      struct kvm_s390_interrupt_info *inti)
{
	int rc;

	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id,
					 KVM_S390_INT_PFAULT_DONE, 0,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, PFAULT_DONE, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_virtio(struct kvm_vcpu *vcpu,
					 struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: virtio parm:%x,parm64:%llx",
		   inti->ext.ext_params, inti->ext.ext_params2);
	vcpu->stat.deliver_virtio_interrupt++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 inti->ext.ext_params,
					 inti->ext.ext_params2);

	rc  = put_guest_lc(vcpu, EXT_IRQ_CP_SERVICE, (u16 *)__LC_EXT_INT_CODE);
	rc |= put_guest_lc(vcpu, VIRTIO_PARAM, (u16 *)__LC_EXT_CPU_ADDR);
	rc |= write_guest_lc(vcpu, __LC_EXT_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_EXT_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= put_guest_lc(vcpu, inti->ext.ext_params,
			   (u32 *)__LC_EXT_PARAMS);
	rc |= put_guest_lc(vcpu, inti->ext.ext_params2,
			   (u64 *)__LC_EXT_PARAMS2);
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_io(struct kvm_vcpu *vcpu,
				     struct kvm_s390_interrupt_info *inti)
{
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: I/O %llx", inti->type);
	vcpu->stat.deliver_io_int++;
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, inti->type,
					 ((__u32)inti->io.subchannel_id << 16) |
						inti->io.subchannel_nr,
					 ((__u64)inti->io.io_int_parm << 32) |
						inti->io.io_int_word);

	rc  = put_guest_lc(vcpu, inti->io.subchannel_id,
			   (u16 *)__LC_SUBCHANNEL_ID);
	rc |= put_guest_lc(vcpu, inti->io.subchannel_nr,
			   (u16 *)__LC_SUBCHANNEL_NR);
	rc |= put_guest_lc(vcpu, inti->io.io_int_parm,
			   (u32 *)__LC_IO_INT_PARM);
	rc |= put_guest_lc(vcpu, inti->io.io_int_word,
			   (u32 *)__LC_IO_INT_WORD);
	rc |= write_guest_lc(vcpu, __LC_IO_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_IO_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

static int __must_check __deliver_mchk_floating(struct kvm_vcpu *vcpu,
					struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_mchk_info *mchk = &inti->mchk;
	int rc;

	VCPU_EVENT(vcpu, 4, "interrupt: machine check mcic=%llx",
		   mchk->mcic);
	trace_kvm_s390_deliver_interrupt(vcpu->vcpu_id, KVM_S390_MCHK,
					 mchk->cr14, mchk->mcic);

	rc  = kvm_s390_vcpu_store_status(vcpu, KVM_S390_STORE_STATUS_PREFIXED);
	rc |= put_guest_lc(vcpu, mchk->mcic,
			   (u64 __user *) __LC_MCCK_CODE);
	rc |= put_guest_lc(vcpu, mchk->failing_storage_address,
			   (u64 __user *) __LC_MCCK_FAIL_STOR_ADDR);
	rc |= write_guest_lc(vcpu, __LC_PSW_SAVE_AREA,
			     &mchk->fixed_logout, sizeof(mchk->fixed_logout));
	rc |= write_guest_lc(vcpu, __LC_MCK_OLD_PSW,
			     &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	rc |= read_guest_lc(vcpu, __LC_MCK_NEW_PSW,
			    &vcpu->arch.sie_block->gpsw, sizeof(psw_t));
	return rc ? -EFAULT : 0;
}

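/*
 * Delivery callbacks for cpu-local interrupts, indexed by IRQ_PEND_* bit
 * number. The bit numbers are defined in descending order of interrupt
 * priority, so find_first_bit() on the pending mask yields the next
 * interrupt to deliver.
 */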
typedef int (*deliver_irq_t)(struct kvm_vcpu *vcpu);

static const deliver_irq_t deliver_irq_funcs[] = {
	[IRQ_PEND_MCHK_EX]        = __deliver_machine_check,
	[IRQ_PEND_PROG]           = __deliver_prog,
	[IRQ_PEND_EXT_EMERGENCY]  = __deliver_emergency_signal,
	[IRQ_PEND_EXT_EXTERNAL]   = __deliver_external_call,
	[IRQ_PEND_EXT_CLOCK_COMP] = __deliver_ckc,
	[IRQ_PEND_EXT_CPU_TIMER]  = __deliver_cpu_timer,
	[IRQ_PEND_RESTART]        = __deliver_restart,
	[IRQ_PEND_SIGP_STOP]      = __deliver_stop,
	[IRQ_PEND_SET_PREFIX]     = __deliver_set_prefix,
	[IRQ_PEND_PFAULT_INIT]    = __deliver_pfault_init,
};

static int __must_check __deliver_floating_interrupt(struct kvm_vcpu *vcpu,
					struct kvm_s390_interrupt_info *inti)
{
	int rc;

	switch (inti->type) {
	case KVM_S390_INT_SERVICE:
		rc = __deliver_service(vcpu, inti);
		break;
	case KVM_S390_INT_PFAULT_DONE:
		rc = __deliver_pfault_done(vcpu, inti);
		break;
	case KVM_S390_INT_VIRTIO:
		rc = __deliver_virtio(vcpu, inti);
		break;
	case KVM_S390_MCHK:
		rc = __deliver_mchk_floating(vcpu, inti);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		rc = __deliver_io(vcpu, inti);
		break;
	default:
		BUG();
	}

	return rc;
}

/* Check whether SIGP interpretation facility has an external call pending */
int kvm_s390_si_ext_call_pending(struct kvm_vcpu *vcpu)
{
	atomic_t *sigp_ctrl = &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl;

	if (!psw_extint_disabled(vcpu) &&
	    (vcpu->arch.sie_block->gcr[0] & 0x2000ul) &&
	    (atomic_read(sigp_ctrl) & SIGP_CTRL_C) &&
	    (atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_ECALL_PEND))
		return 1;

	return 0;
}

int kvm_cpu_has_interrupt(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *inti;
	int rc;

	rc = !!deliverable_local_irqs(vcpu);

	if ((!rc) && atomic_read(&fi->active)) {
		spin_lock(&fi->lock);
		list_for_each_entry(inti, &fi->list, list)
			if (__interrupt_is_deliverable(vcpu, inti)) {
				rc = 1;
				break;
			}
		spin_unlock(&fi->lock);
	}

	if (!rc && kvm_cpu_has_pending_timer(vcpu))
		rc = 1;

	if (!rc && kvm_s390_si_ext_call_pending(vcpu))
		rc = 1;

	return rc;
}

int kvm_cpu_has_pending_timer(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ckc <
	      get_tod_clock_fast() + vcpu->arch.sie_block->epoch))
		return 0;
	if (!ckc_interrupts_enabled(vcpu))
		return 0;
	return 1;
}

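/*
 * Handle an enabled wait: block the vcpu until it becomes runnable again,
 * arming the ckc hrtimer first if clock comparator interrupts are enabled
 * so the wait is bounded. A wait with all interrupt classes disabled is
 * rejected with -EOPNOTSUPP.
 */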
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu)
{
	u64 now, sltime;

	vcpu->stat.exit_wait_state++;

	/* fast path */
	if (kvm_cpu_has_pending_timer(vcpu) || kvm_arch_vcpu_runnable(vcpu))
		return 0;

	if (psw_interrupts_disabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "disabled wait");
		return -EOPNOTSUPP; /* disabled wait */
	}

	if (!ckc_interrupts_enabled(vcpu)) {
		VCPU_EVENT(vcpu, 3, "%s", "enabled wait w/o timer");
		__set_cpu_idle(vcpu);
		goto no_timer;
	}

	now = get_tod_clock_fast() + vcpu->arch.sie_block->epoch;
	sltime = tod_to_ns(vcpu->arch.sie_block->ckc - now);

	/* underflow */
	if (vcpu->arch.sie_block->ckc < now)
		return 0;

	__set_cpu_idle(vcpu);
	hrtimer_start(&vcpu->arch.ckc_timer, ktime_set (0, sltime) , HRTIMER_MODE_REL);
	VCPU_EVENT(vcpu, 5, "enabled wait via clock comparator: %llx ns", sltime);
no_timer:
	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	kvm_vcpu_block(vcpu);
	__unset_cpu_idle(vcpu);
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	hrtimer_try_to_cancel(&vcpu->arch.ckc_timer);
	return 0;
}

void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu)
{
	if (waitqueue_active(&vcpu->wq)) {
		/*
		 * The vcpu gave up the cpu voluntarily, mark it as a good
		 * yield-candidate.
		 */
		vcpu->preempted = true;
		wake_up_interruptible(&vcpu->wq);
		vcpu->stat.halt_wakeup++;
	}
}

enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer)
{
	struct kvm_vcpu *vcpu;

	vcpu = container_of(timer, struct kvm_vcpu, arch.ckc_timer);
	kvm_s390_vcpu_wakeup(vcpu);

	return HRTIMER_NORESTART;
}

void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	spin_lock(&li->lock);
	li->pending_irqs = 0;
	bitmap_zero(li->sigp_emerg_pending, KVM_MAX_VCPUS);
	memset(&li->irq, 0, sizeof(li->irq));
	spin_unlock(&li->lock);

	/* clear pending external calls set by sigp interpretation facility */
	atomic_clear_mask(CPUSTAT_ECALL_PEND, li->cpuflags);
	atomic_clear_mask(SIGP_CTRL_C,
			  &vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].ctrl);
}

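/*
 * Deliver everything that is currently deliverable: first the cpu-local
 * interrupts in priority order, then any matching floating interrupts.
 * Interrupts that remain pending but are not deliverable get interception
 * controls set so we exit the guest once it enables them.
 */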
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_float_interrupt *fi = vcpu->arch.local_int.float_int;
	struct kvm_s390_interrupt_info *n, *inti = NULL;
	deliver_irq_t func;
	int deliver;
	int rc = 0;
	unsigned long irq_type;
	unsigned long deliverable_irqs;

	__reset_intercept_indicators(vcpu);

	/* pending ckc conditions might have been invalidated */
	clear_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	if (kvm_cpu_has_pending_timer(vcpu))
		set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);

	do {
		deliverable_irqs = deliverable_local_irqs(vcpu);
		/* bits are in the order of interrupt priority */
		irq_type = find_first_bit(&deliverable_irqs, IRQ_PEND_COUNT);
		if (irq_type == IRQ_PEND_COUNT)
			break;
		func = deliver_irq_funcs[irq_type];
		if (unlikely(!func)) {
			WARN_ON_ONCE(func == NULL);
			clear_bit(irq_type, &li->pending_irqs);
			continue;
		}
		rc = func(vcpu);
	} while (!rc && irq_type != IRQ_PEND_COUNT);

	set_intercept_indicators_local(vcpu);

	if (!rc && atomic_read(&fi->active)) {
		do {
			deliver = 0;
			spin_lock(&fi->lock);
			list_for_each_entry_safe(inti, n, &fi->list, list) {
				if (__interrupt_is_deliverable(vcpu, inti)) {
					list_del(&inti->list);
					fi->irq_count--;
					deliver = 1;
					break;
				}
				__set_intercept_indicator(vcpu, inti);
			}
			if (list_empty(&fi->list))
				atomic_set(&fi->active, 0);
			spin_unlock(&fi->lock);
			if (deliver) {
				rc = __deliver_floating_interrupt(vcpu, inti);
				kfree(inti);
			}
		} while (!rc && deliver);
	}

	return rc;
}

static int __inject_prog(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	li->irq.pgm = irq->u.pgm;
	set_bit(IRQ_PEND_PROG, &li->pending_irqs);
	return 0;
}

int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;

	VCPU_EVENT(vcpu, 3, "inject: program check %d (from kernel)", code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT, code,
				   0, 1);
	spin_lock(&li->lock);
	irq.u.pgm.code = code;
	__inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);

	return 0;
}

int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
			     struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_irq irq;
	int rc;

	VCPU_EVENT(vcpu, 3, "inject: prog irq %d (from kernel)",
		   pgm_info->code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_PROGRAM_INT,
				   pgm_info->code, 0, 1);
	spin_lock(&li->lock);
	irq.u.pgm = *pgm_info;
	rc = __inject_prog(vcpu, &irq);
	BUG_ON(waitqueue_active(li->wq));
	spin_unlock(&li->lock);
	return rc;
}

static int __inject_pfault_init(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: external irq params:%x, params2:%llx",
		   irq->u.ext.ext_params, irq->u.ext.ext_params2);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_PFAULT_INIT,
				   irq->u.ext.ext_params,
				   irq->u.ext.ext_params2, 2);

	li->irq.ext = irq->u.ext;
	set_bit(IRQ_PEND_PFAULT_INIT, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_extcall(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_extcall_info *extcall = &li->irq.extcall;

	VCPU_EVENT(vcpu, 3, "inject: external call source-cpu:%u",
		   irq->u.extcall.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EXTERNAL_CALL,
				   irq->u.extcall.code, 0, 2);

	*extcall = irq->u.extcall;
	set_bit(IRQ_PEND_EXT_EXTERNAL, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_set_prefix(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_prefix_info *prefix = &li->irq.prefix;

	VCPU_EVENT(vcpu, 3, "inject: set prefix to %x (from user)",
		   irq->u.prefix.address);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_SET_PREFIX,
				   prefix->address, 0, 2);

	*prefix = irq->u.prefix;
	set_bit(IRQ_PEND_SET_PREFIX, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_stop(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_SIGP_STOP, 0, 0, 2);

	li->action_bits |= ACTION_STOP_ON_STOP;
	set_bit(IRQ_PEND_SIGP_STOP, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_restart(struct kvm_vcpu *vcpu,
				 struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: restart type %llx", irq->type);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_RESTART, 0, 0, 2);

	set_bit(IRQ_PEND_RESTART, &li->pending_irqs);
	return 0;
}

static int __inject_sigp_emergency(struct kvm_vcpu *vcpu,
				   struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_emerg_info *emerg = &li->irq.emerg;

	VCPU_EVENT(vcpu, 3, "inject: emergency %u\n",
		   irq->u.emerg.code);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_EMERGENCY,
				   irq->u.emerg.code, 0, 2);

	set_bit(emerg->code, li->sigp_emerg_pending);
	set_bit(IRQ_PEND_EXT_EMERGENCY, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_mchk(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	struct kvm_s390_mchk_info *mchk = &li->irq.mchk;

	VCPU_EVENT(vcpu, 5, "inject: machine check parm64:%llx",
		   irq->u.mchk.mcic);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_MCHK, 0,
				   irq->u.mchk.mcic, 2);

	/*
	 * Because repressible machine checks can be indicated along with
	 * exigent machine checks (PoP, Chapter 11, Interruption action)
	 * we need to combine cr14, mcic and external damage code.
	 * Failing storage address and the logout area should not be or'ed
	 * together, we just indicate the last occurrence of the corresponding
	 * machine check
	 */
	mchk->cr14 |= irq->u.mchk.cr14;
	mchk->mcic |= irq->u.mchk.mcic;
	mchk->ext_damage_code |= irq->u.mchk.ext_damage_code;
	mchk->failing_storage_address = irq->u.mchk.failing_storage_address;
	memcpy(&mchk->fixed_logout, &irq->u.mchk.fixed_logout,
	       sizeof(mchk->fixed_logout));
	if (mchk->mcic & MCHK_EX_MASK)
		set_bit(IRQ_PEND_MCHK_EX, &li->pending_irqs);
	else if (mchk->mcic & MCHK_REP_MASK)
		set_bit(IRQ_PEND_MCHK_REP, &li->pending_irqs);
	return 0;
}

static int __inject_ckc(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CLOCK_COMP);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CLOCK_COMP,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CLOCK_COMP, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

static int __inject_cpu_timer(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;

	VCPU_EVENT(vcpu, 3, "inject: type %x", KVM_S390_INT_CPU_TIMER);
	trace_kvm_s390_inject_vcpu(vcpu->vcpu_id, KVM_S390_INT_CPU_TIMER,
				   0, 0, 2);

	set_bit(IRQ_PEND_EXT_CPU_TIMER, &li->pending_irqs);
	atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
	return 0;
}

struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 cr6, u64 schid)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *inti, *iter;

	if ((!schid && !cr6) || (schid && cr6))
		return NULL;
	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	inti = NULL;
	list_for_each_entry(iter, &fi->list, list) {
		if (!is_ioint(iter->type))
			continue;
		if (cr6 &&
		    ((cr6 & int_word_to_isc_bits(iter->io.io_int_word)) == 0))
			continue;
		if (schid) {
			if (((schid & 0x00000000ffff0000) >> 16) !=
			    iter->io.subchannel_id)
				continue;
			if ((schid & 0x000000000000ffff) !=
			    iter->io.subchannel_nr)
				continue;
		}
		inti = iter;
		break;
	}
	if (inti) {
		list_del_init(&inti->list);
		fi->irq_count--;
	}
	if (list_empty(&fi->list))
		atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return inti;
}

static int __inject_vm(struct kvm *kvm, struct kvm_s390_interrupt_info *inti)
{
	struct kvm_s390_local_interrupt *li;
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *iter;
	struct kvm_vcpu *dst_vcpu = NULL;
	int sigcpu;
	int rc = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	if (fi->irq_count >= KVM_S390_MAX_FLOAT_IRQS) {
		rc = -EINVAL;
		goto unlock_fi;
	}
	fi->irq_count++;
	if (!is_ioint(inti->type)) {
		list_add_tail(&inti->list, &fi->list);
	} else {
		u64 isc_bits = int_word_to_isc_bits(inti->io.io_int_word);

		/* Keep I/O interrupts sorted in isc order. */
		list_for_each_entry(iter, &fi->list, list) {
			if (!is_ioint(iter->type))
				continue;
			if (int_word_to_isc_bits(iter->io.io_int_word)
			    <= isc_bits)
				continue;
			break;
		}
		list_add_tail(&inti->list, &iter->list);
	}
	atomic_set(&fi->active, 1);
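	/*
	 * Kick a vcpu so it notices the new floating interrupt: prefer an
	 * idle (waiting) vcpu, otherwise pick one round-robin.
	 */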
	sigcpu = find_first_bit(fi->idle_mask, KVM_MAX_VCPUS);
	if (sigcpu == KVM_MAX_VCPUS) {
		do {
			sigcpu = fi->next_rr_cpu++;
			if (sigcpu == KVM_MAX_VCPUS)
				sigcpu = fi->next_rr_cpu = 0;
		} while (kvm_get_vcpu(kvm, sigcpu) == NULL);
	}
	dst_vcpu = kvm_get_vcpu(kvm, sigcpu);
	li = &dst_vcpu->arch.local_int;
	spin_lock(&li->lock);
	switch (inti->type) {
	case KVM_S390_MCHK:
		atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		atomic_set_mask(CPUSTAT_IO_INT, li->cpuflags);
		break;
	default:
		atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
		break;
	}
	spin_unlock(&li->lock);
	kvm_s390_vcpu_wakeup(kvm_get_vcpu(kvm, sigcpu));
unlock_fi:
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
	return rc;
}

int kvm_s390_inject_vm(struct kvm *kvm,
		       struct kvm_s390_interrupt *s390int)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kzalloc(sizeof(*inti), GFP_KERNEL);
	if (!inti)
		return -ENOMEM;

	inti->type = s390int->type;
	switch (inti->type) {
	case KVM_S390_INT_VIRTIO:
		VM_EVENT(kvm, 5, "inject: virtio parm:%x,parm64:%llx",
			 s390int->parm, s390int->parm64);
		inti->ext.ext_params = s390int->parm;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_INT_SERVICE:
		VM_EVENT(kvm, 5, "inject: sclp parm:%x", s390int->parm);
		inti->ext.ext_params = s390int->parm;
		break;
	case KVM_S390_INT_PFAULT_DONE:
		inti->type = s390int->type;
		inti->ext.ext_params2 = s390int->parm64;
		break;
	case KVM_S390_MCHK:
		VM_EVENT(kvm, 5, "inject: machine check parm64:%llx",
			 s390int->parm64);
		inti->mchk.cr14 = s390int->parm; /* upper bits are not used */
		inti->mchk.mcic = s390int->parm64;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		if (inti->type & IOINT_AI_MASK)
			VM_EVENT(kvm, 5, "%s", "inject: I/O (AI)");
		else
			VM_EVENT(kvm, 5, "inject: I/O css %x ss %x schid %04x",
				 s390int->type & IOINT_CSSID_MASK,
				 s390int->type & IOINT_SSID_MASK,
				 s390int->type & IOINT_SCHID_MASK);
		inti->io.subchannel_id = s390int->parm >> 16;
		inti->io.subchannel_nr = s390int->parm & 0x0000ffffu;
		inti->io.io_int_parm = s390int->parm64 >> 32;
		inti->io.io_int_word = s390int->parm64 & 0x00000000ffffffffull;
		break;
	default:
		kfree(inti);
		return -EINVAL;
	}
	trace_kvm_s390_inject_vm(s390int->type, s390int->parm, s390int->parm64,
				 2);

	return __inject_vm(kvm, inti);
}

void kvm_s390_reinject_io_int(struct kvm *kvm,
			      struct kvm_s390_interrupt_info *inti)
{
	__inject_vm(kvm, inti);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *irq)
{
	irq->type = s390int->type;
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		if (s390int->parm & 0xffff0000)
			return -EINVAL;
		irq->u.pgm.code = s390int->parm;
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		irq->u.prefix.address = s390int->parm;
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		if (irq->u.extcall.code & 0xffff0000)
			return -EINVAL;
		irq->u.extcall.code = s390int->parm;
		break;
	case KVM_S390_INT_EMERGENCY:
		if (irq->u.emerg.code & 0xffff0000)
			return -EINVAL;
		irq->u.emerg.code = s390int->parm;
		break;
	case KVM_S390_MCHK:
		irq->u.mchk.mcic = s390int->parm64;
		break;
	}
	return 0;
}

int kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu, struct kvm_s390_irq *irq)
{
	struct kvm_s390_local_interrupt *li = &vcpu->arch.local_int;
	int rc;

	spin_lock(&li->lock);
	switch (irq->type) {
	case KVM_S390_PROGRAM_INT:
		VCPU_EVENT(vcpu, 3, "inject: program check %d (from user)",
			   irq->u.pgm.code);
		rc = __inject_prog(vcpu, irq);
		break;
	case KVM_S390_SIGP_SET_PREFIX:
		rc = __inject_set_prefix(vcpu, irq);
		break;
	case KVM_S390_SIGP_STOP:
		rc = __inject_sigp_stop(vcpu, irq);
		break;
	case KVM_S390_RESTART:
		rc = __inject_sigp_restart(vcpu, irq);
		break;
	case KVM_S390_INT_CLOCK_COMP:
		rc = __inject_ckc(vcpu);
		break;
	case KVM_S390_INT_CPU_TIMER:
		rc = __inject_cpu_timer(vcpu);
		break;
	case KVM_S390_INT_EXTERNAL_CALL:
		rc = __inject_extcall(vcpu, irq);
		break;
	case KVM_S390_INT_EMERGENCY:
		rc = __inject_sigp_emergency(vcpu, irq);
		break;
	case KVM_S390_MCHK:
		rc = __inject_mchk(vcpu, irq);
		break;
	case KVM_S390_INT_PFAULT_INIT:
		rc = __inject_pfault_init(vcpu, irq);
		break;
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
	default:
		rc = -EINVAL;
	}
	spin_unlock(&li->lock);
	if (!rc)
		kvm_s390_vcpu_wakeup(vcpu);
	return rc;
}

void kvm_s390_clear_float_irqs(struct kvm *kvm)
{
	struct kvm_s390_float_interrupt *fi;
	struct kvm_s390_interrupt_info *n, *inti = NULL;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);
	list_for_each_entry_safe(inti, n, &fi->list, list) {
		list_del(&inti->list);
		kfree(inti);
	}
	fi->irq_count = 0;
	atomic_set(&fi->active, 0);
	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);
}

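/*
 * Helpers for the floating interrupt controller (FLIC) device attributes:
 * KVM_DEV_FLIC_GET_ALL_IRQS copies the currently pending floating
 * interrupts to a userspace buffer as an array of struct kvm_s390_irq,
 * and KVM_DEV_FLIC_ENQUEUE reads such an array back for injection.
 */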
static inline int copy_irq_to_user(struct kvm_s390_interrupt_info *inti,
				   u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	struct kvm_s390_irq irq = {0};

	irq.type = inti->type;
	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		irq.u.ext = inti->ext;
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		irq.u.io = inti->io;
		break;
	case KVM_S390_MCHK:
		irq.u.mchk = inti->mchk;
		break;
	default:
		return -EINVAL;
	}

	if (copy_to_user(uptr, &irq, sizeof(irq)))
		return -EFAULT;

	return 0;
}

static int get_all_floating_irqs(struct kvm *kvm, __u8 *buf, __u64 len)
{
	struct kvm_s390_interrupt_info *inti;
	struct kvm_s390_float_interrupt *fi;
	int ret = 0;
	int n = 0;

	mutex_lock(&kvm->lock);
	fi = &kvm->arch.float_int;
	spin_lock(&fi->lock);

	list_for_each_entry(inti, &fi->list, list) {
		if (len < sizeof(struct kvm_s390_irq)) {
			/* signal userspace to try again */
			ret = -ENOMEM;
			break;
		}
		ret = copy_irq_to_user(inti, buf);
		if (ret)
			break;
		buf += sizeof(struct kvm_s390_irq);
		len -= sizeof(struct kvm_s390_irq);
		n++;
	}

	spin_unlock(&fi->lock);
	mutex_unlock(&kvm->lock);

	return ret < 0 ? ret : n;
}

static int flic_get_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;

	switch (attr->group) {
	case KVM_DEV_FLIC_GET_ALL_IRQS:
		r = get_all_floating_irqs(dev->kvm, (u8 *) attr->addr,
					  attr->attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static inline int copy_irq_from_user(struct kvm_s390_interrupt_info *inti,
				     u64 addr)
{
	struct kvm_s390_irq __user *uptr = (struct kvm_s390_irq __user *) addr;
	void *target = NULL;
	void __user *source;
	u64 size;

	if (get_user(inti->type, (u64 __user *)addr))
		return -EFAULT;

	switch (inti->type) {
	case KVM_S390_INT_PFAULT_INIT:
	case KVM_S390_INT_PFAULT_DONE:
	case KVM_S390_INT_VIRTIO:
	case KVM_S390_INT_SERVICE:
		target = (void *) &inti->ext;
		source = &uptr->u.ext;
		size = sizeof(inti->ext);
		break;
	case KVM_S390_INT_IO_MIN...KVM_S390_INT_IO_MAX:
		target = (void *) &inti->io;
		source = &uptr->u.io;
		size = sizeof(inti->io);
		break;
	case KVM_S390_MCHK:
		target = (void *) &inti->mchk;
		source = &uptr->u.mchk;
		size = sizeof(inti->mchk);
		break;
	default:
		return -EINVAL;
	}

	if (copy_from_user(target, source, size))
		return -EFAULT;

	return 0;
}

static int enqueue_floating_irq(struct kvm_device *dev,
				struct kvm_device_attr *attr)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	int r = 0;
	int len = attr->attr;

	if (len % sizeof(struct kvm_s390_irq) != 0)
		return -EINVAL;
	else if (len > KVM_S390_FLIC_MAX_BUFFER)
		return -EINVAL;

	while (len >= sizeof(struct kvm_s390_irq)) {
		inti = kzalloc(sizeof(*inti), GFP_KERNEL);
		if (!inti)
			return -ENOMEM;

		r = copy_irq_from_user(inti, attr->addr);
		if (r) {
			kfree(inti);
			return r;
		}
		r = __inject_vm(dev->kvm, inti);
		if (r) {
			kfree(inti);
			return r;
		}
		len -= sizeof(struct kvm_s390_irq);
		attr->addr += sizeof(struct kvm_s390_irq);
	}

	return r;
}

static struct s390_io_adapter *get_io_adapter(struct kvm *kvm, unsigned int id)
{
	if (id >= MAX_S390_IO_ADAPTERS)
		return NULL;
	return kvm->arch.adapters[id];
}

static int register_io_adapter(struct kvm_device *dev,
			       struct kvm_device_attr *attr)
{
	struct s390_io_adapter *adapter;
	struct kvm_s390_io_adapter adapter_info;

	if (copy_from_user(&adapter_info,
			   (void __user *)attr->addr, sizeof(adapter_info)))
		return -EFAULT;

	if ((adapter_info.id >= MAX_S390_IO_ADAPTERS) ||
	    (dev->kvm->arch.adapters[adapter_info.id] != NULL))
		return -EINVAL;

	adapter = kzalloc(sizeof(*adapter), GFP_KERNEL);
	if (!adapter)
		return -ENOMEM;

	INIT_LIST_HEAD(&adapter->maps);
	init_rwsem(&adapter->maps_lock);
	atomic_set(&adapter->nr_maps, 0);
	adapter->id = adapter_info.id;
	adapter->isc = adapter_info.isc;
	adapter->maskable = adapter_info.maskable;
	adapter->masked = false;
	adapter->swap = adapter_info.swap;
	dev->kvm->arch.adapters[adapter->id] = adapter;

	return 0;
}

int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked)
{
	int ret;
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);

	if (!adapter || !adapter->maskable)
		return -EINVAL;
	ret = adapter->masked;
	adapter->masked = masked;
	return ret;
}

static int kvm_s390_adapter_map(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map;
	int ret;

	if (!adapter || !addr)
		return -EINVAL;

	map = kzalloc(sizeof(*map), GFP_KERNEL);
	if (!map) {
		ret = -ENOMEM;
		goto out;
	}
	INIT_LIST_HEAD(&map->list);
	map->guest_addr = addr;
	map->addr = gmap_translate(kvm->arch.gmap, addr);
	if (map->addr == -EFAULT) {
		ret = -EFAULT;
		goto out;
	}
	ret = get_user_pages_fast(map->addr, 1, 1, &map->page);
	if (ret < 0)
		goto out;
	BUG_ON(ret != 1);
	down_write(&adapter->maps_lock);
	if (atomic_inc_return(&adapter->nr_maps) < MAX_S390_ADAPTER_MAPS) {
		list_add_tail(&map->list, &adapter->maps);
		ret = 0;
	} else {
		put_page(map->page);
		ret = -EINVAL;
	}
	up_write(&adapter->maps_lock);
out:
	if (ret)
		kfree(map);
	return ret;
}

static int kvm_s390_adapter_unmap(struct kvm *kvm, unsigned int id, __u64 addr)
{
	struct s390_io_adapter *adapter = get_io_adapter(kvm, id);
	struct s390_map_info *map, *tmp;
	int found = 0;

	if (!adapter || !addr)
		return -EINVAL;

	down_write(&adapter->maps_lock);
	list_for_each_entry_safe(map, tmp, &adapter->maps, list) {
		if (map->guest_addr == addr) {
			found = 1;
			atomic_dec(&adapter->nr_maps);
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
			break;
		}
	}
	up_write(&adapter->maps_lock);

	return found ? 0 : -EINVAL;
}

void kvm_s390_destroy_adapters(struct kvm *kvm)
{
	int i;
	struct s390_map_info *map, *tmp;

	for (i = 0; i < MAX_S390_IO_ADAPTERS; i++) {
		if (!kvm->arch.adapters[i])
			continue;
		list_for_each_entry_safe(map, tmp,
					 &kvm->arch.adapters[i]->maps, list) {
			list_del(&map->list);
			put_page(map->page);
			kfree(map);
		}
		kfree(kvm->arch.adapters[i]);
	}
}

static int modify_io_adapter(struct kvm_device *dev,
			     struct kvm_device_attr *attr)
{
	struct kvm_s390_io_adapter_req req;
	struct s390_io_adapter *adapter;
	int ret;

	if (copy_from_user(&req, (void __user *)attr->addr, sizeof(req)))
		return -EFAULT;

	adapter = get_io_adapter(dev->kvm, req.id);
	if (!adapter)
		return -EINVAL;
	switch (req.type) {
	case KVM_S390_IO_ADAPTER_MASK:
		ret = kvm_s390_mask_adapter(dev->kvm, req.id, req.mask);
		if (ret > 0)
			ret = 0;
		break;
	case KVM_S390_IO_ADAPTER_MAP:
		ret = kvm_s390_adapter_map(dev->kvm, req.id, req.addr);
		break;
	case KVM_S390_IO_ADAPTER_UNMAP:
		ret = kvm_s390_adapter_unmap(dev->kvm, req.id, req.addr);
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

static int flic_set_attr(struct kvm_device *dev, struct kvm_device_attr *attr)
{
	int r = 0;
	unsigned int i;
	struct kvm_vcpu *vcpu;

	switch (attr->group) {
	case KVM_DEV_FLIC_ENQUEUE:
		r = enqueue_floating_irq(dev, attr);
		break;
	case KVM_DEV_FLIC_CLEAR_IRQS:
		kvm_s390_clear_float_irqs(dev->kvm);
		break;
	case KVM_DEV_FLIC_APF_ENABLE:
		dev->kvm->arch.gmap->pfault_enabled = 1;
		break;
	case KVM_DEV_FLIC_APF_DISABLE_WAIT:
		dev->kvm->arch.gmap->pfault_enabled = 0;
		/*
		 * Make sure no async faults are in transition when
		 * clearing the queues. So we don't need to worry
		 * about late coming workers.
		 */
		synchronize_srcu(&dev->kvm->srcu);
		kvm_for_each_vcpu(i, vcpu, dev->kvm)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_DEV_FLIC_ADAPTER_REGISTER:
		r = register_io_adapter(dev, attr);
		break;
	case KVM_DEV_FLIC_ADAPTER_MODIFY:
		r = modify_io_adapter(dev, attr);
		break;
	default:
		r = -EINVAL;
	}

	return r;
}

static int flic_create(struct kvm_device *dev, u32 type)
{
	if (!dev)
		return -EINVAL;
	if (dev->kvm->arch.flic)
		return -EINVAL;
	dev->kvm->arch.flic = dev;
	return 0;
}

static void flic_destroy(struct kvm_device *dev)
{
	dev->kvm->arch.flic = NULL;
	kfree(dev);
}

/* s390 floating irq controller (flic) */
struct kvm_device_ops kvm_flic_ops = {
	.name = "kvm-flic",
	.get_attr = flic_get_attr,
	.set_attr = flic_set_attr,
	.create = flic_create,
	.destroy = flic_destroy,
};

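/*
 * Adapter interrupts are signalled to the guest through two bits in guest
 * memory: a per-source indicator bit and a summary bit. get_ind_bit()
 * converts a guest address/bit offset pair into a bit number usable with
 * the bitops on the mapped page, honouring the adapter's swap setting for
 * big-endian (MSB 0) bit numbering.
 */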
static unsigned long get_ind_bit(__u64 addr, unsigned long bit_nr, bool swap)
{
	unsigned long bit;

	bit = bit_nr + (addr % PAGE_SIZE) * 8;

	return swap ? (bit ^ (BITS_PER_LONG - 1)) : bit;
}

static struct s390_map_info *get_map_info(struct s390_io_adapter *adapter,
					  u64 addr)
{
	struct s390_map_info *map;

	if (!adapter)
		return NULL;

	list_for_each_entry(map, &adapter->maps, list) {
		if (map->guest_addr == addr)
			return map;
	}
	return NULL;
}

static int adapter_indicators_set(struct kvm *kvm,
				  struct s390_io_adapter *adapter,
				  struct kvm_s390_adapter_int *adapter_int)
{
	unsigned long bit;
	int summary_set, idx;
	struct s390_map_info *info;
	void *map;

	info = get_map_info(adapter, adapter_int->ind_addr);
	if (!info)
		return -1;
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->ind_offset, adapter->swap);
	set_bit(bit, map);
	idx = srcu_read_lock(&kvm->srcu);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	info = get_map_info(adapter, adapter_int->summary_addr);
	if (!info) {
		srcu_read_unlock(&kvm->srcu, idx);
		return -1;
	}
	map = page_address(info->page);
	bit = get_ind_bit(info->addr, adapter_int->summary_offset,
			  adapter->swap);
	summary_set = test_and_set_bit(bit, map);
	mark_page_dirty(kvm, info->guest_addr >> PAGE_SHIFT);
	set_page_dirty_lock(info->page);
	srcu_read_unlock(&kvm->srcu, idx);
	return summary_set ? 0 : 1;
}

/*
 * < 0 - not injected due to error
 * = 0 - coalesced, summary indicator already active
 * > 0 - injected interrupt
 */
static int set_adapter_int(struct kvm_kernel_irq_routing_entry *e,
			   struct kvm *kvm, int irq_source_id, int level,
			   bool line_status)
{
	int ret;
	struct s390_io_adapter *adapter;

	/* We're only interested in the 0->1 transition. */
	if (!level)
		return 0;
	adapter = get_io_adapter(kvm, e->adapter.adapter_id);
	if (!adapter)
		return -1;
	down_read(&adapter->maps_lock);
	ret = adapter_indicators_set(kvm, adapter, &e->adapter);
	up_read(&adapter->maps_lock);
	if ((ret > 0) && !adapter->masked) {
		struct kvm_s390_interrupt s390int = {
			.type = KVM_S390_INT_IO(1, 0, 0, 0),
			.parm = 0,
			.parm64 = (adapter->isc << 27) | 0x80000000,
		};
		ret = kvm_s390_inject_vm(kvm, &s390int);
		if (ret == 0)
			ret = 1;
	}
	return ret;
}

int kvm_set_routing_entry(struct kvm_kernel_irq_routing_entry *e,
			  const struct kvm_irq_routing_entry *ue)
{
	int ret;

	switch (ue->type) {
	case KVM_IRQ_ROUTING_S390_ADAPTER:
		e->set = set_adapter_int;
		e->adapter.summary_addr = ue->u.adapter.summary_addr;
		e->adapter.ind_addr = ue->u.adapter.ind_addr;
		e->adapter.summary_offset = ue->u.adapter.summary_offset;
		e->adapter.ind_offset = ue->u.adapter.ind_offset;
		e->adapter.adapter_id = ue->u.adapter.adapter_id;
		ret = 0;
		break;
	default:
		ret = -EINVAL;
	}

	return ret;
}

int kvm_set_msi(struct kvm_kernel_irq_routing_entry *e, struct kvm *kvm,
		int irq_source_id, int level, bool line_status)