// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 *    Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"

struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	__u8 reserved[0x0700 - 0x0230];		/* 0x0230 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};

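/*
 * Note on the layout above: the offsets in the comments assume that a
 * vsie_page occupies exactly one page, with the shadow scb at offset 0 -
 * kvm_s390_handle_vsie() asserts this via
 * BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE). The reserved field
 * pads the structure so that crycb and fac end up at fixed offsets.
 */
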
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipa = 0x1000;
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}

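/*
 * The reason code ends up in the upper half of the ipb, where guest 2
 * expects it for a validity intercept; unshadow_scb() copies icptcode
 * and ipb back into the original scb, and the "> 0" return value tells
 * our callers that control has to be given back to guest 2.
 */
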
/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}

/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}

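/*
 * PROG_REQUEST in prog20 blocks (re)entry into SIE for this shadow scb,
 * while PROG_IN_SIE in prog0c tells us whether the scb is currently
 * executing. prefix_unmapped_sync() therefore requests a stop interrupt
 * and busy-waits until the vSIE has been left - this is what the gmap
 * notifier below relies on when a prefix page is unmapped concurrently.
 */
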
/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}

/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}

/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 on success, -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o,
			unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h,
		   BITS_PER_BYTE * sizeof(struct kvm_s390_apcb1));

	return 0;
}

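/*
 * The bitmap_and() with the host apcb is what actually enforces the
 * shadowing: guest 3 may only use the AP masks that guest 2 itself has
 * been given by the host, so bits not set in the host copy are stripped
 * from the shadow copy before it is handed to the firmware.
 */
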
/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Returns 0 on success or an error number if the guest and host crycb
 * are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
		      const u32 crycb_o,
		      struct kvm_s390_crypto_cb *crycb_h,
		      int fmt_o, int fmt_h)
{
	struct kvm_s390_crypto_cb *crycb;

	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    (unsigned long) &crycb->apcb1,
				    (unsigned long *)&crycb_h->apcb1);
	}
	return -EINVAL;
}

/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	int apie_h;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;
	if (!(crycbd_o & vcpu->arch.sie_block->crycbd & CRYCB_FORMAT1))
		return 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	if (!apie_h && !key_msk)
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_h && (scb_o->eca & ECA_APIE)) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	if (!ecb3_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
	    vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);

end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0020U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}

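/*
 * Key wrapping works by xor-ing the guest 2 and guest 3 wrapping key
 * masks: the 24 DEA + 32 AES wrapping key bytes live at offset 72 of the
 * crycb, which is why exactly 56 bytes are read and combined above. The
 * shadow crycbd always advertises FORMAT2, as the doc comment explains.
 */
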
/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}

/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception code */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}

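/*
 * The raw memcpy()s above copy the interception parameter area of the
 * scb (bytes 0xc0 - 0xf0 hold e.g. the program interruption code and
 * the external interruption parameters) back to guest 2, so guest 2 can
 * inspect the intercept exactly as if it had run guest 3 itself.
 */
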
/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb (scb_o).
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We have to definitely flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
		scb_s->ecd |= scb_o->ecd & ECD_MEF;

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}

void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}

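/*
 * Only the 2-page (8k) prefix area of a shadow scb needs this synchronous
 * treatment - SIE is expected to access it directly, so it must be
 * remapped via map_prefix() before the vSIE can run again. All other
 * pages are simply refaulted through the gmap shadow on the next access.
 */
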
/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}

/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}

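/*
 * The page reference taken by gfn_to_page() here is what keeps the block
 * resident while it is wired into the shadow scb; it is dropped again in
 * unpin_guest_page() below via kvm_release_pfn_dirty(), which also marks
 * the page dirty since SIE may have written to it.
 */
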
/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}

/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		gpa = scb_o->scaol & ~0xfUL;
		if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
			gpa |= (u64) scb_o->scaoh << 32;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		gpa = scb_o->itdba & ~0xffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		gpa = scb_o->gvrd & ~0x1ffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		gpa = scb_o->riccbd & ~0x3fUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		gpa = scb_o->sdnxo;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->sdnxo = 0;
	}
}

/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (!(gpa & ~0x1fffUL))
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc)
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc)
			rc = set_validity_icpt(scb_s, 0x0034U);
		if (rc)
			goto unpin;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes vector registers cannot cross page boundaries
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		scb_s->riccbd = hpa;
	}
	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || !(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/*
		 * Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}

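/*
 * The "!(gpa & ~0x1fffUL)" tests above reject blocks that would lie in
 * the first 8k of guest absolute storage (the prefix area), and each
 * block has its own alignment mask and validity reason code. The hpa is
 * forwarded unshadowed, which is fine as long as a block never crosses
 * a page boundary - see the comment at the top of pin_blocks().
 */
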
/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}

/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}

/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}

/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}

/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}

static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}

/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}

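/*
 * For a target of EXECUTE, the PSW points at the EXECUTE instruction
 * itself, not at the executed instruction; bit 0 of icptstatus flags
 * this case and bits 4-5 encode the instruction length to rewind, which
 * is why ilen is taken from icptstatus instead of the ipa above.
 */
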
/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}

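/*
 * fac is a 31-bit real address of the guest 3 facility list, masked to
 * an 8-byte boundary (0x7ffffff8U). Shadowing it into vsie_page->fac and
 * retrying the instruction lets SIE interpret STFLE for guest 3 when the
 * stfle facility (bit 7) is available to guest 2.
 */
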
/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}

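/*
 * 0xb2b0 is the opcode of STFLE, the only instruction intercept that is
 * handled here directly. A STOP intercept that guest 2 did not request
 * can only stem from kvm_s390_vsie_kick(), so it is cleared before
 * control goes back to the caller.
 */
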
static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}

static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}

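/*
 * edat ends up as 0, 1 or 2: CR0.edat plus facility 8 enables EDAT-1,
 * and facility 78 on top of that enables EDAT-2. The (asce, edat) pair
 * is exactly what gmap_shadow_valid() checks when deciding whether the
 * existing shadow gmap can be reused.
 */
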
/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}

/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}

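/*
 * The epdx adjustment above handles the carry: epoch and epdx form a
 * multi-word TOD offset, so when adding the g2 epoch wraps the 64-bit
 * epoch around, the epoch extension has to be incremented by one as well
 * (only relevant with ECD_MEF, i.e. the multiple-epoch facility).
 */
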
/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu))
			break;
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		rc = 1;
	}
	return rc;
}

/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}

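/*
 * The radix tree is keyed by addr >> 9: an scb must be 512-byte aligned
 * (enforced in kvm_s390_handle_vsie()), so the shifted address uniquely
 * identifies it. page->index doubles as the backlink from a cached page
 * to the guest 2 scb address it currently shadows.
 */
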
/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}

int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
	    kvm_s390_vcpu_sie_inhibited(vcpu))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}

/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}

/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page, page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}

void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}