// SPDX-License-Identifier: GPL-2.0
/*
 * kvm nested virtualization support for s390x
 *
 * Copyright IBM Corp. 2016, 2018
 *
 * Author(s): David Hildenbrand <dahi@linux.vnet.ibm.com>
 */
#include <linux/vmalloc.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#include <linux/list.h>
#include <linux/bitmap.h>
#include <linux/sched/signal.h>

#include <asm/gmap.h>
#include <asm/mmu_context.h>
#include <asm/sclp.h>
#include <asm/nmi.h>
#include <asm/dis.h>
#include "kvm-s390.h"
#include "gaccess.h"
struct vsie_page {
	struct kvm_s390_sie_block scb_s;	/* 0x0000 */
	/*
	 * the backup info for machine check. ensure it's at
	 * the same offset as that in struct sie_page!
	 */
	struct mcck_volatile_info mcck_info;	/* 0x0200 */
	/*
	 * The pinned original scb. Be aware that other VCPUs can modify
	 * it while we read from it. Values that are used for conditions or
	 * are reused conditionally, should be accessed via READ_ONCE.
	 */
	struct kvm_s390_sie_block *scb_o;	/* 0x0218 */
	/* the shadow gmap in use by the vsie_page */
	struct gmap *gmap;			/* 0x0220 */
	/* address of the last reported fault to guest2 */
	unsigned long fault_addr;		/* 0x0228 */
	__u8 reserved[0x0700 - 0x0230];		/* 0x0230 */
	struct kvm_s390_crypto_cb crycb;	/* 0x0700 */
	__u8 fac[S390_ARCH_FAC_LIST_SIZE_BYTE];	/* 0x0800 */
};
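/*
 * Note: the offsets in the comments above are byte offsets within the
 * page backing this structure; together with the shadow facility list
 * the layout must fill exactly one page, which kvm_s390_handle_vsie()
 * asserts via BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE).
 */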
/* trigger a validity icpt for the given scb */
static int set_validity_icpt(struct kvm_s390_sie_block *scb,
			     __u16 reason_code)
{
	scb->ipb = ((__u32) reason_code) << 16;
	scb->icptcode = ICPT_VALIDITY;
	return 1;
}
/* mark the prefix as unmapped, this will block the VSIE */
static void prefix_unmapped(struct vsie_page *vsie_page)
{
	atomic_or(PROG_REQUEST, &vsie_page->scb_s.prog20);
}
/* mark the prefix as unmapped and wait until the VSIE has been left */
static void prefix_unmapped_sync(struct vsie_page *vsie_page)
{
	prefix_unmapped(vsie_page);
	if (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		atomic_or(CPUSTAT_STOP_INT, &vsie_page->scb_s.cpuflags);
	while (vsie_page->scb_s.prog0c & PROG_IN_SIE)
		cpu_relax();
}
/* mark the prefix as mapped, this will allow the VSIE to run */
static void prefix_mapped(struct vsie_page *vsie_page)
{
	atomic_andnot(PROG_REQUEST, &vsie_page->scb_s.prog20);
}
/* test if the prefix is mapped into the gmap shadow */
static int prefix_is_mapped(struct vsie_page *vsie_page)
{
	return !(atomic_read(&vsie_page->scb_s.prog20) & PROG_REQUEST);
}
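/*
 * How the two flags above interact: prefix_unmapped() sets PROG_REQUEST
 * in prog20, which prevents (re)entering the VSIE. If the VSIE is
 * currently running, do_vsie_run() has PROG_IN_SIE set in prog0c, so
 * prefix_unmapped_sync() additionally requests a STOP interrupt to kick
 * it out and then busy-waits until PROG_IN_SIE is cleared again.
 */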
/* copy the updated intervention request bits into the shadow scb */
static void update_intervention_requests(struct vsie_page *vsie_page)
{
	const int bits = CPUSTAT_STOP_INT | CPUSTAT_IO_INT | CPUSTAT_EXT_INT;
	int cpuflags;

	cpuflags = atomic_read(&vsie_page->scb_o->cpuflags);
	atomic_andnot(bits, &vsie_page->scb_s.cpuflags);
	atomic_or(cpuflags & bits, &vsie_page->scb_s.cpuflags);
}
/* shadow (filter and validate) the cpuflags */
static int prepare_cpuflags(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int newflags, cpuflags = atomic_read(&scb_o->cpuflags);

	/* we don't allow ESA/390 guests */
	if (!(cpuflags & CPUSTAT_ZARCH))
		return set_validity_icpt(scb_s, 0x0001U);

	if (cpuflags & (CPUSTAT_RRF | CPUSTAT_MCDS))
		return set_validity_icpt(scb_s, 0x0001U);
	else if (cpuflags & (CPUSTAT_SLSV | CPUSTAT_SLSR))
		return set_validity_icpt(scb_s, 0x0007U);

	/* intervention requests will be set later */
	newflags = CPUSTAT_ZARCH;
	if (cpuflags & CPUSTAT_GED && test_kvm_facility(vcpu->kvm, 8))
		newflags |= CPUSTAT_GED;
	if (cpuflags & CPUSTAT_GED2 && test_kvm_facility(vcpu->kvm, 78)) {
		if (cpuflags & CPUSTAT_GED)
			return set_validity_icpt(scb_s, 0x0001U);
		newflags |= CPUSTAT_GED2;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GPERE))
		newflags |= cpuflags & CPUSTAT_P;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_GSLS))
		newflags |= cpuflags & CPUSTAT_SM;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IBS))
		newflags |= cpuflags & CPUSTAT_IBS;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_KSS))
		newflags |= cpuflags & CPUSTAT_KSS;

	atomic_set(&scb_s->cpuflags, newflags);
	return 0;
}
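/*
 * The filtering pattern above recurs during all of scb shadowing: a
 * guest 3 flag only makes it into the shadow scb if the matching
 * facility or cpu feature is available to guest 2 (e.g. CPUSTAT_IBS
 * only with KVM_S390_VM_CPU_FEAT_IBS); unsupported combinations are
 * rejected with a validity intercept instead.
 */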
/**
 * setup_apcb00 - Copy to APCB FORMAT0 from APCB FORMAT0
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original apcb in the guest2
 * @apcb_h: pointer to start of apcb in the guest1
 *
 * Returns 0 on success, -EFAULT on error reading the guest apcb
 */
static int setup_apcb00(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o, unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb0)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb0));

	return 0;
}
/**
 * setup_apcb11 - Copy the FORMAT1 APCB from the guest to the shadow CRYCB
 * @vcpu: pointer to the virtual CPU
 * @apcb_s: pointer to start of apcb in the shadow crycb
 * @apcb_o: pointer to start of original guest apcb
 * @apcb_h: pointer to start of apcb in the host
 *
 * Returns 0 on success, -EFAULT on error reading the guest apcb
 */
static int setup_apcb11(struct kvm_vcpu *vcpu, unsigned long *apcb_s,
			unsigned long apcb_o,
			unsigned long *apcb_h)
{
	if (read_guest_real(vcpu, apcb_o, apcb_s,
			    sizeof(struct kvm_s390_apcb1)))
		return -EFAULT;

	bitmap_and(apcb_s, apcb_s, apcb_h, sizeof(struct kvm_s390_apcb1));

	return 0;
}
/**
 * setup_apcb - Create a shadow copy of the apcb.
 * @vcpu: pointer to the virtual CPU
 * @crycb_s: pointer to shadow crycb
 * @crycb_o: pointer to original guest crycb
 * @crycb_h: pointer to the host crycb
 * @fmt_o: format of the original guest crycb.
 * @fmt_h: format of the host crycb.
 *
 * Checks the compatibility between the guest and host crycb and calls the
 * appropriate copy function.
 *
 * Returns 0 on success, or a negative error number if the guest and host
 * crycb are incompatible.
 */
static int setup_apcb(struct kvm_vcpu *vcpu, struct kvm_s390_crypto_cb *crycb_s,
		      const u32 crycb_o,
		      struct kvm_s390_crypto_cb *crycb_h,
		      int fmt_o, int fmt_h)
{
	struct kvm_s390_crypto_cb *crycb;

	crycb = (struct kvm_s390_crypto_cb *) (unsigned long)crycb_o;

	switch (fmt_o) {
	case CRYCB_FORMAT2:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 256) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT2)
			return -EINVAL;
		return setup_apcb11(vcpu, (unsigned long *)&crycb_s->apcb1,
				    (unsigned long) &crycb->apcb1,
				    (unsigned long *)&crycb_h->apcb1);
	case CRYCB_FORMAT1:
		if (fmt_h != CRYCB_FORMAT1)
			return -EINVAL;
		return setup_apcb00(vcpu, (unsigned long *) &crycb_s->apcb0,
				    (unsigned long) &crycb->apcb0,
				    (unsigned long *) &crycb_h->apcb0);
	case CRYCB_FORMAT0:
		if ((crycb_o & PAGE_MASK) != ((crycb_o + 32) & PAGE_MASK))
			return -EACCES;
		if (fmt_h != CRYCB_FORMAT0)
			return -EINVAL;
		return setup_apcb00(vcpu,
				    (unsigned long *) &crycb_s->apcb0,
				    (unsigned long) &crycb->apcb0,
				    (unsigned long *) &crycb_h->apcb0);
	}
	return -EINVAL;
}
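/*
 * Format matrix implemented above (summarized from the code): FORMAT2
 * guest crycbs require a FORMAT2 host crycb and go through
 * setup_apcb11(); FORMAT1 and FORMAT0 guest crycbs go through
 * setup_apcb00() when the host format matches; an apcb crossing a page
 * boundary yields -EACCES and any other mismatch yields -EINVAL.
 */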
/**
 * shadow_crycb - Create a shadow copy of the crycb block
 * @vcpu: a pointer to the virtual CPU
 * @vsie_page: a pointer to internal data used for the vSIE
 *
 * Create a shadow copy of the crycb block and setup key wrapping, if
 * requested for guest 3 and enabled for guest 2.
 *
 * We accept format-1 or format-2, but we convert format-1 into format-2
 * in the shadow CRYCB.
 * Using format-2 enables the firmware to choose the right format when
 * scheduling the SIE.
 * There is nothing to do for format-0.
 *
 * This function centralizes the issuing of set_validity_icpt() for all
 * the subfunctions working on the crycb.
 *
 * Returns: - 0 if shadowed or nothing to do
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_crycb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	const uint32_t crycbd_o = READ_ONCE(scb_o->crycbd);
	const u32 crycb_addr = crycbd_o & 0x7ffffff8U;
	unsigned long *b1, *b2;
	u8 ecb3_flags;
	int apie_h;
	int key_msk = test_kvm_facility(vcpu->kvm, 76);
	int fmt_o = crycbd_o & CRYCB_FORMAT_MASK;
	int fmt_h = vcpu->arch.sie_block->crycbd & CRYCB_FORMAT_MASK;
	int ret = 0;

	scb_s->crycbd = 0;

	apie_h = vcpu->arch.sie_block->eca & ECA_APIE;
	if (!apie_h && !key_msk)
		return 0;

	if (!crycb_addr)
		return set_validity_icpt(scb_s, 0x0039U);

	if (fmt_o == CRYCB_FORMAT1)
		if ((crycb_addr & PAGE_MASK) !=
		    ((crycb_addr + 128) & PAGE_MASK))
			return set_validity_icpt(scb_s, 0x003CU);

	if (apie_h && (scb_o->eca & ECA_APIE)) {
		ret = setup_apcb(vcpu, &vsie_page->crycb, crycb_addr,
				 vcpu->kvm->arch.crypto.crycb,
				 fmt_o, fmt_h);
		if (ret)
			goto end;
		scb_s->eca |= scb_o->eca & ECA_APIE;
	}

	/* we may only allow it if enabled for guest 2 */
	ecb3_flags = scb_o->ecb3 & vcpu->arch.sie_block->ecb3 &
		     (ECB3_AES | ECB3_DEA);
	if (!ecb3_flags)
		goto end;

	/* copy only the wrapping keys */
	if (read_guest_real(vcpu, crycb_addr + 72,
			    vsie_page->crycb.dea_wrapping_key_mask, 56))
		return set_validity_icpt(scb_s, 0x0035U);

	scb_s->ecb3 |= ecb3_flags;

	/* xor both blocks in one run */
	b1 = (unsigned long *) vsie_page->crycb.dea_wrapping_key_mask;
	b2 = (unsigned long *)
	     vcpu->kvm->arch.crypto.crycb->dea_wrapping_key_mask;
	/* as 56%8 == 0, bitmap_xor won't overwrite any data */
	bitmap_xor(b1, b1, b2, BITS_PER_BYTE * 56);

end:
	switch (ret) {
	case -EINVAL:
		return set_validity_icpt(scb_s, 0x0020U);
	case -EFAULT:
		return set_validity_icpt(scb_s, 0x0035U);
	case -EACCES:
		return set_validity_icpt(scb_s, 0x003CU);
	}
	scb_s->crycbd = ((__u32)(__u64) &vsie_page->crycb) | CRYCB_FORMAT2;
	return 0;
}
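/*
 * On the 56 bytes copied above: they presumably hold the DEA (24 bytes)
 * and AES (32 bytes) wrapping key masks starting at offset 72 of the
 * crycb. XORing them with guest 2's masks gives the masks guest 3
 * effectively runs with, without exposing either party's raw masks.
 */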
/* shadow (round up/down) the ibc to avoid validity icpt */
static void prepare_ibc(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_ibc = scb_o->ibc;
	const uint32_t new_ibc = READ_ONCE(__new_ibc) & 0x0fffU;
	__u64 min_ibc = (sclp.ibc >> 16) & 0x0fffU;

	scb_s->ibc = 0;
	/* ibc installed in g2 and requested for g3 */
	if (vcpu->kvm->arch.model.ibc && new_ibc) {
		scb_s->ibc = new_ibc;
		/* take care of the minimum ibc level of the machine */
		if (scb_s->ibc < min_ibc)
			scb_s->ibc = min_ibc;
		/* take care of the maximum ibc level set for the guest */
		if (scb_s->ibc > vcpu->kvm->arch.model.ibc)
			scb_s->ibc = vcpu->kvm->arch.model.ibc;
	}
}
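/*
 * Illustrative example (values made up): with a machine minimum ibc of
 * 0x090 and a guest 2 model ibc of 0x123, a guest 3 request of 0x080 is
 * rounded up to 0x090 and a request of 0x200 is rounded down to 0x123,
 * so the ibc seen by SIE always stays within the supported range.
 */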
/* unshadow the scb, copying parameters back to the real scb */
static void unshadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;

	/* interception */
	scb_o->icptcode = scb_s->icptcode;
	scb_o->icptstatus = scb_s->icptstatus;
	scb_o->ipa = scb_s->ipa;
	scb_o->ipb = scb_s->ipb;
	scb_o->gbea = scb_s->gbea;

	/* timer */
	scb_o->cputm = scb_s->cputm;
	scb_o->ckc = scb_s->ckc;
	scb_o->todpr = scb_s->todpr;

	/* guest state */
	scb_o->gpsw = scb_s->gpsw;
	scb_o->gg14 = scb_s->gg14;
	scb_o->gg15 = scb_s->gg15;
	memcpy(scb_o->gcr, scb_s->gcr, 128);
	scb_o->pp = scb_s->pp;

	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82)) {
		scb_o->fpf &= ~FPF_BPBC;
		scb_o->fpf |= scb_s->fpf & FPF_BPBC;
	}

	/* interrupt intercept */
	switch (scb_s->icptcode) {
	case ICPT_PROGI:
	case ICPT_INSTPROGI:
	case ICPT_EXTINT:
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xf0 - 0xc0);
		break;
	case ICPT_PARTEXEC:
		/* MVPG only */
		memcpy((void *)((u64)scb_o + 0xc0),
		       (void *)((u64)scb_s + 0xc0), 0xd0 - 0xc0);
		break;
	}

	if (scb_s->ihcpu != 0xffffU)
		scb_o->ihcpu = scb_s->ihcpu;
}
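/*
 * The byte ranges copied above (0xc0-0xf0, or 0xc0-0xd0 for partial
 * execution intercepts) cover the interruption parameter fields of the
 * hardware-defined sie block layout; the exact offsets are assumed to
 * match struct kvm_s390_sie_block in asm/kvm_host.h.
 */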
/*
 * Setup the shadow scb by copying and checking the relevant parts of the g2
 * provided scb (scb_o).
 *
 * Returns: - 0 if the scb has been shadowed
 *          - > 0 if control has to be given to guest 2
 */
static int shadow_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	/* READ_ONCE does not work on bitfields - use a temporary variable */
	const uint32_t __new_prefix = scb_o->prefix;
	const uint32_t new_prefix = READ_ONCE(__new_prefix);
	const bool wants_tx = READ_ONCE(scb_o->ecb) & ECB_TE;
	bool had_tx = scb_s->ecb & ECB_TE;
	unsigned long new_mso = 0;
	int rc;

	/* make sure we don't have any leftovers when reusing the scb */
	scb_s->icptcode = 0;
	scb_s->eca = 0;
	scb_s->ecb = 0;
	scb_s->ecb2 = 0;
	scb_s->ecb3 = 0;
	scb_s->ecd = 0;
	scb_s->fac = 0;
	scb_s->fpf = 0;

	rc = prepare_cpuflags(vcpu, vsie_page);
	if (rc)
		goto out;

	/* timer */
	scb_s->cputm = scb_o->cputm;
	scb_s->ckc = scb_o->ckc;
	scb_s->todpr = scb_o->todpr;
	scb_s->epoch = scb_o->epoch;

	/* guest state */
	scb_s->gpsw = scb_o->gpsw;
	scb_s->gg14 = scb_o->gg14;
	scb_s->gg15 = scb_o->gg15;
	memcpy(scb_s->gcr, scb_o->gcr, 128);
	scb_s->pp = scb_o->pp;

	/* interception / execution handling */
	scb_s->gbea = scb_o->gbea;
	scb_s->lctl = scb_o->lctl;
	scb_s->svcc = scb_o->svcc;
	scb_s->ictl = scb_o->ictl;
	/*
	 * SKEY handling functions can't deal with false setting of PTE invalid
	 * bits. Therefore we cannot provide interpretation and would later
	 * have to provide own emulation handlers.
	 */
	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_KSS))
		scb_s->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;

	scb_s->icpua = scb_o->icpua;

	if (!(atomic_read(&scb_s->cpuflags) & CPUSTAT_SM))
		new_mso = READ_ONCE(scb_o->mso) & 0xfffffffffff00000UL;
	/* if the hva of the prefix changes, we have to remap the prefix */
	if (scb_s->mso != new_mso || scb_s->prefix != new_prefix)
		prefix_unmapped(vsie_page);
	/* SIE will do mso/msl validity and exception checks for us */
	scb_s->msl = scb_o->msl & 0xfffffffffff00000UL;
	scb_s->mso = new_mso;
	scb_s->prefix = new_prefix;

	/* We have to definitely flush the tlb if this scb never ran */
	if (scb_s->ihcpu != 0xffffU)
		scb_s->ihcpu = scb_o->ihcpu;

	/* MVPG and Protection Exception Interpretation are always available */
	scb_s->eca |= scb_o->eca & (ECA_MVPGI | ECA_PROTEXCI);
	/* Host-protection-interruption introduced with ESOP */
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_ESOP))
		scb_s->ecb |= scb_o->ecb & ECB_HOSTPROTINT;
	/* transactional execution */
	if (test_kvm_facility(vcpu->kvm, 73) && wants_tx) {
		/* remap the prefix if tx is toggled on */
		if (!had_tx)
			prefix_unmapped(vsie_page);
		scb_s->ecb |= ECB_TE;
	}
	/* branch prediction */
	if (test_kvm_facility(vcpu->kvm, 82))
		scb_s->fpf |= scb_o->fpf & FPF_BPBC;
	/* SIMD */
	if (test_kvm_facility(vcpu->kvm, 129)) {
		scb_s->eca |= scb_o->eca & ECA_VX;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	/* Run-time-Instrumentation */
	if (test_kvm_facility(vcpu->kvm, 64))
		scb_s->ecb3 |= scb_o->ecb3 & ECB3_RI;
	/* Instruction Execution Prevention */
	if (test_kvm_facility(vcpu->kvm, 130))
		scb_s->ecb2 |= scb_o->ecb2 & ECB2_IEP;
	/* Guarded Storage */
	if (test_kvm_facility(vcpu->kvm, 133)) {
		scb_s->ecb |= scb_o->ecb & ECB_GS;
		scb_s->ecd |= scb_o->ecd & ECD_HOSTREGMGMT;
	}
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIIF))
		scb_s->eca |= scb_o->eca & ECA_SII;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_IB))
		scb_s->eca |= scb_o->eca & ECA_IB;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_CEI))
		scb_s->eca |= scb_o->eca & ECA_CEI;
	/* Epoch Extension */
	if (test_kvm_facility(vcpu->kvm, 139))
		scb_s->ecd |= scb_o->ecd & ECD_MEF;

	/* etoken */
	if (test_kvm_facility(vcpu->kvm, 156))
		scb_s->ecd |= scb_o->ecd & ECD_ETOKENF;

	prepare_ibc(vcpu, vsie_page);
	rc = shadow_crycb(vcpu, vsie_page);
out:
	if (rc)
		unshadow_scb(vcpu, vsie_page);
	return rc;
}
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end)
{
	struct kvm *kvm = gmap->private;
	struct vsie_page *cur;
	unsigned long prefix;
	struct page *page;
	int i;

	if (!gmap_is_shadow(gmap))
		return;
	if (start >= 1UL << 31)
		/* We are only interested in prefix pages */
		return;

	/*
	 * Only new shadow blocks are added to the list during runtime,
	 * therefore we can safely reference them all the time.
	 */
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = READ_ONCE(kvm->arch.vsie.pages[i]);
		if (!page)
			continue;
		cur = page_to_virt(page);
		if (READ_ONCE(cur->gmap) != gmap)
			continue;
		prefix = cur->scb_s.prefix << GUEST_PREFIX_SHIFT;
		/* with mso/msl, the prefix lies at an offset */
		prefix += cur->scb_s.mso;
		if (prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1)
			prefix_unmapped_sync(cur);
	}
}
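/*
 * The range check above, spelled out: the prefix area spans two pages
 * (the second one is only mapped when transactional execution is in
 * use, see map_prefix()), so an unmap of [start, end] intersects it iff
 * prefix <= end && start <= prefix + 2 * PAGE_SIZE - 1.
 */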
/*
 * Map the first prefix page and if tx is enabled also the second prefix page.
 *
 * The prefix will be protected, a gmap notifier will inform about unmaps.
 * The shadow scb must not be executed until the prefix is remapped, this is
 * guaranteed by properly handling PROG_REQUEST.
 *
 * Returns: - 0 if successfully mapped or already mapped
 *          - > 0 if control has to be given to guest 2
 *          - -EAGAIN if the caller can retry immediately
 *          - -ENOMEM if out of memory
 */
static int map_prefix(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	u64 prefix = scb_s->prefix << GUEST_PREFIX_SHIFT;
	int rc;

	if (prefix_is_mapped(vsie_page))
		return 0;

	/* mark it as mapped so we can catch any concurrent unmappers */
	prefix_mapped(vsie_page);

	/* with mso/msl, the prefix lies at offset *mso* */
	prefix += scb_s->mso;

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap, prefix);
	if (!rc && (scb_s->ecb & ECB_TE))
		rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
					   prefix + PAGE_SIZE);
	/*
	 * We don't have to mprotect, we will be called for all unshadows.
	 * SIE will detect if protection applies and trigger a validity.
	 */
	if (rc)
		prefix_unmapped(vsie_page);
	if (rc > 0 || rc == -EFAULT)
		rc = set_validity_icpt(scb_s, 0x0037U);
	return rc;
}
/*
 * Pin the guest page given by gpa and set hpa to the pinned host address.
 * Will always be pinned writable.
 *
 * Returns: - 0 on success
 *          - -EINVAL if the gpa is not valid guest storage
 */
static int pin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t *hpa)
{
	struct page *page;

	page = gfn_to_page(kvm, gpa_to_gfn(gpa));
	if (is_error_page(page))
		return -EINVAL;
	*hpa = (hpa_t) page_to_virt(page) + (gpa & ~PAGE_MASK);
	return 0;
}
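/*
 * Note on the address arithmetic above: page_to_virt() yields the host
 * virtual address of the pinned page, and the page offset of the gpa
 * (gpa & ~PAGE_MASK) is added back, so *hpa points at the exact byte
 * the gpa refers to, not merely at its page.
 */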
/* Unpins a page previously pinned via pin_guest_page, marking it as dirty. */
static void unpin_guest_page(struct kvm *kvm, gpa_t gpa, hpa_t hpa)
{
	kvm_release_pfn_dirty(hpa >> PAGE_SHIFT);
	/* mark the page always as dirty for migration */
	mark_page_dirty(kvm, gpa_to_gfn(gpa));
}
/* unpin all blocks previously pinned by pin_blocks(), marking them dirty */
static void unpin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;

	hpa = (u64) scb_s->scaoh << 32 | scb_s->scaol;
	if (hpa) {
		gpa = scb_o->scaol & ~0xfUL;
		if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
			gpa |= (u64) scb_o->scaoh << 32;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->scaol = 0;
		scb_s->scaoh = 0;
	}

	hpa = scb_s->itdba;
	if (hpa) {
		gpa = scb_o->itdba & ~0xffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->itdba = 0;
	}

	hpa = scb_s->gvrd;
	if (hpa) {
		gpa = scb_o->gvrd & ~0x1ffUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->gvrd = 0;
	}

	hpa = scb_s->riccbd;
	if (hpa) {
		gpa = scb_o->riccbd & ~0x3fUL;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->riccbd = 0;
	}

	hpa = scb_s->sdnxo;
	if (hpa) {
		gpa = scb_o->sdnxo;
		unpin_guest_page(vcpu->kvm, gpa, hpa);
		scb_s->sdnxo = 0;
	}
}
/*
 * Instead of shadowing some blocks, we can simply forward them because the
 * addresses in the scb are 64 bit long.
 *
 * This works as long as the data lies in one page. If blocks ever exceed one
 * page, we have to fall back to shadowing.
 *
 * As we reuse the sca, the vcpu pointers contained in it are invalid. We must
 * therefore not enable any facilities that access these pointers (e.g. SIGPIF).
 *
 * Returns: - 0 if all blocks were pinned.
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int pin_blocks(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	hpa_t hpa;
	gpa_t gpa;
	int rc = 0;

	gpa = READ_ONCE(scb_o->scaol) & ~0xfUL;
	if (test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_64BSCAO))
		gpa |= (u64) READ_ONCE(scb_o->scaoh) << 32;
	if (gpa) {
		if (!(gpa & ~0x1fffUL))
			rc = set_validity_icpt(scb_s, 0x0038U);
		else if ((gpa & ~0x1fffUL) == kvm_s390_get_prefix(vcpu))
			rc = set_validity_icpt(scb_s, 0x0011U);
		else if ((gpa & PAGE_MASK) !=
			 ((gpa + sizeof(struct bsca_block) - 1) & PAGE_MASK))
			rc = set_validity_icpt(scb_s, 0x003bU);
		if (!rc) {
			rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
			if (rc)
				rc = set_validity_icpt(scb_s, 0x0034U);
		}
		if (rc)
			goto unpin;
		scb_s->scaoh = (u32)((u64)hpa >> 32);
		scb_s->scaol = (u32)(u64)hpa;
	}

	gpa = READ_ONCE(scb_o->itdba) & ~0xffUL;
	if (gpa && (scb_s->ecb & ECB_TE)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		/* 256 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0080U);
			goto unpin;
		}
		scb_s->itdba = hpa;
	}

	gpa = READ_ONCE(scb_o->gvrd) & ~0x1ffUL;
	if (gpa && (scb_s->eca & ECA_VX) && !(scb_s->ecd & ECD_HOSTREGMGMT)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		/*
		 * 512 bytes vector registers cannot cross page boundaries
		 * if this block gets bigger, we have to shadow it.
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x1310U);
			goto unpin;
		}
		scb_s->gvrd = hpa;
	}

	gpa = READ_ONCE(scb_o->riccbd) & ~0x3fUL;
	if (gpa && (scb_s->ecb3 & ECB3_RI)) {
		if (!(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* 64 bytes cannot cross page boundaries */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x0043U);
			goto unpin;
		}
		/* Validity 0x0044 will be checked by SIE */
		scb_s->riccbd = hpa;
	}
	if (((scb_s->ecb & ECB_GS) && !(scb_s->ecd & ECD_HOSTREGMGMT)) ||
	    (scb_s->ecd & ECD_ETOKENF)) {
		unsigned long sdnxc;

		gpa = READ_ONCE(scb_o->sdnxo) & ~0xfUL;
		sdnxc = READ_ONCE(scb_o->sdnxo) & 0xfUL;
		if (!gpa || !(gpa & ~0x1fffUL)) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		if (sdnxc < 6 || sdnxc > 12) {
			rc = set_validity_icpt(scb_s, 0x10b1U);
			goto unpin;
		}
		if (gpa & ((1 << sdnxc) - 1)) {
			rc = set_validity_icpt(scb_s, 0x10b2U);
			goto unpin;
		}
		/*
		 * Due to alignment rules (checked above) this cannot
		 * cross page boundaries
		 */
		rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
		if (rc) {
			rc = set_validity_icpt(scb_s, 0x10b0U);
			goto unpin;
		}
		scb_s->sdnxo = hpa | sdnxc;
	}
	return 0;
unpin:
	unpin_blocks(vcpu, vsie_page);
	return rc;
}
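/*
 * Overview of the blocks handled above (collected from the code):
 *
 *	block	alignment mask	validity icpt on error
 *	sca	~0xfUL		0x0038/0x0011/0x003b/0x0034
 *	itdba	~0xffUL		0x0080
 *	gvrd	~0x1ffUL	0x1310
 *	riccbd	~0x3fUL		0x0043
 *	sdnx	2^sdnxc bytes	0x10b0/0x10b1/0x10b2
 */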
/* unpin the scb provided by guest 2, marking it as dirty */
static void unpin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		      gpa_t gpa)
{
	hpa_t hpa = (hpa_t) vsie_page->scb_o;

	if (hpa)
		unpin_guest_page(vcpu->kvm, gpa, hpa);
	vsie_page->scb_o = NULL;
}
/*
 * Pin the scb at gpa provided by guest 2 at vsie_page->scb_o.
 *
 * Returns: - 0 if the scb was pinned.
 *          - > 0 if control has to be given to guest 2
 */
static int pin_scb(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page,
		   gpa_t gpa)
{
	hpa_t hpa;
	int rc;

	rc = pin_guest_page(vcpu->kvm, gpa, &hpa);
	if (rc) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		WARN_ON_ONCE(rc);
		return 1;
	}
	vsie_page->scb_o = (struct kvm_s390_sie_block *) hpa;
	return 0;
}
/*
 * Inject a fault into guest 2.
 *
 * Returns: - > 0 if control has to be given to guest 2
 *            < 0 if an error occurred during injection.
 */
static int inject_fault(struct kvm_vcpu *vcpu, __u16 code, __u64 vaddr,
			bool write_flag)
{
	struct kvm_s390_pgm_info pgm = {
		.code = code,
		.trans_exc_code =
			/* 0-51: virtual address */
			(vaddr & 0xfffffffffffff000UL) |
			/* 52-53: store / fetch */
			(((unsigned int) !write_flag) + 1) << 10,
			/* 62-63: asce id (always primary == 0) */
		.exc_access_id = 0, /* always primary */
		.op_access_id = 0, /* not MVPG */
	};
	int rc;

	if (code == PGM_PROTECTION)
		pgm.trans_exc_code |= 0x4UL;

	rc = kvm_s390_inject_prog_irq(vcpu, &pgm);
	return rc ? rc : 1;
}
/*
 * Handle a fault during vsie execution on a gmap shadow.
 *
 * Returns: - 0 if the fault was resolved
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int handle_fault(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	int rc;

	if (current->thread.gmap_int_code == PGM_PROTECTION)
		/* we can directly forward all protection exceptions */
		return inject_fault(vcpu, PGM_PROTECTION,
				    current->thread.gmap_addr, 1);

	rc = kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				   current->thread.gmap_addr);
	if (rc > 0) {
		rc = inject_fault(vcpu, rc,
				  current->thread.gmap_addr,
				  current->thread.gmap_write_flag);
		if (rc >= 0)
			vsie_page->fault_addr = current->thread.gmap_addr;
	}
	return rc;
}
/*
 * Retry the previous fault that required guest 2 intervention. This avoids
 * one superfluous SIE re-entry and direct exit.
 *
 * Will ignore any errors. The next SIE fault will do proper fault handling.
 */
static void handle_last_fault(struct kvm_vcpu *vcpu,
			      struct vsie_page *vsie_page)
{
	if (vsie_page->fault_addr)
		kvm_s390_shadow_fault(vcpu, vsie_page->gmap,
				      vsie_page->fault_addr);
	vsie_page->fault_addr = 0;
}
static inline void clear_vsie_icpt(struct vsie_page *vsie_page)
{
	vsie_page->scb_s.icptcode = 0;
}
/* rewind the psw and clear the vsie icpt, so we can retry execution */
static void retry_vsie_icpt(struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int ilen = insn_length(scb_s->ipa >> 8);

	/* take care of EXECUTE instructions */
	if (scb_s->icptstatus & 1) {
		ilen = (scb_s->icptstatus >> 4) & 0x6;
		if (!ilen)
			ilen = 4;
	}
	scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, ilen);
	clear_vsie_icpt(vsie_page);
}
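/*
 * On the ilen handling above: for an intercepted EXECUTE (bit 0 of
 * icptstatus set), the instruction length of the EXECUTE itself is
 * encoded in icptstatus ((icptstatus >> 4) & 0x6), with 4 used as a
 * fallback when 0 is reported, so the PSW is rewound to the EXECUTE
 * and not to its target instruction.
 */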
/*
 * Try to shadow + enable the guest 2 provided facility list.
 * Retry instruction execution if enabled for and provided by guest 2.
 *
 * Returns: - 0 if handled (retry or guest 2 icpt)
 *          - > 0 if control has to be given to guest 2
 */
static int handle_stfle(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	__u32 fac = READ_ONCE(vsie_page->scb_o->fac) & 0x7ffffff8U;

	if (fac && test_kvm_facility(vcpu->kvm, 7)) {
		retry_vsie_icpt(vsie_page);
		if (read_guest_real(vcpu, fac, &vsie_page->fac,
				    sizeof(vsie_page->fac)))
			return set_validity_icpt(scb_s, 0x1090U);
		scb_s->fac = (__u32)(__u64) &vsie_page->fac;
	}
	return 0;
}
/*
 * Run the vsie on a shadow scb and a shadow gmap, without any further
 * sanity checks, handling SIE faults.
 *
 * Returns: - 0 everything went fine
 *          - > 0 if control has to be given to guest 2
 *          - < 0 if an error occurred
 */
static int do_vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	struct kvm_s390_sie_block *scb_o = vsie_page->scb_o;
	int guest_bp_isolation;
	int rc = 0;

	handle_last_fault(vcpu, vsie_page);

	if (need_resched())
		schedule();
	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);

	/* save current guest state of bp isolation override */
	guest_bp_isolation = test_thread_flag(TIF_ISOLATE_BP_GUEST);

	/*
	 * The guest is running with BPBC, so we have to force it on for our
	 * nested guest. This is done by enabling BPBC globally, so the BPBC
	 * control in the SCB (which the nested guest can modify) is simply
	 * ignored.
	 */
	if (test_kvm_facility(vcpu->kvm, 82) &&
	    vcpu->arch.sie_block->fpf & FPF_BPBC)
		set_thread_flag(TIF_ISOLATE_BP_GUEST);

	local_irq_disable();
	guest_enter_irqoff();
	local_irq_enable();

	/*
	 * Simulate a SIE entry of the VCPU (see sie64a), so VCPU blocking
	 * and VCPU requests also hinder the vSIE from running and lead
	 * to an immediate exit. kvm_s390_vsie_kick() has to be used to
	 * also kick the vSIE.
	 */
	vcpu->arch.sie_block->prog0c |= PROG_IN_SIE;
	barrier();
	if (!kvm_s390_vcpu_sie_inhibited(vcpu))
		rc = sie64a(scb_s, vcpu->run->s.regs.gprs);
	barrier();
	vcpu->arch.sie_block->prog0c &= ~PROG_IN_SIE;

	local_irq_disable();
	guest_exit_irqoff();
	local_irq_enable();

	/* restore guest state for bp isolation override */
	if (!guest_bp_isolation)
		clear_thread_flag(TIF_ISOLATE_BP_GUEST);

	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	if (rc == -EINTR) {
		VCPU_EVENT(vcpu, 3, "%s", "machine check");
		kvm_s390_reinject_machine_check(vcpu, &vsie_page->mcck_info);
		return 0;
	}

	if (rc > 0)
		rc = 0; /* we could still have an icpt */
	else if (rc == -EFAULT)
		return handle_fault(vcpu, vsie_page);

	switch (scb_s->icptcode) {
	case ICPT_INST:
		if (scb_s->ipa == 0xb2b0)
			rc = handle_stfle(vcpu, vsie_page);
		break;
	case ICPT_STOP:
		/* stop not requested by g2 - must have been a kick */
		if (!(atomic_read(&scb_o->cpuflags) & CPUSTAT_STOP_INT))
			clear_vsie_icpt(vsie_page);
		break;
	case ICPT_VALIDITY:
		if ((scb_s->ipa & 0xf000) != 0xf000)
			scb_s->ipa += 0x1000;
		break;
	}
	return rc;
}
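/*
 * The PROG_IN_SIE window above mirrors what sie64a() does for a first
 * level guest: while the flag is set in the real scb's prog0c,
 * kvm_s390_vsie_kick() can inject a STOP interrupt request to kick the
 * nested guest out, and prefix_unmapped_sync() knows it has to wait.
 */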
static void release_gmap_shadow(struct vsie_page *vsie_page)
{
	if (vsie_page->gmap)
		gmap_put(vsie_page->gmap);
	WRITE_ONCE(vsie_page->gmap, NULL);
	prefix_unmapped(vsie_page);
}
static int acquire_gmap_shadow(struct kvm_vcpu *vcpu,
			       struct vsie_page *vsie_page)
{
	unsigned long asce;
	union ctlreg0 cr0;
	struct gmap *gmap;
	int edat;

	asce = vcpu->arch.sie_block->gcr[1];
	cr0.val = vcpu->arch.sie_block->gcr[0];
	edat = cr0.edat && test_kvm_facility(vcpu->kvm, 8);
	edat += edat && test_kvm_facility(vcpu->kvm, 78);

	/*
	 * ASCE or EDAT could have changed since last icpt, or the gmap
	 * we're holding has been unshadowed. If the gmap is still valid,
	 * we can safely reuse it.
	 */
	if (vsie_page->gmap && gmap_shadow_valid(vsie_page->gmap, asce, edat))
		return 0;

	/* release the old shadow - if any, and mark the prefix as unmapped */
	release_gmap_shadow(vsie_page);
	gmap = gmap_shadow(vcpu->arch.gmap, asce, edat);
	if (IS_ERR(gmap))
		return PTR_ERR(gmap);
	gmap->private = vcpu->kvm;
	WRITE_ONCE(vsie_page->gmap, gmap);
	return 0;
}
/*
 * Register the shadow scb at the VCPU, e.g. for kicking out of vsie.
 */
static void register_shadow_scb(struct kvm_vcpu *vcpu,
				struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;

	WRITE_ONCE(vcpu->arch.vsie_block, &vsie_page->scb_s);
	/*
	 * External calls have to lead to a kick of the vcpu and
	 * therefore the vsie -> Simulate Wait state.
	 */
	atomic_or(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	/*
	 * We have to adjust the g3 epoch by the g2 epoch. The epoch will
	 * automatically be adjusted on tod clock changes via kvm_sync_clock.
	 */
	preempt_disable();
	scb_s->epoch += vcpu->kvm->arch.epoch;

	if (scb_s->ecd & ECD_MEF) {
		scb_s->epdx += vcpu->kvm->arch.epdx;
		if (scb_s->epoch < vcpu->kvm->arch.epoch)
			scb_s->epdx += 1;
	}

	preempt_enable();
}
/*
 * Unregister a shadow scb from a VCPU.
 */
static void unregister_shadow_scb(struct kvm_vcpu *vcpu)
{
	atomic_andnot(CPUSTAT_WAIT, &vcpu->arch.sie_block->cpuflags);
	WRITE_ONCE(vcpu->arch.vsie_block, NULL);
}
/*
 * Run the vsie on a shadowed scb, managing the gmap shadow, handling
 * prefix pages and faults.
 *
 * Returns: - 0 if no errors occurred
 *          - > 0 if control has to be given to guest 2
 *          - -ENOMEM if out of memory
 */
static int vsie_run(struct kvm_vcpu *vcpu, struct vsie_page *vsie_page)
{
	struct kvm_s390_sie_block *scb_s = &vsie_page->scb_s;
	int rc = 0;

	while (1) {
		rc = acquire_gmap_shadow(vcpu, vsie_page);
		if (!rc)
			rc = map_prefix(vcpu, vsie_page);
		if (!rc) {
			gmap_enable(vsie_page->gmap);
			update_intervention_requests(vsie_page);
			rc = do_vsie_run(vcpu, vsie_page);
			gmap_enable(vcpu->arch.gmap);
		}
		atomic_andnot(PROG_BLOCK_SIE, &scb_s->prog20);

		if (rc == -EAGAIN)
			rc = 0;
		if (rc || scb_s->icptcode || signal_pending(current) ||
		    kvm_s390_vcpu_has_irq(vcpu, 0) ||
		    kvm_s390_vcpu_sie_inhibited(vcpu))
			break;
		cond_resched();
	}

	if (rc == -EFAULT) {
		/*
		 * Addressing exceptions are always presented as intercepts.
		 * As addressing exceptions are suppressing and our guest 3 PSW
		 * points at the responsible instruction, we have to
		 * forward the PSW and set the ilc. If we can't read guest 3
		 * instruction, we can use an arbitrary ilc. Let's always use
		 * ilen = 4 for now, so we can avoid reading in guest 3 virtual
		 * memory. (we could also fake the shadow so the hardware
		 * handles it).
		 */
		scb_s->icptcode = ICPT_PROGI;
		scb_s->iprcc = PGM_ADDRESSING;
		scb_s->pgmilc = 4;
		scb_s->gpsw.addr = __rewind_psw(scb_s->gpsw, 4);
		rc = 1;
	}
	return rc;
}
/*
 * Get or create a vsie page for a scb address.
 *
 * Returns: - address of a vsie page (cached or new one)
 *          - NULL if the same scb address is already used by another VCPU
 *          - ERR_PTR(-ENOMEM) if out of memory
 */
static struct vsie_page *get_vsie_page(struct kvm *kvm, unsigned long addr)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int nr_vcpus;

	rcu_read_lock();
	page = radix_tree_lookup(&kvm->arch.vsie.addr_to_page, addr >> 9);
	rcu_read_unlock();
	if (page) {
		if (page_ref_inc_return(page) == 2)
			return page_to_virt(page);
		page_ref_dec(page);
	}

	/*
	 * We want at least #online_vcpus shadows, so every VCPU can execute
	 * the VSIE in parallel.
	 */
	nr_vcpus = atomic_read(&kvm->online_vcpus);

	mutex_lock(&kvm->arch.vsie.mutex);
	if (kvm->arch.vsie.page_count < nr_vcpus) {
		page = alloc_page(GFP_KERNEL | __GFP_ZERO | GFP_DMA);
		if (!page) {
			mutex_unlock(&kvm->arch.vsie.mutex);
			return ERR_PTR(-ENOMEM);
		}
		page_ref_inc(page);
		kvm->arch.vsie.pages[kvm->arch.vsie.page_count] = page;
		kvm->arch.vsie.page_count++;
	} else {
		/* reuse an existing entry that belongs to nobody */
		while (true) {
			page = kvm->arch.vsie.pages[kvm->arch.vsie.next];
			if (page_ref_inc_return(page) == 2)
				break;
			page_ref_dec(page);
			kvm->arch.vsie.next++;
			kvm->arch.vsie.next %= nr_vcpus;
		}
		radix_tree_delete(&kvm->arch.vsie.addr_to_page,
				  page->index >> 9);
	}
	page->index = addr;
	/* double use of the same address */
	if (radix_tree_insert(&kvm->arch.vsie.addr_to_page, addr >> 9, page)) {
		page_ref_dec(page);
		mutex_unlock(&kvm->arch.vsie.mutex);
		return NULL;
	}
	mutex_unlock(&kvm->arch.vsie.mutex);

	vsie_page = page_to_virt(page);
	memset(&vsie_page->scb_s, 0, sizeof(struct kvm_s390_sie_block));
	release_gmap_shadow(vsie_page);
	vsie_page->fault_addr = 0;
	vsie_page->scb_s.ihcpu = 0xffffU;
	return vsie_page;
}
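/*
 * Reference counting scheme used above: a cached but unused page has a
 * refcount of 1, so page_ref_inc_return() == 2 means this VCPU
 * atomically claimed exclusive ownership; any other value means the
 * page is in use elsewhere and either another cache slot is tried or
 * NULL is returned.
 */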
/* put a vsie page acquired via get_vsie_page */
static void put_vsie_page(struct kvm *kvm, struct vsie_page *vsie_page)
{
	struct page *page = pfn_to_page(__pa(vsie_page) >> PAGE_SHIFT);

	page_ref_dec(page);
}
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu)
{
	struct vsie_page *vsie_page;
	unsigned long scb_addr;
	int rc;

	vcpu->stat.instruction_sie++;
	if (!test_kvm_cpu_feat(vcpu->kvm, KVM_S390_VM_CPU_FEAT_SIEF2))
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	BUILD_BUG_ON(sizeof(struct vsie_page) != PAGE_SIZE);
	scb_addr = kvm_s390_get_base_disp_s(vcpu, NULL);

	/* 512 byte alignment */
	if (unlikely(scb_addr & 0x1ffUL))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (signal_pending(current) || kvm_s390_vcpu_has_irq(vcpu, 0) ||
	    kvm_s390_vcpu_sie_inhibited(vcpu))
		return 0;

	vsie_page = get_vsie_page(vcpu->kvm, scb_addr);
	if (IS_ERR(vsie_page))
		return PTR_ERR(vsie_page);
	else if (!vsie_page)
		/* double use of sie control block - simply do nothing */
		return 0;

	rc = pin_scb(vcpu, vsie_page, scb_addr);
	if (rc)
		goto out_put;
	rc = shadow_scb(vcpu, vsie_page);
	if (rc)
		goto out_unpin_scb;
	rc = pin_blocks(vcpu, vsie_page);
	if (rc)
		goto out_unshadow;
	register_shadow_scb(vcpu, vsie_page);
	rc = vsie_run(vcpu, vsie_page);
	unregister_shadow_scb(vcpu);
	unpin_blocks(vcpu, vsie_page);
out_unshadow:
	unshadow_scb(vcpu, vsie_page);
out_unpin_scb:
	unpin_scb(vcpu, vsie_page, scb_addr);
out_put:
	put_vsie_page(vcpu->kvm, vsie_page);

	return rc < 0 ? rc : 0;
}
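/*
 * The unwind labels above keep setup and teardown symmetric: pin_scb ->
 * shadow_scb -> pin_blocks on the way in, unpin_blocks -> unshadow_scb
 * -> unpin_scb -> put_vsie_page on the way out, with each error path
 * entering the teardown sequence at the matching label.
 */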
/* Init the vsie data structures. To be called when a vm is initialized. */
void kvm_s390_vsie_init(struct kvm *kvm)
{
	mutex_init(&kvm->arch.vsie.mutex);
	INIT_RADIX_TREE(&kvm->arch.vsie.addr_to_page, GFP_KERNEL);
}
/* Destroy the vsie data structures. To be called when a vm is destroyed. */
void kvm_s390_vsie_destroy(struct kvm *kvm)
{
	struct vsie_page *vsie_page;
	struct page *page;
	int i;

	mutex_lock(&kvm->arch.vsie.mutex);
	for (i = 0; i < kvm->arch.vsie.page_count; i++) {
		page = kvm->arch.vsie.pages[i];
		kvm->arch.vsie.pages[i] = NULL;
		vsie_page = page_to_virt(page);
		release_gmap_shadow(vsie_page);
		/* free the radix tree entry */
		radix_tree_delete(&kvm->arch.vsie.addr_to_page,
				  page->index >> 9);
		__free_page(page);
	}
	kvm->arch.vsie.page_count = 0;
	mutex_unlock(&kvm->arch.vsie.mutex);
}
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_sie_block *scb = READ_ONCE(vcpu->arch.vsie_block);

	/*
	 * Even if the VCPU lets go of the shadow sie block reference, it is
	 * still valid in the cache. So we can safely kick it.
	 */
	if (scb) {
		atomic_or(PROG_BLOCK_SIE, &scb->prog20);
		if (scb->prog0c & PROG_IN_SIE)
			atomic_or(CPUSTAT_STOP_INT, &scb->cpuflags);
	}
}