/*
 * arch/s390/kvm/kvm-s390.c
 * (as of "KVM: s390: use facilities and cpu_id per KVM")
 */
/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 *               Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}
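
/*
 * Note on the mask layout (explanatory, not from the original source):
 * the STFLE facility list is bit-ordered MSB first, so facility number
 * n lives in word n / 64 at bit n % 64 counted from the left.  For
 * example, facility 76 (MSA extension 3, tested by the crypto code
 * below) is word 1, bit 12 from the MSB, i.e. 0x0008000000000000UL -
 * which is indeed set in kvm_s390_fac_list_mask[1] above.
 */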

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn < last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}
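
/*
 * Rough userspace sketch (illustrative only, not part of this file):
 * per-VM capabilities such as the in-kernel irqchip are switched on via
 * the KVM_ENABLE_CAP ioctl on the VM file descriptor, e.g.:
 *
 *	struct kvm_enable_cap cap = { .cap = KVM_CAP_S390_IRQCHIP };
 *
 *	if (ioctl(vm_fd, KVM_ENABLE_CAP, &cap) < 0)
 *		err(1, "KVM_ENABLE_CAP");
 *
 * vm_fd is assumed to come from a prior KVM_CREATE_VM.
 */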

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}
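
/*
 * Illustrative userspace sketch (assumptions: vm_fd from KVM_CREATE_VM,
 * no vcpus created yet): the memory limit above is set through the
 * generic device-attribute interface rather than a dedicated ioctl:
 *
 *	__u64 limit = 1ULL << 31;
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_MEM_CTRL,
 *		.attr  = KVM_S390_VM_MEM_LIMIT_SIZE,
 *		.addr  = (__u64)&limit,
 *	};
 *
 *	ioctl(vm_fd, KVM_SET_DEVICE_ATTR, &attr);
 *
 * As enforced above, -EBUSY is returned once vcpus exist.
 */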

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
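
/*
 * Background note (explanatory, not from the original source): the
 * wrapping key masks feed the CPACF protected-key functions.  Generating
 * a fresh random mask per VM when AES/DEA key wrapping is enabled means
 * that protected keys created inside one guest are unusable in any other
 * guest or on the host - presumably the isolation property userspace
 * wants when it flips KVM_S390_VM_CRYPTO_ENABLE_*_KW.
 */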

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}
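
/*
 * Worked example for the epoch arithmetic above (illustrative numbers):
 * the SIE hardware computes guest TOD = host TOD + epoch on every clock
 * read.  If the host TOD is currently 0x1000 and userspace sets a guest
 * TOD of 0x1800, the code stores epoch = 0x1800 - 0x1000 = 0x800; a
 * later host TOD of 0x2000 is then seen by the guest as 0x2800.  The
 * get path below simply reverses this: gtod = host_tod + kvm->arch.epoch.
 */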

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}
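
/*
 * Illustrative note: the attributes form a two-level namespace
 * (group, attr), dispatched by the three handlers above.  Userspace can
 * probe support without side effects via KVM_HAS_DEVICE_ATTR, e.g.
 * (sketch only, vm_fd assumed):
 *
 *	struct kvm_device_attr attr = {
 *		.group = KVM_S390_VM_TOD,
 *		.attr  = KVM_S390_VM_TOD_LOW,
 *	};
 *
 *	has_tod = (ioctl(vm_fd, KVM_HAS_DEVICE_ATTR, &attr) == 0);
 */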

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;

		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

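/*
 * Explanatory note on the inline assembly below: 0xb2af is the PQAP
 * instruction; function code 0x04 in general register 0 selects the
 * Query Configuration Information (QCI) subfunction, and general
 * register 2 carries the address of the 128-byte return buffer.  The
 * ipm/srl pair extracts the condition code from the PSW.
 */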
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc;

	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n"		/* PQAP(QCI) */
		"ipm %0\n"
		"srl %0,28\n"
		: "=r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}
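
/*
 * Note (explanatory, hedged): crycbd is the crypto-control-block
 * designation - the block address with its low bits reused as a format
 * field.  CRYCB_FORMAT2 advertises the extended layout used when the
 * AP-extended-addressing facility (APXA) is installed, CRYCB_FORMAT1
 * the base layout otherwise.
 */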

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);
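
	/*
	 * Explanatory note: the SCA itself is smaller than a page, so
	 * successive VMs place it at a different 16-byte offset within
	 * their page (cycling through 0x000-0x7f0), presumably to keep
	 * the SCAs of different guests from competing for the same
	 * cache lines.
	 */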

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the active copy (arch.model.fac->sie) and the current
	 * facilities set (arch.model.fac->kvm). The page address has to fit
	 * in 31 bits and be word aligned.
	 */
	kvm->arch.model.fac =
		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	/*
	 * Apply the kvm facility mask to limit the kvm supported/tolerated
	 * facility list.
	 */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->kvm[i] = 0UL;
	}

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}
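
/*
 * Note (explanatory): the two functions above implement lazy register
 * switching - the host FP/access registers are stashed and the guest
 * copies loaded when the vcpu is scheduled in, then swapped back on
 * schedule-out.  While a vcpu is loaded, the *hardware* registers hold
 * the guest values; kvm_s390_vcpu_store_status() further down relies
 * on exactly this.
 */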

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}
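
/*
 * Note (assumption, not from the original source): cbrlo above is the
 * collaborative-memory-management buffer origin, and the 0x80 bit set
 * in ecb2 apparently enables interpretation of the guest ESSA
 * instruction; the 0x08 bit cleared alongside it is a related
 * interpretation control.
 */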

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}
	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
{
	return kvm_s390_vcpu_has_irq(vcpu, 0);
}

void s390_vcpu_block(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
}

/*
 * Kick a guest cpu out of SIE and wait until SIE is not running.
 * If the CPU is not running (e.g. waiting as idle) the function will
 * return immediately.
 */
void exit_sie(struct kvm_vcpu *vcpu)
{
	atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
	while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
		cpu_relax();
}

/* Kick a guest cpu out of SIE and prevent SIE-reentry */
void exit_sie_sync(struct kvm_vcpu *vcpu)
{
	s390_vcpu_block(vcpu);
	exit_sie(vcpu);
}
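
/*
 * Note (explanatory): the kick protocol above works in two halves.
 * exit_sie() raises a stop request in the control block and spins until
 * the PROG_IN_SIE bit clears, i.e. until the target cpu has really left
 * SIE; exit_sie_sync() additionally sets PROG_BLOCK_SIE first, so the
 * vcpu cannot reenter SIE until s390_vcpu_unblock() runs (done at the
 * top of kvm_s390_handle_requests()).
 */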

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}
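
/*
 * Illustrative userspace sketch for the ONE_REG accessors above
 * (vcpu_fd assumed to come from KVM_CREATE_VCPU):
 *
 *	__u64 cputm;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_S390_CPU_TIMER,
 *		.addr = (__u64)&cputm,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */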

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;

		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}
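
/*
 * Note (explanatory): this is the host half of the pseudo-page-fault
 * handshake.  A guest that negotiated pfault handling (via DIAG 0x258)
 * told us where its pfault token lives; when a host page is missing we
 * read that token, arm an async worker, and inject PFAULT_INIT so the
 * guest can schedule another task meanwhile.  Once the page arrives,
 * PFAULT_DONE with the same token tells the guest to unblock the
 * waiter.  The checks in kvm_arch_setup_async_pf() bail out whenever
 * the guest currently cannot take the interrupt.
 */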

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the housekeeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
			current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;

			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
	vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
		kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
		memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
		/* some control register changes require a tlb flush */
		kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
		vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
		vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
		vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
		vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
		vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
	}
	if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
		vcpu->arch.pfault_token = kvm_run->s.regs.pft;
		vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
		vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
	}
	kvm_run->kvm_dirty_regs = 0;
}

static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
	kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
	kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
	memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
	kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
	kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
	kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
	kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
	kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
	kvm_run->s.regs.pft = vcpu->arch.pfault_token;
	kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
	kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
}
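
/*
 * Note (explanatory): sync_regs()/store_regs() implement the kvm_run
 * synced-register protocol.  kvm_valid_regs (set in
 * kvm_arch_vcpu_init()) declares which fields the kernel keeps current
 * in the shared kvm_run page; userspace flags any field it modified in
 * kvm_dirty_regs before KVM_RUN, and sync_regs() folds exactly those
 * back into the SIE block, flushing the TLB when control registers
 * changed.
 */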

int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
{
	int rc;
	sigset_t sigsaved;

	if (guestdbg_exit_pending(vcpu)) {
		kvm_s390_prepare_debug_exit(vcpu);
		return 0;
	}

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);

	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
		kvm_s390_vcpu_start(vcpu);
	} else if (is_vcpu_stopped(vcpu)) {
		pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
				   vcpu->vcpu_id);
		return -EINVAL;
	}

	sync_regs(vcpu, kvm_run);

	might_fault();
	rc = __vcpu_run(vcpu);

	if (signal_pending(current) && !rc) {
		kvm_run->exit_reason = KVM_EXIT_INTR;
		rc = -EINTR;
	}

	if (guestdbg_exit_pending(vcpu) && !rc) {
		kvm_s390_prepare_debug_exit(vcpu);
		rc = 0;
	}

	if (rc == -EOPNOTSUPP) {
		/* intercept cannot be handled in-kernel, prepare kvm-run */
		kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
		kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
		kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
		kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
		rc = 0;
	}

	if (rc == -EREMOTE) {
		/* intercept was handled, but userspace support is needed
		 * kvm_run has been prepared by the handler */
		rc = 0;
	}

	store_regs(vcpu, kvm_run);

	if (vcpu->sigset_active)
		sigprocmask(SIG_SETMASK, &sigsaved, NULL);

	vcpu->stat.exit_userspace++;
	return rc;
}
1791
b0c632db
HC
1792/*
1793 * store status at address
1794 * we use have two special cases:
1795 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1796 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1797 */
d0bce605 1798int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 1799{
092670cd 1800 unsigned char archmode = 1;
fda902cb 1801 unsigned int px;
178bd789 1802 u64 clkcomp;
d0bce605 1803 int rc;
b0c632db 1804
d0bce605
HC
1805 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1806 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 1807 return -EFAULT;
d0bce605
HC
1808 gpa = SAVE_AREA_BASE;
1809 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1810 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 1811 return -EFAULT;
d0bce605
HC
1812 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1813 }
1814 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1815 vcpu->arch.guest_fpregs.fprs, 128);
1816 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1817 vcpu->run->s.regs.gprs, 128);
1818 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1819 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 1820 px = kvm_s390_get_prefix(vcpu);
d0bce605 1821 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 1822 &px, 4);
d0bce605
HC
1823 rc |= write_guest_abs(vcpu,
1824 gpa + offsetof(struct save_area, fp_ctrl_reg),
1825 &vcpu->arch.guest_fpregs.fpc, 4);
1826 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1827 &vcpu->arch.sie_block->todpr, 4);
1828 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1829 &vcpu->arch.sie_block->cputm, 8);
178bd789 1830 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
1831 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1832 &clkcomp, 8);
1833 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1834 &vcpu->run->s.regs.acrs, 64);
1835 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1836 &vcpu->arch.sie_block->gcr, 128);
1837 return rc ? -EFAULT : 0;
b0c632db
HC
1838}
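
Seen from userspace, this is the backend of the KVM_S390_STORE_STATUS vcpu ioctl; the two magic addresses select either the 64-bit save area at absolute 0x1200 or the prefix-relative one. A minimal sketch, assuming the constants from <linux/kvm.h> and a stopped vcpu; store_status_prefixed() is a hypothetical wrapper:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int store_status_prefixed(int vcpu_fd)
{
        /* or KVM_S390_STORE_STATUS_NOADDR for the absolute save area */
        return ioctl(vcpu_fd, KVM_S390_STORE_STATUS,
                     KVM_S390_STORE_STATUS_PREFIXED);
}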
1839
e879892c
TH
1840int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1841{
1842 /*
1843 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
1844 * copying in vcpu load/put. Let's update our copies before we save
1845 * them into the save area.
1846 */
1847 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1848 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1849 save_access_regs(vcpu->run->s.regs.acrs);
1850
1851 return kvm_s390_store_status_unloaded(vcpu, addr);
1852}
1853
8ad35755
DH
1854static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1855{
1856 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1857 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1858 exit_sie_sync(vcpu);
1859}
1860
1861static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1862{
1863 unsigned int i;
1864 struct kvm_vcpu *vcpu;
1865
1866 kvm_for_each_vcpu(i, vcpu, kvm) {
1867 __disable_ibs_on_vcpu(vcpu);
1868 }
1869}
1870
1871static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1872{
1873 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
1874 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
1875 exit_sie_sync(vcpu);
1876}
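
These helpers only raise a request and kick the vcpu out of SIE; the IBS flag itself is flipped by the vcpu thread when it next checks its requests. A hedged sketch of that consumer side; handle_ibs_requests() is a hypothetical name, the real check lives in the vcpu entry path:

static void handle_ibs_requests(struct kvm_vcpu *vcpu)
{
        if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu))
                atomic_set_mask(CPUSTAT_IBS,
                                &vcpu->arch.sie_block->cpuflags);
        if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu))
                atomic_clear_mask(CPUSTAT_IBS,
                                  &vcpu->arch.sie_block->cpuflags);
}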
1877
6852d7b6
DH
1878void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
1879{
8ad35755
DH
1880 int i, online_vcpus, started_vcpus = 0;
1881
1882 if (!is_vcpu_stopped(vcpu))
1883 return;
1884
6852d7b6 1885 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 1886 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 1887 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
1888 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1889
1890 for (i = 0; i < online_vcpus; i++) {
1891 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
1892 started_vcpus++;
1893 }
1894
1895 if (started_vcpus == 0) {
1896 /* we're the only active VCPU -> speed it up */
1897 __enable_ibs_on_vcpu(vcpu);
1898 } else if (started_vcpus == 1) {
1899 /*
1900 * As we are starting a second VCPU, we have to disable
1901 * the IBS facility on all VCPUs to remove potentially
1902 * outstanding ENABLE requests.
1903 */
1904 __disable_ibs_on_all_vcpus(vcpu->kvm);
1905 }
1906
6852d7b6 1907 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
1908 /*
1909 * Another VCPU might have used IBS while we were offline.
1910 * Let's play safe and flush the VCPU at startup.
1911 */
d3d692c8 1912 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 1913 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 1914 return;
6852d7b6
DH
1915}
1916
1917void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
1918{
8ad35755
DH
1919 int i, online_vcpus, started_vcpus = 0;
1920 struct kvm_vcpu *started_vcpu = NULL;
1921
1922 if (is_vcpu_stopped(vcpu))
1923 return;
1924
6852d7b6 1925 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 1926 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 1927 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
1928 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
1929
32f5ff63 1930 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
6cddd432 1931 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 1932
6cddd432 1933 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
1934 __disable_ibs_on_vcpu(vcpu);
1935
1936 for (i = 0; i < online_vcpus; i++) {
1937 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
1938 started_vcpus++;
1939 started_vcpu = vcpu->kvm->vcpus[i];
1940 }
1941 }
1942
1943 if (started_vcpus == 1) {
1944 /*
1945 * As we only have one VCPU left, we want to enable the
1946 * IBS facility for that VCPU to speed it up.
1947 */
1948 __enable_ibs_on_vcpu(started_vcpu);
1949 }
1950
433b9ee4 1951 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 1952 return;
6852d7b6
DH
1953}
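
Start and stop mirror each other: IBS is only worth enabling while exactly one vcpu is runnable, so both sides recount the started vcpus under start_stop_lock. A hypothetical helper capturing the counting logic both functions share could read:

static unsigned int nr_started_vcpus(struct kvm *kvm)
{
        unsigned int i, online_vcpus, started_vcpus = 0;

        online_vcpus = atomic_read(&kvm->online_vcpus);
        for (i = 0; i < online_vcpus; i++)
                if (!is_vcpu_stopped(kvm->vcpus[i]))
                        started_vcpus++;
        return started_vcpus;
}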
1954
d6712df9
CH
1955static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
1956 struct kvm_enable_cap *cap)
1957{
1958 int r;
1959
1960 if (cap->flags)
1961 return -EINVAL;
1962
1963 switch (cap->cap) {
fa6b7fe9
CH
1964 case KVM_CAP_S390_CSS_SUPPORT:
1965 if (!vcpu->kvm->arch.css_support) {
1966 vcpu->kvm->arch.css_support = 1;
1967 trace_kvm_s390_enable_css(vcpu->kvm);
1968 }
1969 r = 0;
1970 break;
d6712df9
CH
1971 default:
1972 r = -EINVAL;
1973 break;
1974 }
1975 return r;
1976}
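
From userspace, flipping css_support is a one-shot vcpu ioctl; enable_css() is a hypothetical wrapper:

#include <linux/kvm.h>
#include <string.h>
#include <sys/ioctl.h>

static int enable_css(int vcpu_fd)
{
        struct kvm_enable_cap cap;

        memset(&cap, 0, sizeof(cap));   /* non-zero flags are rejected */
        cap.cap = KVM_CAP_S390_CSS_SUPPORT;
        return ioctl(vcpu_fd, KVM_ENABLE_CAP, &cap);
}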
1977
b0c632db
HC
1978long kvm_arch_vcpu_ioctl(struct file *filp,
1979 unsigned int ioctl, unsigned long arg)
1980{
1981 struct kvm_vcpu *vcpu = filp->private_data;
1982 void __user *argp = (void __user *)arg;
800c1065 1983 int idx;
bc923cc9 1984 long r;
b0c632db 1985
93736624
AK
1986 switch (ioctl) {
1987 case KVM_S390_INTERRUPT: {
ba5c1e9b 1988 struct kvm_s390_interrupt s390int;
383d0b05 1989 struct kvm_s390_irq s390irq;
ba5c1e9b 1990
93736624 1991 r = -EFAULT;
ba5c1e9b 1992 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 1993 break;
383d0b05
JF
1994 if (s390int_to_s390irq(&s390int, &s390irq))
1995 return -EINVAL;
1996 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 1997 break;
ba5c1e9b 1998 }
b0c632db 1999 case KVM_S390_STORE_STATUS:
800c1065 2000 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2001 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2002 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2003 break;
b0c632db
HC
2004 case KVM_S390_SET_INITIAL_PSW: {
2005 psw_t psw;
2006
bc923cc9 2007 r = -EFAULT;
b0c632db 2008 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2009 break;
2010 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2011 break;
b0c632db
HC
2012 }
2013 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2014 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2015 break;
14eebd91
CO
2016 case KVM_SET_ONE_REG:
2017 case KVM_GET_ONE_REG: {
2018 struct kvm_one_reg reg;
2019 r = -EFAULT;
2020 if (copy_from_user(&reg, argp, sizeof(reg)))
2021 break;
2022 if (ioctl == KVM_SET_ONE_REG)
2023 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2024 else
2025 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2026 break;
2027 }
27e0393f
CO
2028#ifdef CONFIG_KVM_S390_UCONTROL
2029 case KVM_S390_UCAS_MAP: {
2030 struct kvm_s390_ucas_mapping ucasmap;
2031
2032 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2033 r = -EFAULT;
2034 break;
2035 }
2036
2037 if (!kvm_is_ucontrol(vcpu->kvm)) {
2038 r = -EINVAL;
2039 break;
2040 }
2041
2042 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2043 ucasmap.vcpu_addr, ucasmap.length);
2044 break;
2045 }
2046 case KVM_S390_UCAS_UNMAP: {
2047 struct kvm_s390_ucas_mapping ucasmap;
2048
2049 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2050 r = -EFAULT;
2051 break;
2052 }
2053
2054 if (!kvm_is_ucontrol(vcpu->kvm)) {
2055 r = -EINVAL;
2056 break;
2057 }
2058
2059 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2060 ucasmap.length);
2061 break;
2062 }
2063#endif
ccc7910f 2064 case KVM_S390_VCPU_FAULT: {
527e30b4 2065 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2066 break;
2067 }
d6712df9
CH
2068 case KVM_ENABLE_CAP:
2069 {
2070 struct kvm_enable_cap cap;
2071 r = -EFAULT;
2072 if (copy_from_user(&cap, argp, sizeof(cap)))
2073 break;
2074 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2075 break;
2076 }
b0c632db 2077 default:
3e6afcf1 2078 r = -ENOTTY;
b0c632db 2079 }
bc923cc9 2080 return r;
b0c632db
HC
2081}
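
As an example of the KVM_S390_INTERRUPT path above, injecting a restart interrupt from userspace takes a single ioctl; inject_restart() is a hypothetical wrapper:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int inject_restart(int vcpu_fd)
{
        struct kvm_s390_interrupt s390int = {
                .type = KVM_S390_RESTART,
        };

        return ioctl(vcpu_fd, KVM_S390_INTERRUPT, &s390int);
}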
2082
5b1c1493
CO
2083int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2084{
2085#ifdef CONFIG_KVM_S390_UCONTROL
2086 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2087 && (kvm_is_ucontrol(vcpu->kvm))) {
2088 vmf->page = virt_to_page(vcpu->arch.sie_block);
2089 get_page(vmf->page);
2090 return 0;
2091 }
2092#endif
2093 return VM_FAULT_SIGBUS;
2094}
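
This fault handler is what lets userspace of a user-controlled VM mmap() the SIE control block straight from the vcpu fd. A sketch, assuming an s390 ucontrol guest; map_sie_block() is hypothetical:

#include <linux/kvm.h>
#include <sys/mman.h>
#include <unistd.h>

static void *map_sie_block(int vcpu_fd)
{
        long psize = sysconf(_SC_PAGESIZE);

        /* vmf->pgoff above counts pages, so scale the offset accordingly */
        return mmap(NULL, psize, PROT_READ | PROT_WRITE, MAP_SHARED,
                    vcpu_fd, KVM_S390_SIE_PAGE_OFFSET * psize);
}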
2095
5587027c
AK
2096int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2097 unsigned long npages)
db3fe4eb
TY
2098{
2099 return 0;
2100}
2101
b0c632db 2102/* Section: memory related */
f7784b8e
MT
2103int kvm_arch_prepare_memory_region(struct kvm *kvm,
2104 struct kvm_memory_slot *memslot,
7b6195a9
TY
2105 struct kvm_userspace_memory_region *mem,
2106 enum kvm_mr_change change)
b0c632db 2107{
dd2887e7
NW
2108 /* A few sanity checks. Memory slots have to start and end at a
2109 segment boundary (1MB). The memory in userland may be fragmented
2110 into several different vmas. It is okay to mmap() and munmap()
2111 memory in this slot at any time after this call. */
b0c632db 2112
598841ca 2113 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2114 return -EINVAL;
2115
598841ca 2116 if (mem->memory_size & 0xffffful)
b0c632db
HC
2117 return -EINVAL;
2118
f7784b8e
MT
2119 return 0;
2120}
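
To satisfy the two alignment checks above, a memslot registered from userspace must start and end on a 1MB boundary. A minimal sketch; set_mem() is a hypothetical wrapper:

#include <linux/kvm.h>
#include <sys/ioctl.h>

static int set_mem(int vm_fd, void *host_mem, __u64 guest_phys, __u64 size)
{
        struct kvm_userspace_memory_region mem = {
                .slot = 0,
                .guest_phys_addr = guest_phys,  /* 1MB aligned */
                .memory_size = size,            /* multiple of 1 << 20 */
                .userspace_addr = (unsigned long)host_mem,
        };

        return ioctl(vm_fd, KVM_SET_USER_MEMORY_REGION, &mem);
}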
2121
2122void kvm_arch_commit_memory_region(struct kvm *kvm,
2123 struct kvm_userspace_memory_region *mem,
8482644a
TY
2124 const struct kvm_memory_slot *old,
2125 enum kvm_mr_change change)
f7784b8e 2126{
f7850c92 2127 int rc;
f7784b8e 2128
2cef4deb
CB
2129 /* If the basics of the memslot do not change, we do not want
2130 * to update the gmap. Every update causes several unnecessary
2131 * segment translation exceptions. This is usually handled just
2132 * fine by the normal fault handler + gmap, but it will also
2133 * cause faults on the prefix page of running guest CPUs.
2134 */
2135 if (old->userspace_addr == mem->userspace_addr &&
2136 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2137 old->npages * PAGE_SIZE == mem->memory_size)
2138 return;
598841ca
CO
2139
2140 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2141 mem->guest_phys_addr, mem->memory_size);
2142 if (rc)
f7850c92 2143 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 2144 return;
b0c632db
HC
2145}
2146
b0c632db
HC
2147static int __init kvm_s390_init(void)
2148{
9d8d5786 2149 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2150}
2151
2152static void __exit kvm_s390_exit(void)
2153{
2154 kvm_exit();
2155}
2156
2157module_init(kvm_s390_init);
2158module_exit(kvm_s390_exit);
566af940
CH
2159
2160/*
2161 * Enable autoloading of the kvm module.
2162 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2163 * since x86 takes a different approach.
2164 */
2165#include <linux/miscdevice.h>
2166MODULE_ALIAS_MISCDEV(KVM_MINOR);
2167MODULE_ALIAS("devname:kvm");