/*
 * hosting zSeries kernel virtual machines
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 *            Heiko Carstens <heiko.carstens@de.ibm.com>
 *            Christian Ehrhardt <ehrhardt@de.ibm.com>
 *            Jason J. Herne <jjherne@us.ibm.com>
 */

#include <linux/compiler.h>
#include <linux/err.h>
#include <linux/fs.h>
#include <linux/hrtimer.h>
#include <linux/init.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/random.h>
#include <linux/slab.h>
#include <linux/timer.h>
#include <asm/asm-offsets.h>
#include <asm/lowcore.h>
#include <asm/pgtable.h>
#include <asm/nmi.h>
#include <asm/switch_to.h>
#include <asm/sclp.h>
#include "kvm-s390.h"
#include "gaccess.h"

#define CREATE_TRACE_POINTS
#include "trace.h"
#include "trace-s390.h"

#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "userspace_handled", VCPU_STAT(exit_userspace) },
	{ "exit_null", VCPU_STAT(exit_null) },
	{ "exit_validity", VCPU_STAT(exit_validity) },
	{ "exit_stop_request", VCPU_STAT(exit_stop_request) },
	{ "exit_external_request", VCPU_STAT(exit_external_request) },
	{ "exit_external_interrupt", VCPU_STAT(exit_external_interrupt) },
	{ "exit_instruction", VCPU_STAT(exit_instruction) },
	{ "exit_program_interruption", VCPU_STAT(exit_program_interruption) },
	{ "exit_instr_and_program_int", VCPU_STAT(exit_instr_and_program) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "instruction_lctlg", VCPU_STAT(instruction_lctlg) },
	{ "instruction_lctl", VCPU_STAT(instruction_lctl) },
	{ "instruction_stctl", VCPU_STAT(instruction_stctl) },
	{ "instruction_stctg", VCPU_STAT(instruction_stctg) },
	{ "deliver_emergency_signal", VCPU_STAT(deliver_emergency_signal) },
	{ "deliver_external_call", VCPU_STAT(deliver_external_call) },
	{ "deliver_service_signal", VCPU_STAT(deliver_service_signal) },
	{ "deliver_virtio_interrupt", VCPU_STAT(deliver_virtio_interrupt) },
	{ "deliver_stop_signal", VCPU_STAT(deliver_stop_signal) },
	{ "deliver_prefix_signal", VCPU_STAT(deliver_prefix_signal) },
	{ "deliver_restart_signal", VCPU_STAT(deliver_restart_signal) },
	{ "deliver_program_interruption", VCPU_STAT(deliver_program_int) },
	{ "exit_wait_state", VCPU_STAT(exit_wait_state) },
	{ "instruction_pfmf", VCPU_STAT(instruction_pfmf) },
	{ "instruction_stidp", VCPU_STAT(instruction_stidp) },
	{ "instruction_spx", VCPU_STAT(instruction_spx) },
	{ "instruction_stpx", VCPU_STAT(instruction_stpx) },
	{ "instruction_stap", VCPU_STAT(instruction_stap) },
	{ "instruction_storage_key", VCPU_STAT(instruction_storage_key) },
	{ "instruction_ipte_interlock", VCPU_STAT(instruction_ipte_interlock) },
	{ "instruction_stsch", VCPU_STAT(instruction_stsch) },
	{ "instruction_chsc", VCPU_STAT(instruction_chsc) },
	{ "instruction_essa", VCPU_STAT(instruction_essa) },
	{ "instruction_stsi", VCPU_STAT(instruction_stsi) },
	{ "instruction_stfl", VCPU_STAT(instruction_stfl) },
	{ "instruction_tprot", VCPU_STAT(instruction_tprot) },
	{ "instruction_sigp_sense", VCPU_STAT(instruction_sigp_sense) },
	{ "instruction_sigp_sense_running", VCPU_STAT(instruction_sigp_sense_running) },
	{ "instruction_sigp_external_call", VCPU_STAT(instruction_sigp_external_call) },
	{ "instruction_sigp_emergency", VCPU_STAT(instruction_sigp_emergency) },
	{ "instruction_sigp_cond_emergency", VCPU_STAT(instruction_sigp_cond_emergency) },
	{ "instruction_sigp_start", VCPU_STAT(instruction_sigp_start) },
	{ "instruction_sigp_stop", VCPU_STAT(instruction_sigp_stop) },
	{ "instruction_sigp_stop_store_status", VCPU_STAT(instruction_sigp_stop_store_status) },
	{ "instruction_sigp_store_status", VCPU_STAT(instruction_sigp_store_status) },
	{ "instruction_sigp_set_arch", VCPU_STAT(instruction_sigp_arch) },
	{ "instruction_sigp_set_prefix", VCPU_STAT(instruction_sigp_prefix) },
	{ "instruction_sigp_restart", VCPU_STAT(instruction_sigp_restart) },
	{ "instruction_sigp_cpu_reset", VCPU_STAT(instruction_sigp_cpu_reset) },
	{ "instruction_sigp_init_cpu_reset", VCPU_STAT(instruction_sigp_init_cpu_reset) },
	{ "instruction_sigp_unknown", VCPU_STAT(instruction_sigp_unknown) },
	{ "diagnose_10", VCPU_STAT(diagnose_10) },
	{ "diagnose_44", VCPU_STAT(diagnose_44) },
	{ "diagnose_9c", VCPU_STAT(diagnose_9c) },
	{ NULL }
};

/* upper facilities limit for kvm */
unsigned long kvm_s390_fac_list_mask[] = {
	0xff82fffbf4fc2000UL,
	0x005c000000000000UL,
};

unsigned long kvm_s390_fac_list_mask_size(void)
{
	BUILD_BUG_ON(ARRAY_SIZE(kvm_s390_fac_list_mask) > S390_ARCH_FAC_MASK_SIZE_U64);
	return ARRAY_SIZE(kvm_s390_fac_list_mask);
}

static struct gmap_notifier gmap_notifier;

/* Section: not file related */
int kvm_arch_hardware_enable(void)
{
	/* every s390 is virtualization enabled ;-) */
	return 0;
}

static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address);

int kvm_arch_hardware_setup(void)
{
	gmap_notifier.notifier_call = kvm_gmap_notifier;
	gmap_register_ipte_notifier(&gmap_notifier);
	return 0;
}

void kvm_arch_hardware_unsetup(void)
{
	gmap_unregister_ipte_notifier(&gmap_notifier);
}

int kvm_arch_init(void *opaque)
{
	/* Register floating interrupt controller interface. */
	return kvm_register_device_ops(&kvm_flic_ops, KVM_DEV_TYPE_FLIC);
}

/* Section: device related */
long kvm_arch_dev_ioctl(struct file *filp,
			unsigned int ioctl, unsigned long arg)
{
	if (ioctl == KVM_S390_ENABLE_SIE)
		return s390_enable_sie();
	return -EINVAL;
}

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
	int r;

	switch (ext) {
	case KVM_CAP_S390_PSW:
	case KVM_CAP_S390_GMAP:
	case KVM_CAP_SYNC_MMU:
#ifdef CONFIG_KVM_S390_UCONTROL
	case KVM_CAP_S390_UCONTROL:
#endif
	case KVM_CAP_ASYNC_PF:
	case KVM_CAP_SYNC_REGS:
	case KVM_CAP_ONE_REG:
	case KVM_CAP_ENABLE_CAP:
	case KVM_CAP_S390_CSS_SUPPORT:
	case KVM_CAP_IRQFD:
	case KVM_CAP_IOEVENTFD:
	case KVM_CAP_DEVICE_CTRL:
	case KVM_CAP_ENABLE_CAP_VM:
	case KVM_CAP_S390_IRQCHIP:
	case KVM_CAP_VM_ATTRIBUTES:
	case KVM_CAP_MP_STATE:
	case KVM_CAP_S390_USER_SIGP:
		r = 1;
		break;
	case KVM_CAP_NR_VCPUS:
	case KVM_CAP_MAX_VCPUS:
		r = KVM_MAX_VCPUS;
		break;
	case KVM_CAP_NR_MEMSLOTS:
		r = KVM_USER_MEM_SLOTS;
		break;
	case KVM_CAP_S390_COW:
		r = MACHINE_HAS_ESOP;
		break;
	default:
		r = 0;
	}
	return r;
}

static void kvm_s390_sync_dirty_log(struct kvm *kvm,
				    struct kvm_memory_slot *memslot)
{
	gfn_t cur_gfn, last_gfn;
	unsigned long address;
	struct gmap *gmap = kvm->arch.gmap;

	down_read(&gmap->mm->mmap_sem);
	/* Loop over all guest pages */
	last_gfn = memslot->base_gfn + memslot->npages;
	for (cur_gfn = memslot->base_gfn; cur_gfn <= last_gfn; cur_gfn++) {
		address = gfn_to_hva_memslot(memslot, cur_gfn);

		if (gmap_test_and_clear_dirty(address, gmap))
			mark_page_dirty(kvm, cur_gfn);
	}
	up_read(&gmap->mm->mmap_sem);
}

/* Section: vm related */
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	int r;
	unsigned long n;
	struct kvm_memory_slot *memslot;
	int is_dirty = 0;

	mutex_lock(&kvm->slots_lock);

	r = -EINVAL;
	if (log->slot >= KVM_USER_MEM_SLOTS)
		goto out;

	memslot = id_to_memslot(kvm->memslots, log->slot);
	r = -ENOENT;
	if (!memslot->dirty_bitmap)
		goto out;

	kvm_s390_sync_dirty_log(kvm, memslot);
	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* Clear the dirty log */
	if (is_dirty) {
		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}
	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static int kvm_vm_ioctl_enable_cap(struct kvm *kvm, struct kvm_enable_cap *cap)
{
	int r;

	if (cap->flags)
		return -EINVAL;

	switch (cap->cap) {
	case KVM_CAP_S390_IRQCHIP:
		kvm->arch.use_irqchip = 1;
		r = 0;
		break;
	case KVM_CAP_S390_USER_SIGP:
		kvm->arch.user_sigp = 1;
		r = 0;
		break;
	default:
		r = -EINVAL;
		break;
	}
	return r;
}

static int kvm_s390_get_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->attr) {
	case KVM_S390_VM_MEM_LIMIT_SIZE:
		ret = 0;
		if (put_user(kvm->arch.gmap->asce_end, (u64 __user *)attr->addr))
			ret = -EFAULT;
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_mem_control(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;
	unsigned int idx;
	switch (attr->attr) {
	case KVM_S390_VM_MEM_ENABLE_CMMA:
		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			kvm->arch.use_cmma = 1;
			ret = 0;
		}
		mutex_unlock(&kvm->lock);
		break;
	case KVM_S390_VM_MEM_CLR_CMMA:
		mutex_lock(&kvm->lock);
		idx = srcu_read_lock(&kvm->srcu);
		s390_reset_cmma(kvm->arch.gmap->mm);
		srcu_read_unlock(&kvm->srcu, idx);
		mutex_unlock(&kvm->lock);
		ret = 0;
		break;
	case KVM_S390_VM_MEM_LIMIT_SIZE: {
		unsigned long new_limit;

		if (kvm_is_ucontrol(kvm))
			return -EINVAL;

		if (get_user(new_limit, (u64 __user *)attr->addr))
			return -EFAULT;

		if (new_limit > kvm->arch.gmap->asce_end)
			return -E2BIG;

		ret = -EBUSY;
		mutex_lock(&kvm->lock);
		if (atomic_read(&kvm->online_vcpus) == 0) {
			/* gmap_alloc will round the limit up */
			struct gmap *new = gmap_alloc(current->mm, new_limit);

			if (!new) {
				ret = -ENOMEM;
			} else {
				gmap_free(kvm->arch.gmap);
				new->private = kvm;
				kvm->arch.gmap = new;
				ret = 0;
			}
		}
		mutex_unlock(&kvm->lock);
		break;
	}
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu);

static int kvm_s390_vm_set_crypto(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *vcpu;
	int i;

	if (!test_kvm_facility(kvm, 76))
		return -EINVAL;

	mutex_lock(&kvm->lock);
	switch (attr->attr) {
	case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->aes_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		kvm->arch.crypto.aes_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		get_random_bytes(
			kvm->arch.crypto.crycb->dea_wrapping_key_mask,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		kvm->arch.crypto.dea_kw = 1;
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		kvm->arch.crypto.aes_kw = 0;
		memset(kvm->arch.crypto.crycb->aes_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->aes_wrapping_key_mask));
		break;
	case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
		kvm->arch.crypto.dea_kw = 0;
		memset(kvm->arch.crypto.crycb->dea_wrapping_key_mask, 0,
			sizeof(kvm->arch.crypto.crycb->dea_wrapping_key_mask));
		break;
	default:
		mutex_unlock(&kvm->lock);
		return -ENXIO;
	}

	kvm_for_each_vcpu(i, vcpu, kvm) {
		kvm_s390_vcpu_crypto_setup(vcpu);
		exit_sie(vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high;

	if (copy_from_user(&gtod_high, (void __user *)attr->addr,
			   sizeof(gtod_high)))
		return -EFAULT;

	if (gtod_high != 0)
		return -EINVAL;

	return 0;
}

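/*
 * The guest TOD clock is not stored as an absolute value: kvm->arch.epoch
 * holds the difference between guest and host TOD, and the epoch of every
 * VCPU's SIE block is updated to match below.
 */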
static int kvm_s390_set_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_vcpu *cur_vcpu;
	unsigned int vcpu_idx;
	u64 host_tod, gtod;
	int r;

	if (copy_from_user(&gtod, (void __user *)attr->addr, sizeof(gtod)))
		return -EFAULT;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	mutex_lock(&kvm->lock);
	kvm->arch.epoch = gtod - host_tod;
	kvm_for_each_vcpu(vcpu_idx, cur_vcpu, kvm) {
		cur_vcpu->arch.sie_block->epoch = kvm->arch.epoch;
		exit_sie(cur_vcpu);
	}
	mutex_unlock(&kvm->lock);
	return 0;
}

static int kvm_s390_set_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_set_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_set_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_get_tod_high(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u8 gtod_high = 0;

	if (copy_to_user((void __user *)attr->addr, &gtod_high,
			 sizeof(gtod_high)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod_low(struct kvm *kvm, struct kvm_device_attr *attr)
{
	u64 host_tod, gtod;
	int r;

	r = store_tod_clock(&host_tod);
	if (r)
		return r;

	gtod = host_tod + kvm->arch.epoch;
	if (copy_to_user((void __user *)attr->addr, &gtod, sizeof(gtod)))
		return -EFAULT;

	return 0;
}

static int kvm_s390_get_tod(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	if (attr->flags)
		return -EINVAL;

	switch (attr->attr) {
	case KVM_S390_VM_TOD_HIGH:
		ret = kvm_s390_get_tod_high(kvm, attr);
		break;
	case KVM_S390_VM_TOD_LOW:
		ret = kvm_s390_get_tod_low(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}
	return ret;
}

static int kvm_s390_set_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	mutex_lock(&kvm->lock);
	if (atomic_read(&kvm->online_vcpus)) {
		ret = -EBUSY;
		goto out;
	}
	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	if (!copy_from_user(proc, (void __user *)attr->addr,
			    sizeof(*proc))) {
		memcpy(&kvm->arch.model.cpu_id, &proc->cpuid,
		       sizeof(struct cpuid));
		kvm->arch.model.ibc = proc->ibc;
		memcpy(kvm->arch.model.fac->kvm, proc->fac_list,
		       S390_ARCH_FAC_LIST_SIZE_BYTE);
	} else
		ret = -EFAULT;
	kfree(proc);
out:
	mutex_unlock(&kvm->lock);
	return ret;
}

static int kvm_s390_set_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_set_processor(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_get_processor(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_processor *proc;
	int ret = 0;

	proc = kzalloc(sizeof(*proc), GFP_KERNEL);
	if (!proc) {
		ret = -ENOMEM;
		goto out;
	}
	memcpy(&proc->cpuid, &kvm->arch.model.cpu_id, sizeof(struct cpuid));
	proc->ibc = kvm->arch.model.ibc;
	memcpy(&proc->fac_list, kvm->arch.model.fac->kvm, S390_ARCH_FAC_LIST_SIZE_BYTE);
	if (copy_to_user((void __user *)attr->addr, proc, sizeof(*proc)))
		ret = -EFAULT;
	kfree(proc);
out:
	return ret;
}

static int kvm_s390_get_machine(struct kvm *kvm, struct kvm_device_attr *attr)
{
	struct kvm_s390_vm_cpu_machine *mach;
	int ret = 0;

	mach = kzalloc(sizeof(*mach), GFP_KERNEL);
	if (!mach) {
		ret = -ENOMEM;
		goto out;
	}
	get_cpu_id((struct cpuid *) &mach->cpuid);
	mach->ibc = sclp_get_ibc();
	memcpy(&mach->fac_mask, kvm_s390_fac_list_mask,
	       kvm_s390_fac_list_mask_size() * sizeof(u64));
	memcpy((unsigned long *)&mach->fac_list, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_U64);
	if (copy_to_user((void __user *)attr->addr, mach, sizeof(*mach)))
		ret = -EFAULT;
	kfree(mach);
out:
	return ret;
}

static int kvm_s390_get_cpu_model(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret = -ENXIO;

	switch (attr->attr) {
	case KVM_S390_VM_CPU_PROCESSOR:
		ret = kvm_s390_get_processor(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MACHINE:
		ret = kvm_s390_get_machine(kvm, attr);
		break;
	}
	return ret;
}

static int kvm_s390_vm_set_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_set_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_set_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_set_cpu_model(kvm, attr);
		break;
	case KVM_S390_VM_CRYPTO:
		ret = kvm_s390_vm_set_crypto(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_get_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		ret = kvm_s390_get_mem_control(kvm, attr);
		break;
	case KVM_S390_VM_TOD:
		ret = kvm_s390_get_tod(kvm, attr);
		break;
	case KVM_S390_VM_CPU_MODEL:
		ret = kvm_s390_get_cpu_model(kvm, attr);
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

static int kvm_s390_vm_has_attr(struct kvm *kvm, struct kvm_device_attr *attr)
{
	int ret;

	switch (attr->group) {
	case KVM_S390_VM_MEM_CTRL:
		switch (attr->attr) {
		case KVM_S390_VM_MEM_ENABLE_CMMA:
		case KVM_S390_VM_MEM_CLR_CMMA:
		case KVM_S390_VM_MEM_LIMIT_SIZE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_TOD:
		switch (attr->attr) {
		case KVM_S390_VM_TOD_LOW:
		case KVM_S390_VM_TOD_HIGH:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CPU_MODEL:
		switch (attr->attr) {
		case KVM_S390_VM_CPU_PROCESSOR:
		case KVM_S390_VM_CPU_MACHINE:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	case KVM_S390_VM_CRYPTO:
		switch (attr->attr) {
		case KVM_S390_VM_CRYPTO_ENABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_ENABLE_DEA_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_AES_KW:
		case KVM_S390_VM_CRYPTO_DISABLE_DEA_KW:
			ret = 0;
			break;
		default:
			ret = -ENXIO;
			break;
		}
		break;
	default:
		ret = -ENXIO;
		break;
	}

	return ret;
}

long kvm_arch_vm_ioctl(struct file *filp,
		       unsigned int ioctl, unsigned long arg)
{
	struct kvm *kvm = filp->private_data;
	void __user *argp = (void __user *)arg;
	struct kvm_device_attr attr;
	int r;

	switch (ioctl) {
	case KVM_S390_INTERRUPT: {
		struct kvm_s390_interrupt s390int;

		r = -EFAULT;
		if (copy_from_user(&s390int, argp, sizeof(s390int)))
			break;
		r = kvm_s390_inject_vm(kvm, &s390int);
		break;
	}
	case KVM_ENABLE_CAP: {
		struct kvm_enable_cap cap;
		r = -EFAULT;
		if (copy_from_user(&cap, argp, sizeof(cap)))
			break;
		r = kvm_vm_ioctl_enable_cap(kvm, &cap);
		break;
	}
	case KVM_CREATE_IRQCHIP: {
		struct kvm_irq_routing_entry routing;

		r = -EINVAL;
		if (kvm->arch.use_irqchip) {
			/* Set up dummy routing. */
			memset(&routing, 0, sizeof(routing));
			kvm_set_irq_routing(kvm, &routing, 0, 0);
			r = 0;
		}
		break;
	}
	case KVM_SET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_set_attr(kvm, &attr);
		break;
	}
	case KVM_GET_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_get_attr(kvm, &attr);
		break;
	}
	case KVM_HAS_DEVICE_ATTR: {
		r = -EFAULT;
		if (copy_from_user(&attr, (void __user *)arg, sizeof(attr)))
			break;
		r = kvm_s390_vm_has_attr(kvm, &attr);
		break;
	}
	default:
		r = -ENOTTY;
	}

	return r;
}

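/*
 * Query the AP (adjunct processor) configuration by issuing PQAP(QCI)
 * via its hard-coded opcode; the condition code is returned to the caller.
 */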
static int kvm_s390_query_ap_config(u8 *config)
{
	u32 fcn_code = 0x04000000UL;
	u32 cc;

	asm volatile(
		"lgr 0,%1\n"
		"lgr 2,%2\n"
		".long 0xb2af0000\n" /* PQAP(QCI) */
		"ipm %0\n"
		"srl %0,28\n"
		: "=r" (cc)
		: "r" (fcn_code), "r" (config)
		: "cc", "0", "2", "memory"
	);

	return cc;
}

static int kvm_s390_apxa_installed(void)
{
	u8 config[128];
	int cc;

	if (test_facility(2) && test_facility(12)) {
		cc = kvm_s390_query_ap_config(config);

		if (cc)
			pr_err("PQAP(QCI) failed with cc=%d", cc);
		else
			return config[0] & 0x40;
	}

	return 0;
}

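/* Use the extended CRYCB format (FORMAT2) only when APXA is installed. */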
static void kvm_s390_set_crycb_format(struct kvm *kvm)
{
	kvm->arch.crypto.crycbd = (__u32)(unsigned long) kvm->arch.crypto.crycb;

	if (kvm_s390_apxa_installed())
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT2;
	else
		kvm->arch.crypto.crycbd |= CRYCB_FORMAT1;
}

static void kvm_s390_get_cpu_id(struct cpuid *cpu_id)
{
	get_cpu_id(cpu_id);
	cpu_id->version = 0xff;
}

static int kvm_s390_crypto_init(struct kvm *kvm)
{
	if (!test_kvm_facility(kvm, 76))
		return 0;

	kvm->arch.crypto.crycb = kzalloc(sizeof(*kvm->arch.crypto.crycb),
					 GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.crypto.crycb)
		return -ENOMEM;

	kvm_s390_set_crycb_format(kvm);

	/* Disable AES/DEA protected key functions by default */
	kvm->arch.crypto.aes_kw = 0;
	kvm->arch.crypto.dea_kw = 0;

	return 0;
}

int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
{
	int i, rc;
	char debug_name[16];
	static unsigned long sca_offset;

	rc = -EINVAL;
#ifdef CONFIG_KVM_S390_UCONTROL
	if (type & ~KVM_VM_S390_UCONTROL)
		goto out_err;
	if ((type & KVM_VM_S390_UCONTROL) && (!capable(CAP_SYS_ADMIN)))
		goto out_err;
#else
	if (type)
		goto out_err;
#endif

	rc = s390_enable_sie();
	if (rc)
		goto out_err;

	rc = -ENOMEM;

	kvm->arch.sca = (struct sca_block *) get_zeroed_page(GFP_KERNEL);
	if (!kvm->arch.sca)
		goto out_err;
	spin_lock(&kvm_lock);
	sca_offset = (sca_offset + 16) & 0x7f0;
	kvm->arch.sca = (struct sca_block *) ((char *) kvm->arch.sca + sca_offset);
	spin_unlock(&kvm_lock);

	sprintf(debug_name, "kvm-%u", current->pid);

	kvm->arch.dbf = debug_register(debug_name, 8, 2, 8 * sizeof(long));
	if (!kvm->arch.dbf)
		goto out_nodbf;

	/*
	 * The architectural maximum amount of facilities is 16 kbit. To store
	 * this amount, 2 kbyte of memory is required. Thus we need a full
	 * page to hold the active copy (arch.model.fac->sie) and the current
	 * facilities set (arch.model.fac->kvm). Its address size has to be
	 * 31 bits and word aligned.
	 */
	kvm->arch.model.fac =
		(struct s390_model_fac *) get_zeroed_page(GFP_KERNEL | GFP_DMA);
	if (!kvm->arch.model.fac)
		goto out_nofac;

	memcpy(kvm->arch.model.fac->kvm, S390_lowcore.stfle_fac_list,
	       S390_ARCH_FAC_LIST_SIZE_U64);

	/*
	 * If this KVM host does not run in an LPAR, relax the facility bits
	 * of the kvm facility mask by all missing facilities. This allows
	 * determining the right CPU model by means of the remaining facilities.
	 * Live guest migration must prohibit the migration of KVMs running in
	 * an LPAR to non-LPAR hosts.
	 */
	if (!MACHINE_IS_LPAR)
		for (i = 0; i < kvm_s390_fac_list_mask_size(); i++)
			kvm_s390_fac_list_mask[i] &= kvm->arch.model.fac->kvm[i];

	/*
	 * Apply the kvm facility mask to limit the kvm supported/tolerated
	 * facility list.
	 */
	for (i = 0; i < S390_ARCH_FAC_LIST_SIZE_U64; i++) {
		if (i < kvm_s390_fac_list_mask_size())
			kvm->arch.model.fac->kvm[i] &= kvm_s390_fac_list_mask[i];
		else
			kvm->arch.model.fac->kvm[i] = 0UL;
	}

	kvm_s390_get_cpu_id(&kvm->arch.model.cpu_id);
	kvm->arch.model.ibc = sclp_get_ibc() & 0x0fff;

	if (kvm_s390_crypto_init(kvm) < 0)
		goto out_crypto;

	spin_lock_init(&kvm->arch.float_int.lock);
	INIT_LIST_HEAD(&kvm->arch.float_int.list);
	init_waitqueue_head(&kvm->arch.ipte_wq);
	mutex_init(&kvm->arch.ipte_mutex);

	debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
	VM_EVENT(kvm, 3, "%s", "vm created");

	if (type & KVM_VM_S390_UCONTROL) {
		kvm->arch.gmap = NULL;
	} else {
		kvm->arch.gmap = gmap_alloc(current->mm, (1UL << 44) - 1);
		if (!kvm->arch.gmap)
			goto out_nogmap;
		kvm->arch.gmap->private = kvm;
		kvm->arch.gmap->pfault_enabled = 0;
	}

	kvm->arch.css_support = 0;
	kvm->arch.use_irqchip = 0;
	kvm->arch.epoch = 0;

	spin_lock_init(&kvm->arch.start_stop_lock);

	return 0;
out_nogmap:
	kfree(kvm->arch.crypto.crycb);
out_crypto:
	free_page((unsigned long)kvm->arch.model.fac);
out_nofac:
	debug_unregister(kvm->arch.dbf);
out_nodbf:
	free_page((unsigned long)(kvm->arch.sca));
out_err:
	return rc;
}

void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 3, "%s", "free cpu");
	trace_kvm_s390_destroy_vcpu(vcpu->vcpu_id);
	kvm_s390_clear_local_irqs(vcpu);
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_is_ucontrol(vcpu->kvm)) {
		clear_bit(63 - vcpu->vcpu_id,
			  (unsigned long *) &vcpu->kvm->arch.sca->mcn);
		if (vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda ==
		    (__u64) vcpu->arch.sie_block)
			vcpu->kvm->arch.sca->cpu[vcpu->vcpu_id].sda = 0;
	}
	smp_mb();

	if (kvm_is_ucontrol(vcpu->kvm))
		gmap_free(vcpu->arch.gmap);

	if (kvm_s390_cmma_enabled(vcpu->kvm))
		kvm_s390_vcpu_unsetup_cmma(vcpu);
	free_page((unsigned long)(vcpu->arch.sie_block));

	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static void kvm_free_vcpus(struct kvm *kvm)
{
	unsigned int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_arch_vcpu_destroy(vcpu);

	mutex_lock(&kvm->lock);
	for (i = 0; i < atomic_read(&kvm->online_vcpus); i++)
		kvm->vcpus[i] = NULL;

	atomic_set(&kvm->online_vcpus, 0);
	mutex_unlock(&kvm->lock);
}

void kvm_arch_destroy_vm(struct kvm *kvm)
{
	kvm_free_vcpus(kvm);
	free_page((unsigned long)kvm->arch.model.fac);
	free_page((unsigned long)(kvm->arch.sca));
	debug_unregister(kvm->arch.dbf);
	kfree(kvm->arch.crypto.crycb);
	if (!kvm_is_ucontrol(kvm))
		gmap_free(kvm->arch.gmap);
	kvm_s390_destroy_adapters(kvm);
	kvm_s390_clear_float_irqs(kvm);
}

/* Section: vcpu related */
static int __kvm_ucontrol_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.gmap = gmap_alloc(current->mm, -1UL);
	if (!vcpu->arch.gmap)
		return -ENOMEM;
	vcpu->arch.gmap->private = vcpu->kvm;

	return 0;
}

int kvm_arch_vcpu_init(struct kvm_vcpu *vcpu)
{
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	vcpu->run->kvm_valid_regs = KVM_SYNC_PREFIX |
				    KVM_SYNC_GPRS |
				    KVM_SYNC_ACRS |
				    KVM_SYNC_CRS |
				    KVM_SYNC_ARCH0 |
				    KVM_SYNC_PFAULT;

	if (kvm_is_ucontrol(vcpu->kvm))
		return __kvm_ucontrol_vcpu_init(vcpu);

	return 0;
}

void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	save_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	save_fp_regs(vcpu->arch.host_fpregs.fprs);
	save_access_regs(vcpu->arch.host_acrs);
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	restore_access_regs(vcpu->run->s.regs.acrs);
	gmap_enable(vcpu->arch.gmap);
	atomic_set_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
}

void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	atomic_clear_mask(CPUSTAT_RUNNING, &vcpu->arch.sie_block->cpuflags);
	gmap_disable(vcpu->arch.gmap);
	save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	save_fp_regs(vcpu->arch.guest_fpregs.fprs);
	save_access_regs(vcpu->run->s.regs.acrs);
	restore_fp_ctl(&vcpu->arch.host_fpregs.fpc);
	restore_fp_regs(vcpu->arch.host_fpregs.fprs);
	restore_access_regs(vcpu->arch.host_acrs);
}

static void kvm_s390_vcpu_initial_reset(struct kvm_vcpu *vcpu)
{
	/* this equals initial cpu reset in pop, but we don't switch to ESA */
	vcpu->arch.sie_block->gpsw.mask = 0UL;
	vcpu->arch.sie_block->gpsw.addr = 0UL;
	kvm_s390_set_prefix(vcpu, 0);
	vcpu->arch.sie_block->cputm = 0UL;
	vcpu->arch.sie_block->ckc = 0UL;
	vcpu->arch.sie_block->todpr = 0;
	memset(vcpu->arch.sie_block->gcr, 0, 16 * sizeof(__u64));
	vcpu->arch.sie_block->gcr[0] = 0xE0UL;
	vcpu->arch.sie_block->gcr[14] = 0xC2000000UL;
	vcpu->arch.guest_fpregs.fpc = 0;
	asm volatile("lfpc %0" : : "Q" (vcpu->arch.guest_fpregs.fpc));
	vcpu->arch.sie_block->gbea = 1;
	vcpu->arch.sie_block->pp = 0;
	vcpu->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
	kvm_clear_async_pf_completion_queue(vcpu);
	if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm))
		kvm_s390_vcpu_stop(vcpu);
	kvm_s390_clear_local_irqs(vcpu);
}

void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.sie_block->epoch = vcpu->kvm->arch.epoch;
	mutex_unlock(&vcpu->kvm->lock);
	if (!kvm_is_ucontrol(vcpu->kvm))
		vcpu->arch.gmap = vcpu->kvm->arch.gmap;
}

static void kvm_s390_vcpu_crypto_setup(struct kvm_vcpu *vcpu)
{
	if (!test_kvm_facility(vcpu->kvm, 76))
		return;

	vcpu->arch.sie_block->ecb3 &= ~(ECB3_AES | ECB3_DEA);

	if (vcpu->kvm->arch.crypto.aes_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_AES;
	if (vcpu->kvm->arch.crypto.dea_kw)
		vcpu->arch.sie_block->ecb3 |= ECB3_DEA;

	vcpu->arch.sie_block->crycbd = vcpu->kvm->arch.crypto.crycbd;
}

void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu)
{
	free_page(vcpu->arch.sie_block->cbrlo);
	vcpu->arch.sie_block->cbrlo = 0;
}

int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu)
{
	vcpu->arch.sie_block->cbrlo = get_zeroed_page(GFP_KERNEL);
	if (!vcpu->arch.sie_block->cbrlo)
		return -ENOMEM;

	vcpu->arch.sie_block->ecb2 |= 0x80;
	vcpu->arch.sie_block->ecb2 &= ~0x08;
	return 0;
}

int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int rc = 0;

	atomic_set(&vcpu->arch.sie_block->cpuflags, CPUSTAT_ZARCH |
						    CPUSTAT_SM |
						    CPUSTAT_STOPPED |
						    CPUSTAT_GED);
	vcpu->arch.sie_block->ecb = 6;
	if (test_kvm_facility(vcpu->kvm, 50) && test_kvm_facility(vcpu->kvm, 73))
		vcpu->arch.sie_block->ecb |= 0x10;

	vcpu->arch.sie_block->ecb2 = 8;
	vcpu->arch.sie_block->eca = 0xC1002000U;
	if (sclp_has_siif())
		vcpu->arch.sie_block->eca |= 1;
	if (sclp_has_sigpif())
		vcpu->arch.sie_block->eca |= 0x10000000U;
	vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE |
				      ICTL_TPROT;

	if (kvm_s390_cmma_enabled(vcpu->kvm)) {
		rc = kvm_s390_vcpu_setup_cmma(vcpu);
		if (rc)
			return rc;
	}
	hrtimer_init(&vcpu->arch.ckc_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL);
	vcpu->arch.ckc_timer.function = kvm_s390_idle_wakeup;

	mutex_lock(&vcpu->kvm->lock);
	vcpu->arch.cpu_id = vcpu->kvm->arch.model.cpu_id;
	memcpy(vcpu->kvm->arch.model.fac->sie, vcpu->kvm->arch.model.fac->kvm,
	       S390_ARCH_FAC_LIST_SIZE_BYTE);
	vcpu->arch.sie_block->ibc = vcpu->kvm->arch.model.ibc;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_vcpu_crypto_setup(vcpu);

	return rc;
}

struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm,
				      unsigned int id)
{
	struct kvm_vcpu *vcpu;
	struct sie_page *sie_page;
	int rc = -EINVAL;

	if (id >= KVM_MAX_VCPUS)
		goto out;

	rc = -ENOMEM;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	sie_page = (struct sie_page *) get_zeroed_page(GFP_KERNEL);
	if (!sie_page)
		goto out_free_cpu;

	vcpu->arch.sie_block = &sie_page->sie_block;
	vcpu->arch.sie_block->itdba = (unsigned long) &sie_page->itdb;

	vcpu->arch.sie_block->icpua = id;
	if (!kvm_is_ucontrol(kvm)) {
		if (!kvm->arch.sca) {
			WARN_ON_ONCE(1);
			goto out_free_cpu;
		}
		if (!kvm->arch.sca->cpu[id].sda)
			kvm->arch.sca->cpu[id].sda =
				(__u64) vcpu->arch.sie_block;
		vcpu->arch.sie_block->scaoh =
			(__u32)(((__u64)kvm->arch.sca) >> 32);
		vcpu->arch.sie_block->scaol = (__u32)(__u64)kvm->arch.sca;
		set_bit(63 - id, (unsigned long *) &kvm->arch.sca->mcn);
	}
	vcpu->arch.sie_block->fac = (int) (long) kvm->arch.model.fac->sie;

	spin_lock_init(&vcpu->arch.local_int.lock);
	vcpu->arch.local_int.float_int = &kvm->arch.float_int;
	vcpu->arch.local_int.wq = &vcpu->wq;
	vcpu->arch.local_int.cpuflags = &vcpu->arch.sie_block->cpuflags;

	rc = kvm_vcpu_init(vcpu, kvm, id);
	if (rc)
		goto out_free_sie_block;
	VM_EVENT(kvm, 3, "create cpu %d at %p, sie block at %p", id, vcpu,
		 vcpu->arch.sie_block);
	trace_kvm_s390_create_vcpu(id, vcpu, vcpu->arch.sie_block);

	return vcpu;
out_free_sie_block:
	free_page((unsigned long)(vcpu->arch.sie_block));
out_free_cpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(rc);
}

1237
b0c632db
HC
1238int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu)
1239{
9a022067 1240 return kvm_s390_vcpu_has_irq(vcpu, 0);
b0c632db
HC
1241}
1242
49b99e1e
CB
1243void s390_vcpu_block(struct kvm_vcpu *vcpu)
1244{
1245 atomic_set_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1246}
1247
1248void s390_vcpu_unblock(struct kvm_vcpu *vcpu)
1249{
1250 atomic_clear_mask(PROG_BLOCK_SIE, &vcpu->arch.sie_block->prog20);
1251}
1252
1253/*
1254 * Kick a guest cpu out of SIE and wait until SIE is not running.
1255 * If the CPU is not running (e.g. waiting as idle) the function will
1256 * return immediately. */
1257void exit_sie(struct kvm_vcpu *vcpu)
1258{
1259 atomic_set_mask(CPUSTAT_STOP_INT, &vcpu->arch.sie_block->cpuflags);
1260 while (vcpu->arch.sie_block->prog0c & PROG_IN_SIE)
1261 cpu_relax();
1262}
1263
1264/* Kick a guest cpu out of SIE and prevent SIE-reentry */
1265void exit_sie_sync(struct kvm_vcpu *vcpu)
1266{
1267 s390_vcpu_block(vcpu);
1268 exit_sie(vcpu);
1269}
1270
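/*
 * ipte notifier callback: when a mapping of a guest prefix page is
 * invalidated, request a MMU reload for the affected VCPU so the prefix
 * is re-mapped before re-entering SIE.
 */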
static void kvm_gmap_notifier(struct gmap *gmap, unsigned long address)
{
	int i;
	struct kvm *kvm = gmap->private;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm) {
		/* match against both prefix pages */
		if (kvm_s390_get_prefix(vcpu) == (address & ~0x1000UL)) {
			VCPU_EVENT(vcpu, 2, "gmap notifier for %lx", address);
			kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
			exit_sie_sync(vcpu);
		}
	}
}

int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu)
{
	/* kvm common code refers to this, but never calls it */
	BUG();
	return 0;
}

static int kvm_arch_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = put_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = put_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = put_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = put_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = put_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = put_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = put_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = put_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = put_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu,
					   struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_S390_TODPR:
		r = get_user(vcpu->arch.sie_block->todpr,
			     (u32 __user *)reg->addr);
		break;
	case KVM_REG_S390_EPOCHDIFF:
		r = get_user(vcpu->arch.sie_block->epoch,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CPU_TIMER:
		r = get_user(vcpu->arch.sie_block->cputm,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_CLOCK_COMP:
		r = get_user(vcpu->arch.sie_block->ckc,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFTOKEN:
		r = get_user(vcpu->arch.pfault_token,
			     (u64 __user *)reg->addr);
		if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
			kvm_clear_async_pf_completion_queue(vcpu);
		break;
	case KVM_REG_S390_PFCOMPARE:
		r = get_user(vcpu->arch.pfault_compare,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PFSELECT:
		r = get_user(vcpu->arch.pfault_select,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_PP:
		r = get_user(vcpu->arch.sie_block->pp,
			     (u64 __user *)reg->addr);
		break;
	case KVM_REG_S390_GBEA:
		r = get_user(vcpu->arch.sie_block->gbea,
			     (u64 __user *)reg->addr);
		break;
	default:
		break;
	}

	return r;
}

static int kvm_arch_vcpu_ioctl_initial_reset(struct kvm_vcpu *vcpu)
{
	kvm_s390_vcpu_initial_reset(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&vcpu->run->s.regs.gprs, &regs->gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	memcpy(&regs->gprs, &vcpu->run->s.regs.gprs, sizeof(regs->gprs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&vcpu->run->s.regs.acrs, &sregs->acrs, sizeof(sregs->acrs));
	memcpy(&vcpu->arch.sie_block->gcr, &sregs->crs, sizeof(sregs->crs));
	restore_access_regs(vcpu->run->s.regs.acrs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	memcpy(&sregs->acrs, &vcpu->run->s.regs.acrs, sizeof(sregs->acrs));
	memcpy(&sregs->crs, &vcpu->arch.sie_block->gcr, sizeof(sregs->crs));
	return 0;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	if (test_fp_ctl(fpu->fpc))
		return -EINVAL;
	memcpy(&vcpu->arch.guest_fpregs.fprs, &fpu->fprs, sizeof(fpu->fprs));
	vcpu->arch.guest_fpregs.fpc = fpu->fpc;
	restore_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
	restore_fp_regs(vcpu->arch.guest_fpregs.fprs);
	return 0;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	memcpy(&fpu->fprs, &vcpu->arch.guest_fpregs.fprs, sizeof(fpu->fprs));
	fpu->fpc = vcpu->arch.guest_fpregs.fpc;
	return 0;
}

static int kvm_arch_vcpu_ioctl_set_initial_psw(struct kvm_vcpu *vcpu, psw_t psw)
{
	int rc = 0;

	if (!is_vcpu_stopped(vcpu))
		rc = -EBUSY;
	else {
		vcpu->run->psw_mask = psw.mask;
		vcpu->run->psw_addr = psw.addr;
	}
	return rc;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return -EINVAL; /* not implemented yet */
}

#define VALID_GUESTDBG_FLAGS (KVM_GUESTDBG_SINGLESTEP | \
			      KVM_GUESTDBG_USE_HW_BP | \
			      KVM_GUESTDBG_ENABLE)

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	int rc = 0;

	vcpu->guest_debug = 0;
	kvm_s390_clear_bp_data(vcpu);

	if (dbg->control & ~VALID_GUESTDBG_FLAGS)
		return -EINVAL;

	if (dbg->control & KVM_GUESTDBG_ENABLE) {
		vcpu->guest_debug = dbg->control;
		/* enforce guest PER */
		atomic_set_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);

		if (dbg->control & KVM_GUESTDBG_USE_HW_BP)
			rc = kvm_s390_import_bp_data(vcpu, dbg);
	} else {
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
		vcpu->arch.guestdbg.last_bp = 0;
	}

	if (rc) {
		vcpu->guest_debug = 0;
		kvm_s390_clear_bp_data(vcpu);
		atomic_clear_mask(CPUSTAT_P, &vcpu->arch.sie_block->cpuflags);
	}

	return rc;
}

int kvm_arch_vcpu_ioctl_get_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	/* CHECK_STOP and LOAD are not supported yet */
	return is_vcpu_stopped(vcpu) ? KVM_MP_STATE_STOPPED :
				       KVM_MP_STATE_OPERATING;
}

int kvm_arch_vcpu_ioctl_set_mpstate(struct kvm_vcpu *vcpu,
				    struct kvm_mp_state *mp_state)
{
	int rc = 0;

	/* user space knows about this interface - let it control the state */
	vcpu->kvm->arch.user_cpu_state_ctrl = 1;

	switch (mp_state->mp_state) {
	case KVM_MP_STATE_STOPPED:
		kvm_s390_vcpu_stop(vcpu);
		break;
	case KVM_MP_STATE_OPERATING:
		kvm_s390_vcpu_start(vcpu);
		break;
	case KVM_MP_STATE_LOAD:
	case KVM_MP_STATE_CHECK_STOP:
		/* fall through - CHECK_STOP and LOAD are not supported yet */
	default:
		rc = -ENXIO;
	}

	return rc;
}

bool kvm_s390_cmma_enabled(struct kvm *kvm)
{
	if (!MACHINE_IS_LPAR)
		return false;
	/* only enable for z10 and later */
	if (!MACHINE_HAS_EDAT1)
		return false;
	if (!kvm->arch.use_cmma)
		return false;
	return true;
}

static bool ibs_enabled(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_IBS;
}

static int kvm_s390_handle_requests(struct kvm_vcpu *vcpu)
{
retry:
	s390_vcpu_unblock(vcpu);
	/*
	 * We use MMU_RELOAD just to re-arm the ipte notifier for the
	 * guest prefix page. gmap_ipte_notify will wait on the ptl lock.
	 * This ensures that the ipte instruction for this request has
	 * already finished. We might race against a second unmapper that
	 * wants to set the blocking bit. Let's just retry the request loop.
	 */
	if (kvm_check_request(KVM_REQ_MMU_RELOAD, vcpu)) {
		int rc;
		rc = gmap_ipte_notify(vcpu->arch.gmap,
				      kvm_s390_get_prefix(vcpu),
				      PAGE_SIZE * 2);
		if (rc)
			return rc;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
		vcpu->arch.sie_block->ihcpu = 0xffff;
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu)) {
		if (!ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 1);
			atomic_set_mask(CPUSTAT_IBS,
					&vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	if (kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu)) {
		if (ibs_enabled(vcpu)) {
			trace_kvm_s390_enable_disable_ibs(vcpu->vcpu_id, 0);
			atomic_clear_mask(CPUSTAT_IBS,
					  &vcpu->arch.sie_block->cpuflags);
		}
		goto retry;
	}

	/* nothing to do, just clear the request */
	clear_bit(KVM_REQ_UNHALT, &vcpu->requests);

	return 0;
}

/**
 * kvm_arch_fault_in_page - fault-in guest page if necessary
 * @vcpu: The corresponding virtual cpu
 * @gpa: Guest physical address
 * @writable: Whether the page should be writable or not
 *
 * Make sure that a guest page has been faulted-in on the host.
 *
 * Return: Zero on success, negative error code otherwise.
 */
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable)
{
	return gmap_fault(vcpu->arch.gmap, gpa,
			  writable ? FAULT_FLAG_WRITE : 0);
}

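/*
 * Deliver the pfault handshake to the guest: INIT is injected as a local
 * interrupt on the VCPU, DONE as a floating interrupt on the VM.
 */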
static void __kvm_inject_pfault_token(struct kvm_vcpu *vcpu, bool start_token,
				      unsigned long token)
{
	struct kvm_s390_interrupt inti;
	struct kvm_s390_irq irq;

	if (start_token) {
		irq.u.ext.ext_params2 = token;
		irq.type = KVM_S390_INT_PFAULT_INIT;
		WARN_ON_ONCE(kvm_s390_inject_vcpu(vcpu, &irq));
	} else {
		inti.type = KVM_S390_INT_PFAULT_DONE;
		inti.parm64 = token;
		WARN_ON_ONCE(kvm_s390_inject_vm(vcpu->kvm, &inti));
	}
}

void kvm_arch_async_page_not_present(struct kvm_vcpu *vcpu,
				     struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_init(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, true, work->arch.pfault_token);
}

void kvm_arch_async_page_present(struct kvm_vcpu *vcpu,
				 struct kvm_async_pf *work)
{
	trace_kvm_s390_pfault_done(vcpu, work->arch.pfault_token);
	__kvm_inject_pfault_token(vcpu, false, work->arch.pfault_token);
}

void kvm_arch_async_page_ready(struct kvm_vcpu *vcpu,
			       struct kvm_async_pf *work)
{
	/* s390 will always inject the page directly */
}

bool kvm_arch_can_inject_async_page_present(struct kvm_vcpu *vcpu)
{
	/*
	 * s390 will always inject the page directly,
	 * but we still want check_async_completion to cleanup
	 */
	return true;
}

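/*
 * Arm an async page fault only if the guest set up the pfault handshake:
 * a valid token, a matching PSW mask, external interrupts not disabled,
 * no interrupt already pending and pfault enabled on the gmap.
 */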
static int kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
{
	hva_t hva;
	struct kvm_arch_async_pf arch;
	int rc;

	if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
		return 0;
	if ((vcpu->arch.sie_block->gpsw.mask & vcpu->arch.pfault_select) !=
	    vcpu->arch.pfault_compare)
		return 0;
	if (psw_extint_disabled(vcpu))
		return 0;
	if (kvm_s390_vcpu_has_irq(vcpu, 0))
		return 0;
	if (!(vcpu->arch.sie_block->gcr[0] & 0x200ul))
		return 0;
	if (!vcpu->arch.gmap->pfault_enabled)
		return 0;

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(current->thread.gmap_addr));
	hva += current->thread.gmap_addr & ~PAGE_MASK;
	if (read_guest_real(vcpu, vcpu->arch.pfault_token, &arch.pfault_token, 8))
		return 0;

	rc = kvm_setup_async_pf(vcpu, current->thread.gmap_addr, hva, &arch);
	return rc;
}

static int vcpu_pre_run(struct kvm_vcpu *vcpu)
{
	int rc, cpuflags;

	/*
	 * On s390 notifications for arriving pages will be delivered directly
	 * to the guest but the house keeping for completed pfaults is
	 * handled outside the worker.
	 */
	kvm_check_async_pf_completion(vcpu);

	memcpy(&vcpu->arch.sie_block->gg14, &vcpu->run->s.regs.gprs[14], 16);

	if (need_resched())
		schedule();

	if (test_cpu_flag(CIF_MCCK_PENDING))
		s390_handle_mcck();

	if (!kvm_is_ucontrol(vcpu->kvm)) {
		rc = kvm_s390_deliver_pending_interrupts(vcpu);
		if (rc)
			return rc;
	}

	rc = kvm_s390_handle_requests(vcpu);
	if (rc)
		return rc;

	if (guestdbg_enabled(vcpu)) {
		kvm_s390_backup_guest_per_regs(vcpu);
		kvm_s390_patch_guest_per_regs(vcpu);
	}

	vcpu->arch.sie_block->icptcode = 0;
	cpuflags = atomic_read(&vcpu->arch.sie_block->cpuflags);
	VCPU_EVENT(vcpu, 6, "entering sie flags %x", cpuflags);
	trace_kvm_s390_sie_enter(vcpu, cpuflags);

	return 0;
}

static int vcpu_post_run(struct kvm_vcpu *vcpu, int exit_reason)
{
	int rc = -1;

	VCPU_EVENT(vcpu, 6, "exit sie icptcode %d",
		   vcpu->arch.sie_block->icptcode);
	trace_kvm_s390_sie_exit(vcpu, vcpu->arch.sie_block->icptcode);

	if (guestdbg_enabled(vcpu))
		kvm_s390_restore_guest_per_regs(vcpu);

	if (exit_reason >= 0) {
		rc = 0;
	} else if (kvm_is_ucontrol(vcpu->kvm)) {
		vcpu->run->exit_reason = KVM_EXIT_S390_UCONTROL;
		vcpu->run->s390_ucontrol.trans_exc_code =
						current->thread.gmap_addr;
		vcpu->run->s390_ucontrol.pgm_code = 0x10;
		rc = -EREMOTE;

	} else if (current->thread.gmap_pfault) {
		trace_kvm_s390_major_guest_pfault(vcpu);
		current->thread.gmap_pfault = 0;
		if (kvm_arch_setup_async_pf(vcpu)) {
			rc = 0;
		} else {
			gpa_t gpa = current->thread.gmap_addr;
			rc = kvm_arch_fault_in_page(vcpu, gpa, 1);
		}
	}

	if (rc == -1) {
		VCPU_EVENT(vcpu, 3, "%s", "fault in sie instruction");
		trace_kvm_s390_sie_fault(vcpu);
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	}

	memcpy(&vcpu->run->s.regs.gprs[14], &vcpu->arch.sie_block->gg14, 16);

	if (rc == 0) {
		if (kvm_is_ucontrol(vcpu->kvm))
			/* Don't exit for host interrupts. */
			rc = vcpu->arch.sie_block->icptcode ? -EOPNOTSUPP : 0;
		else
			rc = kvm_handle_sie_intercept(vcpu);
	}

	return rc;
}

static int __vcpu_run(struct kvm_vcpu *vcpu)
{
	int rc, exit_reason;

	/*
	 * We try to hold kvm->srcu during most of vcpu_run (except when run-
	 * ning the guest), so that memslots (and other stuff) are protected
	 */
	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

	do {
		rc = vcpu_pre_run(vcpu);
		if (rc)
			break;

		srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
		/*
		 * As PF_VCPU will be used in fault handler, between
		 * guest_enter and guest_exit should be no uaccess.
		 */
		preempt_disable();
		kvm_guest_enter();
		preempt_enable();
		exit_reason = sie64a(vcpu->arch.sie_block,
				     vcpu->run->s.regs.gprs);
		kvm_guest_exit();
		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);

		rc = vcpu_post_run(vcpu, exit_reason);
	} while (!signal_pending(current) && !guestdbg_exit_pending(vcpu) && !rc);

	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
	return rc;
}

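/*
 * sync_regs/store_regs copy the register state shared with user space in
 * kvm_run into the SIE control block before the guest runs and back out
 * afterwards.
 */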
b028ee3e
DH
1820static void sync_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1821{
1822 vcpu->arch.sie_block->gpsw.mask = kvm_run->psw_mask;
1823 vcpu->arch.sie_block->gpsw.addr = kvm_run->psw_addr;
1824 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PREFIX)
1825 kvm_s390_set_prefix(vcpu, kvm_run->s.regs.prefix);
1826 if (kvm_run->kvm_dirty_regs & KVM_SYNC_CRS) {
1827 memcpy(&vcpu->arch.sie_block->gcr, &kvm_run->s.regs.crs, 128);
d3d692c8
DH
1828 /* some control register changes require a tlb flush */
1829 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
b028ee3e
DH
1830 }
1831 if (kvm_run->kvm_dirty_regs & KVM_SYNC_ARCH0) {
1832 vcpu->arch.sie_block->cputm = kvm_run->s.regs.cputm;
1833 vcpu->arch.sie_block->ckc = kvm_run->s.regs.ckc;
1834 vcpu->arch.sie_block->todpr = kvm_run->s.regs.todpr;
1835 vcpu->arch.sie_block->pp = kvm_run->s.regs.pp;
1836 vcpu->arch.sie_block->gbea = kvm_run->s.regs.gbea;
1837 }
1838 if (kvm_run->kvm_dirty_regs & KVM_SYNC_PFAULT) {
1839 vcpu->arch.pfault_token = kvm_run->s.regs.pft;
1840 vcpu->arch.pfault_select = kvm_run->s.regs.pfs;
1841 vcpu->arch.pfault_compare = kvm_run->s.regs.pfc;
9fbd8082
DH
1842 if (vcpu->arch.pfault_token == KVM_S390_PFAULT_TOKEN_INVALID)
1843 kvm_clear_async_pf_completion_queue(vcpu);
b028ee3e
DH
1844 }
1845 kvm_run->kvm_dirty_regs = 0;
1846}
1847
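/*
 * store_regs(): the counterpart of sync_regs(); copy the current guest
 * state from the SIE control block back into kvm_run so userspace sees
 * it after KVM_RUN returns.
 */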
1848static void store_regs(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1849{
1850 kvm_run->psw_mask = vcpu->arch.sie_block->gpsw.mask;
1851 kvm_run->psw_addr = vcpu->arch.sie_block->gpsw.addr;
1852 kvm_run->s.regs.prefix = kvm_s390_get_prefix(vcpu);
1853 memcpy(&kvm_run->s.regs.crs, &vcpu->arch.sie_block->gcr, 128);
1854 kvm_run->s.regs.cputm = vcpu->arch.sie_block->cputm;
1855 kvm_run->s.regs.ckc = vcpu->arch.sie_block->ckc;
1856 kvm_run->s.regs.todpr = vcpu->arch.sie_block->todpr;
1857 kvm_run->s.regs.pp = vcpu->arch.sie_block->pp;
1858 kvm_run->s.regs.gbea = vcpu->arch.sie_block->gbea;
1859 kvm_run->s.regs.pft = vcpu->arch.pfault_token;
1860 kvm_run->s.regs.pfs = vcpu->arch.pfault_select;
1861 kvm_run->s.regs.pfc = vcpu->arch.pfault_compare;
1862}
1863
b0c632db
HC
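/*
 * kvm_arch_vcpu_ioctl_run() backs the KVM_RUN ioctl. A minimal sketch of
 * the userspace side, for illustration only (kvm_fd/vcpu_fd are
 * placeholder descriptors, error handling omitted):
 *
 *	int sz = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
 *	struct kvm_run *run = mmap(NULL, sz, PROT_READ | PROT_WRITE,
 *				   MAP_SHARED, vcpu_fd, 0);
 *	for (;;) {
 *		ioctl(vcpu_fd, KVM_RUN, 0);
 *		switch (run->exit_reason) {
 *		case KVM_EXIT_S390_SIEIC:
 *			...	handle the intercept in userspace
 *		}
 *	}
 */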
1864int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
1865{
8f2abe6a 1866 int rc;
b0c632db
HC
1867 sigset_t sigsaved;
1868
27291e21
DH
1869 if (guestdbg_exit_pending(vcpu)) {
1870 kvm_s390_prepare_debug_exit(vcpu);
1871 return 0;
1872 }
1873
b0c632db
HC
1874 if (vcpu->sigset_active)
1875 sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved);
1876
6352e4d2
DH
1877 if (!kvm_s390_user_cpu_state_ctrl(vcpu->kvm)) {
1878 kvm_s390_vcpu_start(vcpu);
1879 } else if (is_vcpu_stopped(vcpu)) {
1880 pr_err_ratelimited("kvm-s390: can't run stopped vcpu %d\n",
1881 vcpu->vcpu_id);
1882 return -EINVAL;
1883 }
b0c632db 1884
b028ee3e 1885 sync_regs(vcpu, kvm_run);
d7b0b5eb 1886
dab4079d 1887 might_fault();
a76ccff6 1888 rc = __vcpu_run(vcpu);
9ace903d 1889
b1d16c49
CE
1890 if (signal_pending(current) && !rc) {
1891 kvm_run->exit_reason = KVM_EXIT_INTR;
8f2abe6a 1892 rc = -EINTR;
b1d16c49 1893 }
8f2abe6a 1894
27291e21
DH
1895 if (guestdbg_exit_pending(vcpu) && !rc) {
1896 kvm_s390_prepare_debug_exit(vcpu);
1897 rc = 0;
1898 }
1899
b8e660b8 1900 if (rc == -EOPNOTSUPP) {
8f2abe6a
CB
1901 /* intercept cannot be handled in-kernel, prepare kvm-run */
1902 kvm_run->exit_reason = KVM_EXIT_S390_SIEIC;
1903 kvm_run->s390_sieic.icptcode = vcpu->arch.sie_block->icptcode;
8f2abe6a
CB
1904 kvm_run->s390_sieic.ipa = vcpu->arch.sie_block->ipa;
1905 kvm_run->s390_sieic.ipb = vcpu->arch.sie_block->ipb;
1906 rc = 0;
1907 }
1908
1909 if (rc == -EREMOTE) {
 1910 /* intercept was handled, but userspace support is needed;
 1911 * kvm_run has already been prepared by the handler */
1912 rc = 0;
1913 }
b0c632db 1914
b028ee3e 1915 store_regs(vcpu, kvm_run);
d7b0b5eb 1916
b0c632db
HC
1917 if (vcpu->sigset_active)
1918 sigprocmask(SIG_SETMASK, &sigsaved, NULL);
1919
b0c632db 1920 vcpu->stat.exit_userspace++;
7e8e6ab4 1921 return rc;
b0c632db
HC
1922}
1923
b0c632db
HC
1924/*
1925 * store status at address
 1926 * we have two special cases:
1927 * KVM_S390_STORE_STATUS_NOADDR: -> 0x1200 on 64 bit
1928 * KVM_S390_STORE_STATUS_PREFIXED: -> prefix
1929 */
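/*
 * Note: the single byte written to absolute/real address 163
 * (archmode = 1) is assumed here to be the architectural-mode
 * indication that accompanies a store-status operation; the register
 * contents follow in the save_area at SAVE_AREA_BASE (or at the
 * prefix-relative address).
 */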
d0bce605 1930int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long gpa)
b0c632db 1931{
092670cd 1932 unsigned char archmode = 1;
fda902cb 1933 unsigned int px;
178bd789 1934 u64 clkcomp;
d0bce605 1935 int rc;
b0c632db 1936
d0bce605
HC
1937 if (gpa == KVM_S390_STORE_STATUS_NOADDR) {
1938 if (write_guest_abs(vcpu, 163, &archmode, 1))
b0c632db 1939 return -EFAULT;
d0bce605
HC
1940 gpa = SAVE_AREA_BASE;
1941 } else if (gpa == KVM_S390_STORE_STATUS_PREFIXED) {
1942 if (write_guest_real(vcpu, 163, &archmode, 1))
b0c632db 1943 return -EFAULT;
d0bce605
HC
1944 gpa = kvm_s390_real_to_abs(vcpu, SAVE_AREA_BASE);
1945 }
1946 rc = write_guest_abs(vcpu, gpa + offsetof(struct save_area, fp_regs),
1947 vcpu->arch.guest_fpregs.fprs, 128);
1948 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, gp_regs),
1949 vcpu->run->s.regs.gprs, 128);
1950 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, psw),
1951 &vcpu->arch.sie_block->gpsw, 16);
fda902cb 1952 px = kvm_s390_get_prefix(vcpu);
d0bce605 1953 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, pref_reg),
fda902cb 1954 &px, 4);
d0bce605
HC
1955 rc |= write_guest_abs(vcpu,
1956 gpa + offsetof(struct save_area, fp_ctrl_reg),
1957 &vcpu->arch.guest_fpregs.fpc, 4);
1958 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, tod_reg),
1959 &vcpu->arch.sie_block->todpr, 4);
1960 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, timer),
1961 &vcpu->arch.sie_block->cputm, 8);
178bd789 1962 clkcomp = vcpu->arch.sie_block->ckc >> 8;
d0bce605
HC
1963 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, clk_cmp),
1964 &clkcomp, 8);
1965 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, acc_regs),
1966 &vcpu->run->s.regs.acrs, 64);
1967 rc |= write_guest_abs(vcpu, gpa + offsetof(struct save_area, ctrl_regs),
1968 &vcpu->arch.sie_block->gcr, 128);
1969 return rc ? -EFAULT : 0;
b0c632db
HC
1970}
1971
e879892c
TH
1972int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr)
1973{
1974 /*
1975 * The guest FPRS and ACRS are in the host FPRS/ACRS due to the lazy
 1976 * copying in vcpu load/put. Let's update our copies before we save
 1977 * them into the save area
1978 */
1979 save_fp_ctl(&vcpu->arch.guest_fpregs.fpc);
1980 save_fp_regs(vcpu->arch.guest_fpregs.fprs);
1981 save_access_regs(vcpu->run->s.regs.acrs);
1982
1983 return kvm_s390_store_status_unloaded(vcpu, addr);
1984}
1985
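/*
 * IBS request helpers: clear any opposite pending request, queue the
 * ENABLE/DISABLE request and force the vCPU out of SIE so the request
 * is acted upon before the guest runs again. Per the start/stop code
 * below, IBS is only kept enabled while exactly one started vCPU exists.
 */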
8ad35755
DH
1986static void __disable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
1987{
1988 kvm_check_request(KVM_REQ_ENABLE_IBS, vcpu);
1989 kvm_make_request(KVM_REQ_DISABLE_IBS, vcpu);
1990 exit_sie_sync(vcpu);
1991}
1992
1993static void __disable_ibs_on_all_vcpus(struct kvm *kvm)
1994{
1995 unsigned int i;
1996 struct kvm_vcpu *vcpu;
1997
1998 kvm_for_each_vcpu(i, vcpu, kvm) {
1999 __disable_ibs_on_vcpu(vcpu);
2000 }
2001}
2002
2003static void __enable_ibs_on_vcpu(struct kvm_vcpu *vcpu)
2004{
2005 kvm_check_request(KVM_REQ_DISABLE_IBS, vcpu);
2006 kvm_make_request(KVM_REQ_ENABLE_IBS, vcpu);
2007 exit_sie_sync(vcpu);
2008}
2009
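/*
 * kvm_s390_vcpu_start(): leave the STOPPED state. Under
 * arch.start_stop_lock, the number of already started vCPUs decides
 * whether IBS may be enabled (we are the only runner) or has to be
 * dropped on all vCPUs (a second runner is coming up).
 */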
6852d7b6
DH
2010void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu)
2011{
8ad35755
DH
2012 int i, online_vcpus, started_vcpus = 0;
2013
2014 if (!is_vcpu_stopped(vcpu))
2015 return;
2016
6852d7b6 2017 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 1);
8ad35755 2018 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2019 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2020 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2021
2022 for (i = 0; i < online_vcpus; i++) {
2023 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i]))
2024 started_vcpus++;
2025 }
2026
2027 if (started_vcpus == 0) {
2028 /* we're the only active VCPU -> speed it up */
2029 __enable_ibs_on_vcpu(vcpu);
2030 } else if (started_vcpus == 1) {
2031 /*
2032 * As we are starting a second VCPU, we have to disable
2033 * the IBS facility on all VCPUs to remove potentially
 2034 * outstanding ENABLE requests.
2035 */
2036 __disable_ibs_on_all_vcpus(vcpu->kvm);
2037 }
2038
6852d7b6 2039 atomic_clear_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2040 /*
2041 * Another VCPU might have used IBS while we were offline.
2042 * Let's play safe and flush the VCPU at startup.
2043 */
d3d692c8 2044 kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
433b9ee4 2045 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2046 return;
6852d7b6
DH
2047}
2048
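/*
 * kvm_s390_vcpu_stop(): enter the STOPPED state. Pending SIGP STOP
 * interrupts are cleared, CPUSTAT_STOPPED is set, and if exactly one
 * started vCPU remains it gets IBS enabled again.
 */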
2049void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu)
2050{
8ad35755
DH
2051 int i, online_vcpus, started_vcpus = 0;
2052 struct kvm_vcpu *started_vcpu = NULL;
2053
2054 if (is_vcpu_stopped(vcpu))
2055 return;
2056
6852d7b6 2057 trace_kvm_s390_vcpu_start_stop(vcpu->vcpu_id, 0);
8ad35755 2058 /* Only one cpu at a time may enter/leave the STOPPED state. */
433b9ee4 2059 spin_lock(&vcpu->kvm->arch.start_stop_lock);
8ad35755
DH
2060 online_vcpus = atomic_read(&vcpu->kvm->online_vcpus);
2061
32f5ff63 2062 /* SIGP STOP and SIGP STOP AND STORE STATUS have been fully processed */
6cddd432 2063 kvm_s390_clear_stop_irq(vcpu);
32f5ff63 2064
6cddd432 2065 atomic_set_mask(CPUSTAT_STOPPED, &vcpu->arch.sie_block->cpuflags);
8ad35755
DH
2066 __disable_ibs_on_vcpu(vcpu);
2067
2068 for (i = 0; i < online_vcpus; i++) {
2069 if (!is_vcpu_stopped(vcpu->kvm->vcpus[i])) {
2070 started_vcpus++;
2071 started_vcpu = vcpu->kvm->vcpus[i];
2072 }
2073 }
2074
2075 if (started_vcpus == 1) {
2076 /*
2077 * As we only have one VCPU left, we want to enable the
2078 * IBS facility for that VCPU to speed it up.
2079 */
2080 __enable_ibs_on_vcpu(started_vcpu);
2081 }
2082
433b9ee4 2083 spin_unlock(&vcpu->kvm->arch.start_stop_lock);
8ad35755 2084 return;
6852d7b6
DH
2085}
2086
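/*
 * Per-vCPU KVM_ENABLE_CAP. Only KVM_CAP_S390_CSS_SUPPORT is handled
 * here; although enabled through a vCPU ioctl, it flips the VM-wide
 * css_support flag so that channel-subsystem handling is left to
 * userspace.
 */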
d6712df9
CH
2087static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
2088 struct kvm_enable_cap *cap)
2089{
2090 int r;
2091
2092 if (cap->flags)
2093 return -EINVAL;
2094
2095 switch (cap->cap) {
fa6b7fe9
CH
2096 case KVM_CAP_S390_CSS_SUPPORT:
2097 if (!vcpu->kvm->arch.css_support) {
2098 vcpu->kvm->arch.css_support = 1;
2099 trace_kvm_s390_enable_css(vcpu->kvm);
2100 }
2101 r = 0;
2102 break;
d6712df9
CH
2103 default:
2104 r = -EINVAL;
2105 break;
2106 }
2107 return r;
2108}
2109
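/*
 * vCPU ioctl dispatcher. As an illustration only (vcpu_fd and
 * source_cpu_addr are placeholders), injecting an interrupt from
 * userspace via the legacy KVM_S390_INTERRUPT ioctl handled below
 * might look like:
 *
 *	struct kvm_s390_interrupt irq = {
 *		.type = KVM_S390_INT_EMERGENCY,
 *		.parm = source_cpu_addr,
 *	};
 *	ioctl(vcpu_fd, KVM_S390_INTERRUPT, &irq);
 *
 * The handler converts this into a struct kvm_s390_irq and queues it
 * on the vCPU via kvm_s390_inject_vcpu().
 */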
b0c632db
HC
2110long kvm_arch_vcpu_ioctl(struct file *filp,
2111 unsigned int ioctl, unsigned long arg)
2112{
2113 struct kvm_vcpu *vcpu = filp->private_data;
2114 void __user *argp = (void __user *)arg;
800c1065 2115 int idx;
bc923cc9 2116 long r;
b0c632db 2117
93736624
AK
2118 switch (ioctl) {
2119 case KVM_S390_INTERRUPT: {
ba5c1e9b 2120 struct kvm_s390_interrupt s390int;
383d0b05 2121 struct kvm_s390_irq s390irq;
ba5c1e9b 2122
93736624 2123 r = -EFAULT;
ba5c1e9b 2124 if (copy_from_user(&s390int, argp, sizeof(s390int)))
93736624 2125 break;
383d0b05
JF
2126 if (s390int_to_s390irq(&s390int, &s390irq))
2127 return -EINVAL;
2128 r = kvm_s390_inject_vcpu(vcpu, &s390irq);
93736624 2129 break;
ba5c1e9b 2130 }
b0c632db 2131 case KVM_S390_STORE_STATUS:
800c1065 2132 idx = srcu_read_lock(&vcpu->kvm->srcu);
bc923cc9 2133 r = kvm_s390_vcpu_store_status(vcpu, arg);
800c1065 2134 srcu_read_unlock(&vcpu->kvm->srcu, idx);
bc923cc9 2135 break;
b0c632db
HC
2136 case KVM_S390_SET_INITIAL_PSW: {
2137 psw_t psw;
2138
bc923cc9 2139 r = -EFAULT;
b0c632db 2140 if (copy_from_user(&psw, argp, sizeof(psw)))
bc923cc9
AK
2141 break;
2142 r = kvm_arch_vcpu_ioctl_set_initial_psw(vcpu, psw);
2143 break;
b0c632db
HC
2144 }
2145 case KVM_S390_INITIAL_RESET:
bc923cc9
AK
2146 r = kvm_arch_vcpu_ioctl_initial_reset(vcpu);
2147 break;
14eebd91
CO
2148 case KVM_SET_ONE_REG:
2149 case KVM_GET_ONE_REG: {
2150 struct kvm_one_reg reg;
2151 r = -EFAULT;
2152 if (copy_from_user(&reg, argp, sizeof(reg)))
2153 break;
2154 if (ioctl == KVM_SET_ONE_REG)
2155 r = kvm_arch_vcpu_ioctl_set_one_reg(vcpu, &reg);
2156 else
2157 r = kvm_arch_vcpu_ioctl_get_one_reg(vcpu, &reg);
2158 break;
2159 }
27e0393f
CO
2160#ifdef CONFIG_KVM_S390_UCONTROL
2161 case KVM_S390_UCAS_MAP: {
2162 struct kvm_s390_ucas_mapping ucasmap;
2163
2164 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2165 r = -EFAULT;
2166 break;
2167 }
2168
2169 if (!kvm_is_ucontrol(vcpu->kvm)) {
2170 r = -EINVAL;
2171 break;
2172 }
2173
2174 r = gmap_map_segment(vcpu->arch.gmap, ucasmap.user_addr,
2175 ucasmap.vcpu_addr, ucasmap.length);
2176 break;
2177 }
2178 case KVM_S390_UCAS_UNMAP: {
2179 struct kvm_s390_ucas_mapping ucasmap;
2180
2181 if (copy_from_user(&ucasmap, argp, sizeof(ucasmap))) {
2182 r = -EFAULT;
2183 break;
2184 }
2185
2186 if (!kvm_is_ucontrol(vcpu->kvm)) {
2187 r = -EINVAL;
2188 break;
2189 }
2190
2191 r = gmap_unmap_segment(vcpu->arch.gmap, ucasmap.vcpu_addr,
2192 ucasmap.length);
2193 break;
2194 }
2195#endif
ccc7910f 2196 case KVM_S390_VCPU_FAULT: {
527e30b4 2197 r = gmap_fault(vcpu->arch.gmap, arg, 0);
ccc7910f
CO
2198 break;
2199 }
d6712df9
CH
2200 case KVM_ENABLE_CAP:
2201 {
2202 struct kvm_enable_cap cap;
2203 r = -EFAULT;
2204 if (copy_from_user(&cap, argp, sizeof(cap)))
2205 break;
2206 r = kvm_vcpu_ioctl_enable_cap(vcpu, &cap);
2207 break;
2208 }
b0c632db 2209 default:
3e6afcf1 2210 r = -ENOTTY;
b0c632db 2211 }
bc923cc9 2212 return r;
b0c632db
HC
2213}
2214
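/*
 * mmap fault handler for the vCPU fd. Only user-controlled (ucontrol)
 * VMs may map anything here: the page at KVM_S390_SIE_PAGE_OFFSET
 * resolves to the vCPU's SIE control block, everything else gets
 * SIGBUS.
 */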
5b1c1493
CO
2215int kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
2216{
2217#ifdef CONFIG_KVM_S390_UCONTROL
2218 if ((vmf->pgoff == KVM_S390_SIE_PAGE_OFFSET)
2219 && (kvm_is_ucontrol(vcpu->kvm))) {
2220 vmf->page = virt_to_page(vcpu->arch.sie_block);
2221 get_page(vmf->page);
2222 return 0;
2223 }
2224#endif
2225 return VM_FAULT_SIGBUS;
2226}
2227
5587027c
AK
2228int kvm_arch_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
2229 unsigned long npages)
db3fe4eb
TY
2230{
2231 return 0;
2232}
2233
b0c632db 2234/* Section: memory related */
f7784b8e
MT
2235int kvm_arch_prepare_memory_region(struct kvm *kvm,
2236 struct kvm_memory_slot *memslot,
7b6195a9
TY
2237 struct kvm_userspace_memory_region *mem,
2238 enum kvm_mr_change change)
b0c632db 2239{
dd2887e7
NW
 2240 /* A few sanity checks. Memory slots have to start and end on a
 2241 segment boundary (1 MB). The memory in userland may be fragmented
 2242 into various different vmas, and it is fine to mmap() and munmap()
 2243 parts of this slot at any time after this call. */
b0c632db 2244
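	/* 0xfffff masks the low 20 bits, i.e. both checks test 1 MB alignment */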
598841ca 2245 if (mem->userspace_addr & 0xffffful)
b0c632db
HC
2246 return -EINVAL;
2247
598841ca 2248 if (mem->memory_size & 0xffffful)
b0c632db
HC
2249 return -EINVAL;
2250
f7784b8e
MT
2251 return 0;
2252}
2253
2254void kvm_arch_commit_memory_region(struct kvm *kvm,
2255 struct kvm_userspace_memory_region *mem,
8482644a
TY
2256 const struct kvm_memory_slot *old,
2257 enum kvm_mr_change change)
f7784b8e 2258{
f7850c92 2259 int rc;
f7784b8e 2260
2cef4deb
CB
2261 /* If the basics of the memslot do not change, we do not want
2262 * to update the gmap. Every update causes several unnecessary
2263 * segment translation exceptions. This is usually handled just
2264 * fine by the normal fault handler + gmap, but it will also
2265 * cause faults on the prefix page of running guest CPUs.
2266 */
2267 if (old->userspace_addr == mem->userspace_addr &&
2268 old->base_gfn * PAGE_SIZE == mem->guest_phys_addr &&
2269 old->npages * PAGE_SIZE == mem->memory_size)
2270 return;
598841ca
CO
2271
2272 rc = gmap_map_segment(kvm->arch.gmap, mem->userspace_addr,
2273 mem->guest_phys_addr, mem->memory_size);
2274 if (rc)
f7850c92 2275 printk(KERN_WARNING "kvm-s390: failed to commit memory region\n");
598841ca 2276 return;
b0c632db
HC
2277}
2278
b0c632db
HC
2279static int __init kvm_s390_init(void)
2280{
9d8d5786 2281 return kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
b0c632db
HC
2282}
2283
2284static void __exit kvm_s390_exit(void)
2285{
2286 kvm_exit();
2287}
2288
2289module_init(kvm_s390_init);
2290module_exit(kvm_s390_exit);
566af940
CH
2291
2292/*
2293 * Enable autoloading of the kvm module.
2294 * Note that we add the module alias here instead of virt/kvm/kvm_main.c
2295 * since x86 takes a different approach.
2296 */
2297#include <linux/miscdevice.h>
2298MODULE_ALIAS_MISCDEV(KVM_MINOR);
2299MODULE_ALIAS("devname:kvm");