/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_vcpu *cpup;
	s64 hostclk, val;
	u64 op2;
	int i;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (get_guest(vcpu, val, (u64 __user *) op2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	if (store_tod_clock(&hostclk)) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
	val = (val - hostclk) & ~0x3fUL;

	mutex_lock(&vcpu->kvm->lock);
	kvm_for_each_vcpu(i, cpup, vcpu->kvm)
		cpup->arch.sie_block->epoch = val;
	mutex_unlock(&vcpu->kvm->lock);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

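/* Handle SPX (SET PREFIX) interception */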
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address = 0;
	u8 tmp;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	if (get_guest(vcpu, address, (u32 __user *) operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	address = address & 0x7fffe000u;

	/* make sure that the new value is valid memory */
	if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
	   (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);

	VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

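/* Handle STPX (STORE PREFIX) interception */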
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = vcpu->arch.sie_block->prefix;
	address = address & 0x7fffe000u;

	/* store the value */
	if (put_guest(vcpu, address, (u32 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

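/* Handle STAP (STORE CPU ADDRESS) interception */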
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u64 useraddr;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_s(vcpu);

	if (useraddr & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
	trace_kvm_s390_handle_stap(vcpu, useraddr);
	return 0;
}

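/* Lazily enable host storage key handling on first use of a key instruction */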
static void __skey_check_enable(struct kvm_vcpu *vcpu)
{
	if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
		return;

	s390_enable_skey();
	trace_kvm_s390_skey_related_inst(vcpu);
	vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
}

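/* Handle ISKE, SSKE and RRBE (storage key instructions) interception */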
static int handle_skey(struct kvm_vcpu *vcpu)
{
	__skey_check_enable(vcpu);

	vcpu->stat.instruction_storage_key++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
	return 0;
}

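/* Handle TB (TEST BLOCK) interception */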
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	unsigned long hva;
	gpa_t addr;
	int reg2;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_real_to_abs(vcpu, addr);

	hva = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (clear_user((void __user *)hva, PAGE_SIZE) != 0)
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

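/* Handle TPI (TEST PENDING INTERRUPTION) interception */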
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	u64 addr;
	int cc;

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	cc = 0;
	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti)
		goto no_interrupt;
	cc = 1;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		if (put_guest(vcpu, inti->io.subchannel_id, (u16 __user *)addr)
		    || put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *)(addr + 2))
		    || put_guest(vcpu, inti->io.io_int_parm, (u32 __user *)(addr + 4)))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
		put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
		put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
		put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
	}
	kfree(inti);
no_interrupt:
	/* Set condition code and we're done. */
	kvm_s390_set_psw_cc(vcpu, cc);
	return 0;
}

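/* Handle TSCH (TEST SUBCHANNEL) interception */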
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;

	inti = kvm_s390_get_io_int(vcpu->kvm, 0,
				   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

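/* Handle STFL (STORE FACILITY LIST) interception */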
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
			   vfacilities, 4);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	VCPU_EVENT(vcpu, 5, "store facility list value %x",
		   *(unsigned int *) vfacilities);
	trace_kvm_s390_handle_stfl(vcpu, *(unsigned int *) vfacilities);
	return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
	/* Check whether the new psw is enabled for machine checks. */
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
		kvm_s390_deliver_pending_machine_checks(vcpu);
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

static int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	return 1;
}

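/* Handle LPSW (LOAD PSW) interception */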
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

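/* Handle LPSWE (LOAD PSW EXTENDED) interception */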
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw)))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	handle_new_psw(vcpu);
	return 0;
}

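/* Handle STIDP (STORE CPU ID) interception */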
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 operand2;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
	return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0 ; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

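/* Handle STSI (STORE SYSTEM INFORMATION) interception */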
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
		rc = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		goto out_exception;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out_exception:
	free_page(mem);
	return rc;
}

static const intercept_handler_t b2_handlers[256] = {
	[0x02] = handle_stidp,
	[0x04] = handle_set_clock,
	[0x10] = handle_set_prefix,
	[0x11] = handle_store_prefix,
	[0x12] = handle_store_cpu_address,
	[0x29] = handle_skey,
	[0x2a] = handle_skey,
	[0x2b] = handle_skey,
	[0x2c] = handle_test_block,
	[0x30] = handle_io_inst,
	[0x31] = handle_io_inst,
	[0x32] = handle_io_inst,
	[0x33] = handle_io_inst,
	[0x34] = handle_io_inst,
	[0x35] = handle_io_inst,
	[0x36] = handle_io_inst,
	[0x37] = handle_io_inst,
	[0x38] = handle_io_inst,
	[0x39] = handle_io_inst,
	[0x3a] = handle_io_inst,
	[0x3b] = handle_io_inst,
	[0x3c] = handle_io_inst,
	[0x5f] = handle_io_inst,
	[0x74] = handle_io_inst,
	[0x76] = handle_io_inst,
	[0x7d] = handle_stsi,
	[0xb1] = handle_stfl,
	[0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/*
	 * A lot of B2 instructions are privileged. Here we check for
	 * the privileged ones, that we can handle in the kernel.
	 * Anything else goes to userspace.
	 */
	handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

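/* Handle EPSW (EXTRACT PSW) interception */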
static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED   0xfffc0101UL
#define PFMF_SK         0x00020000UL
#define PFMF_CF         0x00010000UL
#define PFMF_UI         0x00008000UL
#define PFMF_FSC        0x00007000UL
#define PFMF_NQ         0x00000800UL
#define PFMF_MR         0x00000400UL
#define PFMF_MC         0x00000200UL
#define PFMF_KEY        0x000000feUL

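/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */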
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;
	unsigned long start, end;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!MACHINE_HAS_PFMF)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if the host supports it */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ && !test_facility(14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* No support for conditional-SSKE */
	if (vcpu->run->s.regs.gprs[reg1] & (PFMF_MR | PFMF_MC))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
		break;
	case 0x00001000:
		end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
		break;
	/* We don't support EDAT2
	case 0x00002000:
		end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
		break;*/
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}
	while (start < end) {
		unsigned long useraddr;

		useraddr = gmap_translate(start, vcpu->arch.gmap);
		if (IS_ERR((void *)useraddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (clear_user((void __user *)useraddr, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			__skey_check_enable(vcpu);
			if (set_guest_storage_key(current->mm, useraddr,
					vcpu->run->s.regs.gprs[reg1] & PFMF_KEY,
					vcpu->run->s.regs.gprs[reg1] & PFMF_NQ))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC)
		vcpu->run->s.regs.gprs[reg2] = end;
	return 0;
}

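/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) interception */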
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo, cbrle;
	struct gmap *gmap;
	int i;

	VCPU_EVENT(vcpu, 5, "cmma release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!kvm_s390_cmma_enabled(vcpu->kvm))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Rewind PSW to repeat the ESSA instruction */
	vcpu->arch.sie_block->gpsw.addr =
		__rewind_psw(vcpu->arch.sie_block->gpsw, 4);
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i) {
		cbrle = cbrlo[i];
		if (unlikely(cbrle & ~PAGE_MASK || cbrle < 2 * PAGE_SIZE))
			/* invalid entry */
			break;
		/* try to free backing */
		__gmap_zap(cbrle, gmap);
	}
	up_read(&gmap->mm->mmap_sem);
	if (i < entries)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

static const intercept_handler_t b9_handlers[256] = {
	[0x8d] = handle_epsw,
	[0xab] = handle_essa,
	[0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	/* This is handled just as for the B2 instructions. */
	handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);

	return -EOPNOTSUPP;
}

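/* Handle LCTL (LOAD CONTROL) interception */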
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	u32 val = 0;
	int reg, rc;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rs(vcpu);

	if (useraddr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 5, "lctl r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, useraddr);

	reg = reg1;
	do {
		rc = get_guest(vcpu, val, (u32 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= val;
		useraddr += 4;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

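/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */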
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	u64 useraddr;
	int reg, rc;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	useraddr = kvm_s390_get_base_disp_rsy(vcpu);

	if (useraddr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	reg = reg1;

	VCPU_EVENT(vcpu, 5, "lctlg r1:%x, r3:%x, addr:%llx", reg1, reg3,
		   useraddr);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, useraddr);

	do {
		rc = get_guest(vcpu, vcpu->arch.sie_block->gcr[reg],
			       (u64 __user *) useraddr);
		if (rc)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		useraddr += 8;
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);

	return 0;
}

static const intercept_handler_t eb_handlers[256] = {
	[0x2f] = handle_lctlg,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}

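/* Handle TPROT (TEST PROTECTION) interception */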
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	struct vm_area_struct *vma;
	unsigned long user_address;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * guest DAT == off
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		return -EOPNOTSUPP;

	down_read(&current->mm->mmap_sem);
	user_address = __gmap_translate(address1, vcpu->arch.gmap);
	if (IS_ERR_VALUE(user_address))
		goto out_inject;
	vma = find_vma(current->mm, user_address);
	if (!vma)
		goto out_inject;
	vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
	if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
		vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

	up_read(&current->mm->mmap_sem);
	return 0;

out_inject:
	up_read(&current->mm->mmap_sem);
	return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	/* For e5xx... instructions we only handle TPROT */
	if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
		return handle_tprot(vcpu);
	return -EOPNOTSUPP;
}

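/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */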
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static const intercept_handler_t x01_handlers[256] = {
	[0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	intercept_handler_t handler;

	handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
	if (handler)
		return handler(vcpu);
	return -EOPNOTSUPP;
}