// SPDX-License-Identifier: GPL-2.0
/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2018
 *
 * Author(s): Carsten Otte <cotte@de.ibm.com>
 *            Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/page-states.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

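/* Lazily enable runtime instrumentation (facility 64) and retry the instruction */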
static int handle_ri(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ri++;

	if (test_kvm_facility(vcpu->kvm, 64)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
		vcpu->arch.sie_block->ecb3 |= ECB3_RI;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

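/* 0xaa.. interceptions: only the runtime-instrumentation controls are emulated */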
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
		return handle_ri(vcpu);
	else
		return -EOPNOTSUPP;
}

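/* Lazily enable guarded storage (facility 133) and retry the instruction */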
static int handle_gs(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_gs++;

	if (test_kvm_facility(vcpu->kvm, 133)) {
		VCPU_EVENT(vcpu, 3, "%s", "ENABLE: GS (lazy)");
		preempt_disable();
		__ctl_set_bit(2, 4);
		current->thread.gs_cb = (struct gs_cb *)&vcpu->run->s.regs.gscb;
		restore_gs_cb(current->thread.gs_cb);
		preempt_enable();
		vcpu->arch.sie_block->ecb |= ECB_GS;
		vcpu->arch.sie_block->ecd |= ECD_HOSTREGMGMT;
		vcpu->arch.gs_enabled = 1;
		kvm_s390_retry_instr(vcpu);
		return 0;
	} else
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

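/* 0xe3.. interceptions: the guarded-storage control instructions */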
int kvm_s390_handle_e3(struct kvm_vcpu *vcpu)
{
	int code = vcpu->arch.sie_block->ipb & 0xff;

	if (code == 0x49 || code == 0x4d)
		return handle_gs(vcpu);
	else
		return -EOPNOTSUPP;
}
/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_vm_tod_clock gtod = { 0 };
	int rc;
	u8 ar;
	u64 op2;

	vcpu->stat.instruction_sck++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (op2 & 7)	/* Operand must be on a doubleword boundary */
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, op2, ar, &gtod.tod, sizeof(gtod.tod));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", gtod.tod);
	kvm_s390_set_tod_clock(vcpu->kvm, &gtod);

	kvm_s390_set_psw_cc(vcpu, 0);
	return 0;
}

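/* Handle SPX (SET PREFIX) interception */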
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_spx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* get the value */
	rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	address &= 0x7fffe000u;

	/*
	 * Make sure the new value is valid memory. We only need to check the
	 * first page, since address is 8k aligned and memory pieces are always
	 * at least 1MB aligned and have at least a size of 1MB.
	 */
	if (kvm_is_error_gpa(vcpu->kvm, address))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	kvm_s390_set_prefix(vcpu, address);
	trace_kvm_s390_handle_prefix(vcpu, 1, address);
	return 0;
}

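/* Handle STPX (STORE PREFIX) interception */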
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
	u64 operand2;
	u32 address;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stpx++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	/* must be word boundary */
	if (operand2 & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	address = kvm_s390_get_prefix(vcpu);

	/* store the value */
	rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
	trace_kvm_s390_handle_prefix(vcpu, 0, address);
	return 0;
}

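/* Handle STAP (STORE CPU ADDRESS) interception */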
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
	u16 vcpu_id = vcpu->vcpu_id;
	u64 ga;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stap++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (ga & 1)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
	trace_kvm_s390_handle_stap(vcpu, ga);
	return 0;
}

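/* Enable storage-key handling for the guest on first use of a key instruction */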
int kvm_s390_skey_check_enable(struct kvm_vcpu *vcpu)
{
	int rc;

	trace_kvm_s390_skey_related_inst(vcpu);
	/* Already enabled? */
	if (vcpu->arch.skey_enabled)
		return 0;

	rc = s390_enable_skey();
	VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
	if (rc)
		return rc;

	if (kvm_s390_test_cpuflags(vcpu, CPUSTAT_KSS))
		kvm_s390_clear_cpuflags(vcpu, CPUSTAT_KSS);
	if (!vcpu->kvm->arch.use_skf)
		vcpu->arch.sie_block->ictl |= ICTL_ISKE | ICTL_SSKE | ICTL_RRBE;
	else
		vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
	vcpu->arch.skey_enabled = true;
	return 0;
}

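/*
 * Enable storage-key handling; returns -EAGAIN when the storage-key
 * facility lets SIE interpret the retried instruction itself.
 */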
static int try_handle_skey(struct kvm_vcpu *vcpu)
{
	int rc;

	rc = kvm_s390_skey_check_enable(vcpu);
	if (rc)
		return rc;
	if (vcpu->kvm->arch.use_skf) {
		/* with storage-key facility, SIE interprets it for us */
		kvm_s390_retry_instr(vcpu);
		VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
		return -EAGAIN;
	}
	return 0;
}

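/* Handle ISKE (INSERT STORAGE KEY EXTENDED) interception */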
static int handle_iske(struct kvm_vcpu *vcpu)
{
	unsigned long gaddr, vmaddr;
	unsigned char key;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_iske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	down_read(&current->mm->mmap_sem);
	rc = get_guest_storage_key(current->mm, vmaddr, &key);

	if (rc) {
		rc = fixup_user_fault(current, current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			up_read(&current->mm->mmap_sem);
			goto retry;
		}
	}
	up_read(&current->mm->mmap_sem);
	if (rc)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	vcpu->run->s.regs.gprs[reg1] &= ~0xff;
	vcpu->run->s.regs.gprs[reg1] |= key;
	return 0;
}

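/* Handle RRBE (RESET REFERENCE BIT EXTENDED) interception */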
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
	unsigned long vmaddr, gaddr;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_rrbe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	gaddr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	gaddr = kvm_s390_logical_to_effective(vcpu, gaddr);
	gaddr = kvm_s390_real_to_abs(vcpu, gaddr);
	vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(gaddr));
	if (kvm_is_error_hva(vmaddr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
retry:
	unlocked = false;
	down_read(&current->mm->mmap_sem);
	rc = reset_guest_reference_bit(current->mm, vmaddr);
	if (rc < 0) {
		rc = fixup_user_fault(current, current->mm, vmaddr,
				      FAULT_FLAG_WRITE, &unlocked);
		if (!rc) {
			up_read(&current->mm->mmap_sem);
			goto retry;
		}
	}
	up_read(&current->mm->mmap_sem);
	if (rc < 0)
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	kvm_s390_set_psw_cc(vcpu, rc);
	return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
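/* Handle SSKE (SET STORAGE KEY EXTENDED) interception */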
static int handle_sske(struct kvm_vcpu *vcpu)
{
	unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
	unsigned long start, end;
	unsigned char key, oldkey;
	int reg1, reg2;
	bool unlocked;
	int rc;

	vcpu->stat.instruction_sske++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	rc = try_handle_skey(vcpu);
	if (rc)
		return rc != -EAGAIN ? rc : 0;

	if (!test_kvm_facility(vcpu->kvm, 8))
		m3 &= ~SSKE_MB;
	if (!test_kvm_facility(vcpu->kvm, 10))
		m3 &= ~(SSKE_MC | SSKE_MR);
	if (!test_kvm_facility(vcpu->kvm, 14))
		m3 &= ~SSKE_NQ;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);
	if (m3 & SSKE_MB) {
		/* start already designates an absolute address */
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
	} else {
		start = kvm_s390_real_to_abs(vcpu, start);
		end = start + PAGE_SIZE;
	}

	while (start != end) {
		unsigned long vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		unlocked = false;

		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		down_read(&current->mm->mmap_sem);
		rc = cond_set_guest_storage_key(current->mm, vmaddr, key, &oldkey,
						m3 & SSKE_NQ, m3 & SSKE_MR,
						m3 & SSKE_MC);

		if (rc < 0) {
			rc = fixup_user_fault(current, current->mm, vmaddr,
					      FAULT_FLAG_WRITE, &unlocked);
			rc = !rc ? -EAGAIN : rc;
		}
		up_read(&current->mm->mmap_sem);
		if (rc == -EFAULT)
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		if (rc >= 0)
			start += PAGE_SIZE;
	}

	if (m3 & (SSKE_MC | SSKE_MR)) {
		if (m3 & SSKE_MB) {
			/* skey in reg1 is unpredictable */
			kvm_s390_set_psw_cc(vcpu, 3);
		} else {
			kvm_s390_set_psw_cc(vcpu, rc);
			vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
			vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
		}
	}
	if (m3 & SSKE_MB) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT)
			vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
		else
			vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
		end = kvm_s390_logical_to_effective(vcpu, end);
		vcpu->run->s.regs.gprs[reg2] |= end;
	}
	return 0;
}

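/* Wait until the IPTE lock is free, then retry the intercepted instruction */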
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ipte_interlock++;
	if (psw_bits(vcpu->arch.sie_block->gpsw).pstate)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
	kvm_s390_retry_instr(vcpu);
	VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
	return 0;
}

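/* Handle TB (TEST BLOCK) interception */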
static int handle_test_block(struct kvm_vcpu *vcpu)
{
	gpa_t addr;
	int reg2;

	vcpu->stat.instruction_tb++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
	addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	addr = kvm_s390_logical_to_effective(vcpu, addr);
	if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
		return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	addr = kvm_s390_real_to_abs(vcpu, addr);

	if (kvm_is_error_gpa(vcpu->kvm, addr))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	/*
	 * We don't expect errors on modern systems, and do not care
	 * about storage keys (yet), so let's just clear the page.
	 */
	if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
		return -EFAULT;
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return 0;
}

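/* Handle TPI (TEST PENDING INTERRUPTION) interception */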
static int handle_tpi(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti;
	unsigned long len;
	u32 tpi_data[3];
	int rc;
	u64 addr;
	u8 ar;

	vcpu->stat.instruction_tpi++;

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
	if (!inti) {
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
	tpi_data[1] = inti->io.io_int_parm;
	tpi_data[2] = inti->io.io_int_word;
	if (addr) {
		/*
		 * Store the two-word I/O interruption code into the
		 * provided area.
		 */
		len = sizeof(tpi_data) - 4;
		rc = write_guest(vcpu, addr, ar, &tpi_data, len);
		if (rc) {
			rc = kvm_s390_inject_prog_cond(vcpu, rc);
			goto reinject_interrupt;
		}
	} else {
		/*
		 * Store the three-word I/O interruption code into
		 * the appropriate lowcore area.
		 */
		len = sizeof(tpi_data);
		if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
			/* failed writes to the low core are not recoverable */
			rc = -EFAULT;
			goto reinject_interrupt;
		}
	}

	/* irq was successfully handed to the guest */
	kfree(inti);
	kvm_s390_set_psw_cc(vcpu, 1);
	return 0;
reinject_interrupt:
	/*
	 * If we encounter a problem storing the interruption code, the
	 * instruction is suppressed from the guest's view: reinject the
	 * interrupt.
	 */
	if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
		kfree(inti);
		rc = -EFAULT;
	}
	/* don't set the cc, a pgm irq was injected or we drop to user space */
	return rc ? -EFAULT : 0;
}

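/* Handle TSCH (TEST SUBCHANNEL) by preparing an exit to userspace */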
static int handle_tsch(struct kvm_vcpu *vcpu)
{
	struct kvm_s390_interrupt_info *inti = NULL;
	const u64 isc_mask = 0xffUL << 24; /* all iscs set */

	vcpu->stat.instruction_tsch++;

	/* a valid schid has at least one bit set */
	if (vcpu->run->s.regs.gprs[1])
		inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
					   vcpu->run->s.regs.gprs[1]);

	/*
	 * Prepare exit to userspace.
	 * We indicate whether we dequeued a pending I/O interrupt
	 * so that userspace can re-inject it if the instruction gets
	 * a program check. While this may re-order the pending I/O
	 * interrupts, this is no problem since the priority is kept
	 * intact.
	 */
	vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
	vcpu->run->s390_tsch.dequeued = !!inti;
	if (inti) {
		vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
		vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
		vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
		vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
	}
	vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
	kfree(inti);
	return -EREMOTE;
}

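/* Dispatch I/O instructions: emulate TPI and TSCH here, defer the rest to userspace */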
static int handle_io_inst(struct kvm_vcpu *vcpu)
{
	VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->kvm->arch.css_support) {
		/*
		 * Most I/O instructions will be handled by userspace.
		 * Exceptions are tpi and the interrupt portion of tsch.
		 */
		if (vcpu->arch.sie_block->ipa == 0xb236)
			return handle_tpi(vcpu);
		if (vcpu->arch.sie_block->ipa == 0xb235)
			return handle_tsch(vcpu);
		/* Handle in userspace. */
		vcpu->stat.instruction_io_other++;
		return -EOPNOTSUPP;
	} else {
		/*
		 * Set condition code 3 to stop the guest from issuing channel
		 * I/O instructions.
		 */
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}
}

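/* Handle STFL (STORE FACILITY LIST) interception */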
static int handle_stfl(struct kvm_vcpu *vcpu)
{
	int rc;
	unsigned int fac;

	vcpu->stat.instruction_stfl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	/*
	 * We need to shift the lower 32 facility bits (bit 0-31) from a u64
	 * into a u32 memory representation. They will remain bits 0-31.
	 */
	fac = *vcpu->kvm->arch.model.fac_list >> 32;
	rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
			    &fac, sizeof(fac));
	if (rc)
		return rc;
	VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
	trace_kvm_s390_handle_stfl(vcpu, fac);
	return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

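/* Check the architectural validity of a guest PSW (mask bits and address) */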
int is_valid_psw(psw_t *psw)
{
	if (psw->mask & PSW_MASK_UNASSIGNED)
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
		if (psw->addr & ~PSW_ADDR_31)
			return 0;
	}
	if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
		return 0;
	if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
		return 0;
	if (psw->addr & 1)
		return 0;
	return 1;
}

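/* Handle LPSW (LOAD PSW) interception */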
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
	psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
	psw_compat_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpsw++;

	if (gpsw->mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	if (!(new_psw.mask & PSW32_MASK_BASE))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
	gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
	gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
	if (!is_valid_psw(gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

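/* Handle LPSWE (LOAD PSW EXTENDED) interception */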
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
	psw_t new_psw;
	u64 addr;
	int rc;
	u8 ar;

	vcpu->stat.instruction_lpswe++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	addr = kvm_s390_get_base_disp_s(vcpu, &ar);
	if (addr & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	vcpu->arch.sie_block->gpsw = new_psw;
	if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	return 0;
}

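/* Handle STIDP (STORE CPU ID) interception */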
static int handle_stidp(struct kvm_vcpu *vcpu)
{
	u64 stidp_data = vcpu->kvm->arch.model.cpuid;
	u64 operand2;
	int rc;
	u8 ar;

	vcpu->stat.instruction_stidp++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);

	VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
	return 0;
}

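/* Insert KVM's own data on top of the STSI 3.2.2 data of any underlying hypervisor */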
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
	int cpus = 0;
	int n;

	cpus = atomic_read(&vcpu->kvm->online_vcpus);

	/* deal with other level 3 hypervisors */
	if (stsi(mem, 3, 2, 2))
		mem->count = 0;
	if (mem->count < 8)
		mem->count++;
	for (n = mem->count - 1; n > 0; n--)
		memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

	memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
	mem->vm[0].cpus_total = cpus;
	mem->vm[0].cpus_configured = cpus;
	mem->vm[0].cpus_standby = 0;
	mem->vm[0].cpus_reserved = 0;
	mem->vm[0].caf = 1000;
	memcpy(mem->vm[0].name, "KVMguest", 8);
	ASCEBC(mem->vm[0].name, 8);
	memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
	ASCEBC(mem->vm[0].cpi, 16);
}

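/* Fill the kvm_run exit payload so userspace can handle this STSI request */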
static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
				 u8 fc, u8 sel1, u16 sel2)
{
	vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
	vcpu->run->s390_stsi.addr = addr;
	vcpu->run->s390_stsi.ar = ar;
	vcpu->run->s390_stsi.fc = fc;
	vcpu->run->s390_stsi.sel1 = sel1;
	vcpu->run->s390_stsi.sel2 = sel2;
}

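/* Handle STSI (STORE SYSTEM INFORMATION) interception */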
static int handle_stsi(struct kvm_vcpu *vcpu)
{
	int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
	int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
	int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
	unsigned long mem = 0;
	u64 operand2;
	int rc = 0;
	u8 ar;

	vcpu->stat.instruction_stsi++;
	VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (fc > 3) {
		kvm_s390_set_psw_cc(vcpu, 3);
		return 0;
	}

	if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
	    || vcpu->run->s.regs.gprs[1] & 0xffff0000)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (fc == 0) {
		vcpu->run->s.regs.gprs[0] = 3 << 28;
		kvm_s390_set_psw_cc(vcpu, 0);
		return 0;
	}

	operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

	if (operand2 & 0xfff)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	switch (fc) {
	case 1: /* same handling for 1 and 2 */
	case 2:
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		if (stsi((void *) mem, fc, sel1, sel2))
			goto out_no_data;
		break;
	case 3:
		if (sel1 != 2 || sel2 != 2)
			goto out_no_data;
		mem = get_zeroed_page(GFP_KERNEL);
		if (!mem)
			goto out_no_data;
		handle_stsi_3_2_2(vcpu, (void *) mem);
		break;
	}

	rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
	if (rc) {
		rc = kvm_s390_inject_prog_cond(vcpu, rc);
		goto out;
	}
	if (vcpu->kvm->arch.user_stsi) {
		insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
		rc = -EREMOTE;
	}
	trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
	free_page(mem);
	kvm_s390_set_psw_cc(vcpu, 0);
	vcpu->run->s.regs.gprs[0] = 0;
	return rc;
out_no_data:
	kvm_s390_set_psw_cc(vcpu, 3);
out:
	free_page(mem);
	return rc;
}

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x02:
		return handle_stidp(vcpu);
	case 0x04:
		return handle_set_clock(vcpu);
	case 0x10:
		return handle_set_prefix(vcpu);
	case 0x11:
		return handle_store_prefix(vcpu);
	case 0x12:
		return handle_store_cpu_address(vcpu);
	case 0x14:
		return kvm_s390_handle_vsie(vcpu);
	case 0x21:
	case 0x50:
		return handle_ipte_interlock(vcpu);
	case 0x29:
		return handle_iske(vcpu);
	case 0x2a:
		return handle_rrbe(vcpu);
	case 0x2b:
		return handle_sske(vcpu);
	case 0x2c:
		return handle_test_block(vcpu);
	case 0x30:
	case 0x31:
	case 0x32:
	case 0x33:
	case 0x34:
	case 0x35:
	case 0x36:
	case 0x37:
	case 0x38:
	case 0x39:
	case 0x3a:
	case 0x3b:
	case 0x3c:
	case 0x5f:
	case 0x74:
	case 0x76:
		return handle_io_inst(vcpu);
	case 0x56:
		return handle_sthyi(vcpu);
	case 0x7d:
		return handle_stsi(vcpu);
	case 0xb1:
		return handle_stfl(vcpu);
	case 0xb2:
		return handle_lpswe(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
	int reg1, reg2;

	vcpu->stat.instruction_epsw++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	/* This basically extracts the mask half of the psw. */
	vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
	vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
	if (reg2) {
		vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
		vcpu->run->s.regs.gprs[reg2] |=
			vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
	}
	return 0;
}

#define PFMF_RESERVED 0xfffc0101UL
#define PFMF_SK 0x00020000UL
#define PFMF_CF 0x00010000UL
#define PFMF_UI 0x00008000UL
#define PFMF_FSC 0x00007000UL
#define PFMF_NQ 0x00000800UL
#define PFMF_MR 0x00000400UL
#define PFMF_MC 0x00000200UL
#define PFMF_KEY 0x000000feUL

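/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */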
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
	bool mr = false, mc = false, nq;
	int reg1, reg2;
	unsigned long start, end;
	unsigned char key;

	vcpu->stat.instruction_pfmf++;

	kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

	if (!test_kvm_facility(vcpu->kvm, 8))
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide non-quiescing support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
	    !test_kvm_facility(vcpu->kvm, 14))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	/* Only provide conditional-SSKE support if enabled for the guest */
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
	    test_kvm_facility(vcpu->kvm, 10)) {
		mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
		mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
	}

	nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
	key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
	start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
	start = kvm_s390_logical_to_effective(vcpu, start);

	if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
		if (kvm_s390_check_low_addr_prot_real(vcpu, start))
			return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
	}

	switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
	case 0x00000000:
		/* only 4k frames specify a real address */
		start = kvm_s390_real_to_abs(vcpu, start);
		end = (start + PAGE_SIZE) & ~(PAGE_SIZE - 1);
		break;
	case 0x00001000:
		end = (start + _SEGMENT_SIZE) & ~(_SEGMENT_SIZE - 1);
		break;
	case 0x00002000:
		/* only support 2G frame size if EDAT2 is available and we are
		   not in 24-bit addressing mode */
		if (!test_kvm_facility(vcpu->kvm, 78) ||
		    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_24BIT)
			return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
		end = (start + _REGION3_SIZE) & ~(_REGION3_SIZE - 1);
		break;
	default:
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
	}

	while (start != end) {
		unsigned long vmaddr;
		bool unlocked = false;

		/* Translate guest address to host address */
		vmaddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
		if (kvm_is_error_hva(vmaddr))
			return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
			if (kvm_clear_guest(vcpu->kvm, start, PAGE_SIZE))
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
		}

		if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
			int rc = kvm_s390_skey_check_enable(vcpu);

			if (rc)
				return rc;
			down_read(&current->mm->mmap_sem);
			rc = cond_set_guest_storage_key(current->mm, vmaddr,
							key, NULL, nq, mr, mc);
			if (rc < 0) {
				rc = fixup_user_fault(current, current->mm, vmaddr,
						      FAULT_FLAG_WRITE, &unlocked);
				rc = !rc ? -EAGAIN : rc;
			}
			up_read(&current->mm->mmap_sem);
			if (rc == -EFAULT)
				return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
			if (rc == -EAGAIN)
				continue;
			if (rc < 0)
				return rc;
		}
		start += PAGE_SIZE;
	}
	if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
		if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_BITS_AMODE_64BIT) {
			vcpu->run->s.regs.gprs[reg2] = end;
		} else {
			vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
			end = kvm_s390_logical_to_effective(vcpu, end);
			vcpu->run->s.regs.gprs[reg2] |= end;
		}
	}
	return 0;
}

/*
 * Must be called with relevant read locks held (kvm->mm->mmap_sem, kvm->srcu)
 */
static inline int __do_essa(struct kvm_vcpu *vcpu, const int orc)
{
	int r1, r2, nappended, entries;
	unsigned long gfn, hva, res, pgstev, ptev;
	unsigned long *cbrlo;

	/*
	 * We don't need to set SD.FPF.SK to 1 here, because if we have a
	 * machine check here we either handle it or crash
	 */

	kvm_s390_get_regs_rre(vcpu, &r1, &r2);
	gfn = vcpu->run->s.regs.gprs[r2] >> PAGE_SHIFT;
	hva = gfn_to_hva(vcpu->kvm, gfn);
	entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;

	if (kvm_is_error_hva(hva))
		return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

	nappended = pgste_perform_essa(vcpu->kvm->mm, hva, orc, &ptev, &pgstev);
	if (nappended < 0) {
		res = orc ? 0x10 : 0;
		vcpu->run->s.regs.gprs[r1] = res; /* Exception Indication */
		return 0;
	}
	res = (pgstev & _PGSTE_GPS_USAGE_MASK) >> 22;
	/*
	 * Set the block-content state part of the result. 0 means resident, so
	 * nothing to do if the page is valid. 2 is for preserved pages
	 * (non-present and non-zero), and 3 for zero pages (non-present and
	 * zero).
	 */
	if (ptev & _PAGE_INVALID) {
		res |= 2;
		if (pgstev & _PGSTE_GPS_ZERO)
			res |= 1;
	}
	if (pgstev & _PGSTE_GPS_NODAT)
		res |= 0x20;
	vcpu->run->s.regs.gprs[r1] = res;
	/*
	 * It is possible that all the normal 511 slots were full, in which case
	 * we will now write in the 512th slot, which is reserved for host use.
	 * In both cases we let the normal essa handling code process all the
	 * slots, including the reserved one, if needed.
	 */
	if (nappended > 0) {
		cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo & PAGE_MASK);
		cbrlo[entries] = gfn << PAGE_SHIFT;
	}

	if (orc) {
		struct kvm_memory_slot *ms = gfn_to_memslot(vcpu->kvm, gfn);

		/* Increment only if we are really flipping the bit */
		if (ms && !test_and_set_bit(gfn - ms->base_gfn, kvm_second_dirty_bitmap(ms)))
			atomic64_inc(&vcpu->kvm->arch.cmma_dirty_pages);
	}

	return nappended;
}

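/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES), the CMMA instruction */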
static int handle_essa(struct kvm_vcpu *vcpu)
{
	/* entries expected to be 1FF */
	int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
	unsigned long *cbrlo;
	struct gmap *gmap;
	int i, orc;

	VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
	gmap = vcpu->arch.gmap;
	vcpu->stat.instruction_essa++;
	if (!vcpu->kvm->arch.use_cmma)
		return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
	/* Check for invalid operation request code */
	orc = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	/* ORCs 0-6 are always valid */
	if (orc > (test_kvm_facility(vcpu->kvm, 147) ? ESSA_SET_STABLE_NODAT
						: ESSA_SET_STABLE_IF_RESIDENT))
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	if (!vcpu->kvm->arch.migration_mode) {
		/*
		 * CMMA is enabled in the KVM settings, but is disabled in
		 * the SIE block and in the mm_context, and we are not doing
		 * a migration. Enable CMMA in the mm_context.
		 * Since we need to take a write lock to write to the context
		 * to avoid races with storage keys handling, we check if the
		 * value really needs to be written to; if the value is
		 * already correct, we do nothing and avoid the lock.
		 */
		if (vcpu->kvm->mm->context.uses_cmm == 0) {
			down_write(&vcpu->kvm->mm->mmap_sem);
			vcpu->kvm->mm->context.uses_cmm = 1;
			up_write(&vcpu->kvm->mm->mmap_sem);
		}
		/*
		 * If we are here, we are supposed to have CMMA enabled in
		 * the SIE block. Enabling CMMA works on a per-CPU basis,
		 * while the context use_cmma flag is per process.
		 * It's possible that the context flag is enabled and the
		 * SIE flag is not, so we set the flag always; if it was
		 * already set, nothing changes, otherwise we enable it
		 * on this CPU too.
		 */
		vcpu->arch.sie_block->ecb2 |= ECB2_CMMA;
		/* Retry the ESSA instruction */
		kvm_s390_retry_instr(vcpu);
	} else {
		int srcu_idx;

		down_read(&vcpu->kvm->mm->mmap_sem);
		srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
		i = __do_essa(vcpu, orc);
		srcu_read_unlock(&vcpu->kvm->srcu, srcu_idx);
		up_read(&vcpu->kvm->mm->mmap_sem);
		if (i < 0)
			return i;
		/* Account for the possible extra cbrl entry */
		entries += i;
	}
	vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
	cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
	down_read(&gmap->mm->mmap_sem);
	for (i = 0; i < entries; ++i)
		__gmap_zap(gmap, cbrlo[i]);
	up_read(&gmap->mm->mmap_sem);
	return 0;
}

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x8a:
	case 0x8e:
	case 0x8f:
		return handle_ipte_interlock(vcpu);
	case 0x8d:
		return handle_epsw(vcpu);
	case 0xab:
		return handle_essa(vcpu);
	case 0xaf:
		return handle_pfmf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

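/* Handle LCTL (LOAD CONTROL) interception */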
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
		vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

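/* Handle STCTL (STORE CONTROL) interception */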
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u32 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctl++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

	if (ga & 3)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

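/* Handle LCTLG (LOAD CONTROL, 64-bit registers) interception */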
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_lctlg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

	nr_regs = ((reg3 - reg1) & 0xf) + 1;
	rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	if (rc)
		return kvm_s390_inject_prog_cond(vcpu, rc);
	reg = reg1;
	nr_regs = 0;
	do {
		vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	return 0;
}

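/* Handle STCTG (STORE CONTROL, 64-bit registers) interception */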
static int handle_stctg(struct kvm_vcpu *vcpu)
{
	int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
	int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
	int reg, rc, nr_regs;
	u64 ctl_array[16];
	u64 ga;
	u8 ar;

	vcpu->stat.instruction_stctg++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

	if (ga & 7)
		return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

	VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
	trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

	reg = reg1;
	nr_regs = 0;
	do {
		ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
		if (reg == reg3)
			break;
		reg = (reg + 1) % 16;
	} while (1);
	rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
	return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipb & 0x000000ff) {
	case 0x25:
		return handle_stctg(vcpu);
	case 0x2f:
		return handle_lctlg(vcpu);
	case 0x60:
	case 0x61:
	case 0x62:
		return handle_ri(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

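/* Handle TPROT (TEST PROTECTION) interception */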
static int handle_tprot(struct kvm_vcpu *vcpu)
{
	u64 address1, address2;
	unsigned long hva, gpa;
	int ret = 0, cc = 0;
	bool writable;
	u8 ar;

	vcpu->stat.instruction_tprot++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

	/* we only handle the Linux memory detection case:
	 * access key == 0
	 * everything else goes to userspace. */
	if (address2 & 0xf0)
		return -EOPNOTSUPP;
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_lock(vcpu);
	ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
	if (ret == PGM_PROTECTION) {
		/* Write protected? Try again with read-only... */
		cc = 1;
		ret = guest_translate_address(vcpu, address1, ar, &gpa,
					      GACC_FETCH);
	}
	if (ret) {
		if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
			ret = kvm_s390_inject_program_int(vcpu, ret);
		} else if (ret > 0) {
			/* Translation not available */
			kvm_s390_set_psw_cc(vcpu, 3);
			ret = 0;
		}
		goto out_unlock;
	}

	hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
	if (kvm_is_error_hva(hva)) {
		ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
	} else {
		if (!writable)
			cc = 1;		/* Write not permitted ==> read-only */
		kvm_s390_set_psw_cc(vcpu, cc);
		/* Note: CC2 only occurs for storage keys (not supported yet) */
	}
out_unlock:
	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
		ipte_unlock(vcpu);
	return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x01:
		return handle_tprot(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}

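/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */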
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
	u32 value;

	vcpu->stat.instruction_sckpf++;

	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

	if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
		return kvm_s390_inject_program_int(vcpu,
						   PGM_SPECIFICATION);

	value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
	vcpu->arch.sie_block->todpr = value;

	return 0;
}

static int handle_ptff(struct kvm_vcpu *vcpu)
{
	vcpu->stat.instruction_ptff++;

	/* we don't emulate any control instructions yet */
	kvm_s390_set_psw_cc(vcpu, 3);
	return 0;
}

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
	switch (vcpu->arch.sie_block->ipa & 0x00ff) {
	case 0x04:
		return handle_ptff(vcpu);
	case 0x07:
		return handle_sckpf(vcpu);
	default:
		return -EOPNOTSUPP;
	}
}