/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <linux/compat.h>
#include <linux/mm_types.h>

#include <asm/asm-offsets.h>
#include <asm/facility.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/pgtable.h>
#include <asm/pgalloc.h>
#include <asm/gmap.h>
#include <asm/io.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include <asm/sclp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

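/* Handle RI (runtime instrumentation): lazily enable interpretation in SIE */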
static int handle_ri(struct kvm_vcpu *vcpu)
{
        if (test_kvm_facility(vcpu->kvm, 64)) {
                VCPU_EVENT(vcpu, 3, "%s", "ENABLE: RI (lazy)");
                vcpu->arch.sie_block->ecb3 |= 0x01;
                kvm_s390_retry_instr(vcpu);
                return 0;
        } else
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);
}

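/* Dispatch 0xaa instructions: the RI ones (low nibble <= 4) are handled here */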
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.sie_block->ipa & 0xf) <= 4)
                return handle_ri(vcpu);
        else
                return -EOPNOTSUPP;
}

/* Handle SCK (SET CLOCK) interception */
static int handle_set_clock(struct kvm_vcpu *vcpu)
{
        int rc;
        u8 ar;
        u64 op2, val;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        op2 = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (op2 & 7)    /* Operand must be on a doubleword boundary */
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, op2, ar, &val, sizeof(val));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "SCK: setting guest TOD to 0x%llx", val);
        kvm_s390_set_tod_clock(vcpu->kvm, val);

        kvm_s390_set_psw_cc(vcpu, 0);
        return 0;
}

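/* Handle SPX (SET PREFIX) interception */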
static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        u8 ar;

        vcpu->stat.instruction_spx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* get the value */
        rc = read_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        address &= 0x7fffe000u;

        /*
         * Make sure the new value is valid memory. We only need to check the
         * first page, since address is 8k aligned and memory pieces are always
         * at least 1MB aligned and have at least a size of 1MB.
         */
        if (kvm_is_error_gpa(vcpu->kvm, address))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_prefix(vcpu, address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
        return 0;
}

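/* Handle STPX (STORE PREFIX) interception */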
static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;
        int rc;
        u8 ar;

        vcpu->stat.instruction_stpx++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        /* must be word boundary */
        if (operand2 & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        address = kvm_s390_get_prefix(vcpu);

        /* store the value */
        rc = write_guest(vcpu, operand2, ar, &address, sizeof(address));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STPX: storing prefix 0x%x into 0x%llx", address, operand2);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
        return 0;
}

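/* Handle STAP (STORE CPU ADDRESS) interception */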
static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u16 vcpu_id = vcpu->vcpu_id;
        u64 ga;
        int rc;
        u8 ar;

        vcpu->stat.instruction_stap++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (ga & 1)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, ga, ar, &vcpu_id, sizeof(vcpu_id));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STAP: storing cpu address (%u) to 0x%llx", vcpu_id, ga);
        trace_kvm_s390_handle_stap(vcpu, ga);
        return 0;
}

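/* Lazily enable host storage-key handling on first use of a key instruction */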
static int __skey_check_enable(struct kvm_vcpu *vcpu)
{
        int rc = 0;

        trace_kvm_s390_skey_related_inst(vcpu);
        if (!(vcpu->arch.sie_block->ictl & (ICTL_ISKE | ICTL_SSKE | ICTL_RRBE)))
                return rc;

        rc = s390_enable_skey();
        VCPU_EVENT(vcpu, 3, "enabling storage keys for guest: %d", rc);
        if (!rc)
                vcpu->arch.sie_block->ictl &= ~(ICTL_ISKE | ICTL_SSKE | ICTL_RRBE);
        return rc;
}

static int try_handle_skey(struct kvm_vcpu *vcpu)
{
        int rc;

        vcpu->stat.instruction_storage_key++;
        rc = __skey_check_enable(vcpu);
        if (rc)
                return rc;
        if (sclp.has_skey) {
                /* with storage-key facility, SIE interprets it for us */
                kvm_s390_retry_instr(vcpu);
                VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
                return -EAGAIN;
        }
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        return 0;
}

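/* Handle ISKE (INSERT STORAGE KEY EXTENDED) interception */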
static int handle_iske(struct kvm_vcpu *vcpu)
{
        unsigned long addr;
        unsigned char key;
        int reg1, reg2;
        int rc;

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        addr = kvm_s390_real_to_abs(vcpu, addr);
        addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        down_read(&current->mm->mmap_sem);
        rc = get_guest_storage_key(current->mm, addr, &key);
        up_read(&current->mm->mmap_sem);
        if (rc)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        vcpu->run->s.regs.gprs[reg1] &= ~0xff;
        vcpu->run->s.regs.gprs[reg1] |= key;
        return 0;
}

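/* Handle RRBE (RESET REFERENCE BIT EXTENDED) interception */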
static int handle_rrbe(struct kvm_vcpu *vcpu)
{
        unsigned long addr;
        int reg1, reg2;
        int rc;

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        addr = kvm_s390_real_to_abs(vcpu, addr);
        addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(addr));
        if (kvm_is_error_hva(addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        down_read(&current->mm->mmap_sem);
        rc = reset_guest_reference_bit(current->mm, addr);
        up_read(&current->mm->mmap_sem);
        if (rc < 0)
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}

#define SSKE_NQ 0x8
#define SSKE_MR 0x4
#define SSKE_MC 0x2
#define SSKE_MB 0x1
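/* Handle SSKE (SET STORAGE KEY EXTENDED) interception */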
static int handle_sske(struct kvm_vcpu *vcpu)
{
        unsigned char m3 = vcpu->arch.sie_block->ipb >> 28;
        unsigned long start, end;
        unsigned char key, oldkey;
        int reg1, reg2;
        int rc;

        rc = try_handle_skey(vcpu);
        if (rc)
                return rc != -EAGAIN ? rc : 0;

        if (!test_kvm_facility(vcpu->kvm, 8))
                m3 &= ~SSKE_MB;
        if (!test_kvm_facility(vcpu->kvm, 10))
                m3 &= ~(SSKE_MC | SSKE_MR);
        if (!test_kvm_facility(vcpu->kvm, 14))
                m3 &= ~SSKE_NQ;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        key = vcpu->run->s.regs.gprs[reg1] & 0xfe;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);
        if (m3 & SSKE_MB) {
                /* start already designates an absolute address */
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
        } else {
                start = kvm_s390_real_to_abs(vcpu, start);
                end = start + PAGE_SIZE;
        }

        while (start != end) {
                unsigned long addr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));

                if (kvm_is_error_hva(addr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                down_read(&current->mm->mmap_sem);
                rc = cond_set_guest_storage_key(current->mm, addr, key, &oldkey,
                                                m3 & SSKE_NQ, m3 & SSKE_MR,
                                                m3 & SSKE_MC);
                up_read(&current->mm->mmap_sem);
                if (rc < 0)
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                start += PAGE_SIZE;
        }

        if (m3 & (SSKE_MC | SSKE_MR)) {
                if (m3 & SSKE_MB) {
                        /* skey in reg1 is unpredictable */
                        kvm_s390_set_psw_cc(vcpu, 3);
                } else {
                        kvm_s390_set_psw_cc(vcpu, rc);
                        vcpu->run->s.regs.gprs[reg1] &= ~0xff00UL;
                        vcpu->run->s.regs.gprs[reg1] |= (u64) oldkey << 8;
                }
        }
        if (m3 & SSKE_MB) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT)
                        vcpu->run->s.regs.gprs[reg2] &= ~PAGE_MASK;
                else
                        vcpu->run->s.regs.gprs[reg2] &= ~0xfffff000UL;
                end = kvm_s390_logical_to_effective(vcpu, end);
                vcpu->run->s.regs.gprs[reg2] |= end;
        }
        return 0;
}

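/* Wait until the IPTE lock is released, then retry the instruction */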
static int handle_ipte_interlock(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_ipte_interlock++;
        if (psw_bits(vcpu->arch.sie_block->gpsw).p)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
        wait_event(vcpu->kvm->arch.ipte_wq, !ipte_lock_held(vcpu));
        kvm_s390_retry_instr(vcpu);
        VCPU_EVENT(vcpu, 4, "%s", "retrying ipte interlock operation");
        return 0;
}

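/* Handle TEST BLOCK interception */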
static int handle_test_block(struct kvm_vcpu *vcpu)
{
        gpa_t addr;
        int reg2;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_regs_rre(vcpu, NULL, &reg2);
        addr = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        addr = kvm_s390_logical_to_effective(vcpu, addr);
        if (kvm_s390_check_low_addr_prot_real(vcpu, addr))
                return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        addr = kvm_s390_real_to_abs(vcpu, addr);

        if (kvm_is_error_gpa(vcpu->kvm, addr))
                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        /*
         * We don't expect errors on modern systems, and do not care
         * about storage keys (yet), so let's just clear the page.
         */
        if (kvm_clear_guest(vcpu->kvm, addr, PAGE_SIZE))
                return -EFAULT;
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
}

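/* Handle TPI (TEST PENDING INTERRUPTION) interception */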
static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        unsigned long len;
        u32 tpi_data[3];
        int rc;
        u64 addr;
        u8 ar;

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->arch.sie_block->gcr[6], 0);
        if (!inti) {
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        tpi_data[0] = inti->io.subchannel_id << 16 | inti->io.subchannel_nr;
        tpi_data[1] = inti->io.io_int_parm;
        tpi_data[2] = inti->io.io_int_word;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                len = sizeof(tpi_data) - 4;
                rc = write_guest(vcpu, addr, ar, &tpi_data, len);
                if (rc) {
                        rc = kvm_s390_inject_prog_cond(vcpu, rc);
                        goto reinject_interrupt;
                }
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                len = sizeof(tpi_data);
                if (write_guest_lc(vcpu, __LC_SUBCHANNEL_ID, &tpi_data, len)) {
                        /* failed writes to the low core are not recoverable */
                        rc = -EFAULT;
                        goto reinject_interrupt;
                }
        }

        /* irq was successfully handed to the guest */
        kfree(inti);
        kvm_s390_set_psw_cc(vcpu, 1);
        return 0;
reinject_interrupt:
        /*
         * If we encounter a problem storing the interruption code, the
         * instruction is suppressed from the guest's view: reinject the
         * interrupt.
         */
        if (kvm_s390_reinject_io_int(vcpu->kvm, inti)) {
                kfree(inti);
                rc = -EFAULT;
        }
        /* don't set the cc, a pgm irq was injected or we drop to user space */
        return rc ? -EFAULT : 0;
}

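/* Handle TSCH (TEST SUBCHANNEL): dequeue a matching I/O interrupt, exit to userspace */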
static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti = NULL;
        const u64 isc_mask = 0xffUL << 24; /* all iscs set */

        /* a valid schid has at least one bit set */
        if (vcpu->run->s.regs.gprs[1])
                inti = kvm_s390_get_io_int(vcpu->kvm, isc_mask,
                                           vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing channel
                 * I/O instructions.
                 */
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }
}

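/* Handle STFL (STORE FACILITY LIST) interception */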
static int handle_stfl(struct kvm_vcpu *vcpu)
{
        int rc;
        unsigned int fac;

        vcpu->stat.instruction_stfl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        /*
         * We need to shift the lower 32 facility bits (bit 0-31) from a u64
         * into a u32 memory representation. They will remain bits 0-31.
         */
        fac = *vcpu->kvm->arch.model.fac_list >> 32;
        rc = write_guest_lc(vcpu, offsetof(struct lowcore, stfl_fac_list),
                            &fac, sizeof(fac));
        if (rc)
                return rc;
        VCPU_EVENT(vcpu, 3, "STFL: store facility list 0x%x", fac);
        trace_kvm_s390_handle_stfl(vcpu, fac);
        return 0;
}

#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

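/* Check a PSW for architecturally valid mask bits and instruction address */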
int is_valid_psw(psw_t *psw)
{
        if (psw->mask & PSW_MASK_UNASSIGNED)
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_BA) {
                if (psw->addr & ~PSW_ADDR_31)
                        return 0;
        }
        if (!(psw->mask & PSW_MASK_ADDR_MODE) && (psw->addr & ~PSW_ADDR_24))
                return 0;
        if ((psw->mask & PSW_MASK_ADDR_MODE) == PSW_MASK_EA)
                return 0;
        if (psw->addr & 1)
                return 0;
        return 1;
}

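/* Handle LPSW (LOAD PSW) interception */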
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        psw_t *gpsw = &vcpu->arch.sie_block->gpsw;
        psw_compat_t new_psw;
        u64 addr;
        int rc;
        u8 ar;

        if (gpsw->mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        if (!(new_psw.mask & PSW32_MASK_BASE))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        gpsw->mask = (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        gpsw->mask |= new_psw.addr & PSW32_ADDR_AMODE;
        gpsw->addr = new_psw.addr & ~PSW32_ADDR_AMODE;
        if (!is_valid_psw(gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

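/* Handle LPSWE (LOAD PSW EXTENDED) interception */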
static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        psw_t new_psw;
        u64 addr;
        int rc;
        u8 ar;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        addr = kvm_s390_get_base_disp_s(vcpu, &ar);
        if (addr & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        rc = read_guest(vcpu, addr, ar, &new_psw, sizeof(new_psw));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        vcpu->arch.sie_block->gpsw = new_psw;
        if (!is_valid_psw(&vcpu->arch.sie_block->gpsw))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        return 0;
}

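/* Handle STIDP (STORE CPU ID) interception */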
static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 stidp_data = vcpu->kvm->arch.model.cpuid;
        u64 operand2;
        int rc;
        u8 ar;

        vcpu->stat.instruction_stidp++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        rc = write_guest(vcpu, operand2, ar, &stidp_data, sizeof(stidp_data));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);

        VCPU_EVENT(vcpu, 3, "STIDP: store cpu id 0x%llx", stidp_data);
        return 0;
}

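/* Fill the STSI 3.2.2 block: insert our VM data on top of any lower-level hypervisor entries */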
static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        int cpus = 0;
        int n;

        cpus = atomic_read(&vcpu->kvm->online_vcpus);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        memset(&mem->vm[0], 0, sizeof(mem->vm[0]));
        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

static void insert_stsi_usr_data(struct kvm_vcpu *vcpu, u64 addr, u8 ar,
                                 u8 fc, u8 sel1, u16 sel2)
{
        vcpu->run->exit_reason = KVM_EXIT_S390_STSI;
        vcpu->run->s390_stsi.addr = addr;
        vcpu->run->s390_stsi.ar = ar;
        vcpu->run->s390_stsi.fc = fc;
        vcpu->run->s390_stsi.sel1 = sel1;
        vcpu->run->s390_stsi.sel2 = sel2;
}

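/* Handle STSI (STORE SYSTEM INFORMATION) interception */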
static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        unsigned long mem = 0;
        u64 operand2;
        int rc = 0;
        u8 ar;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 3, "STSI: fc: %u sel1: %u sel2: %u", fc, sel1, sel2);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (fc > 3) {
                kvm_s390_set_psw_cc(vcpu, 3);
                return 0;
        }

        if (vcpu->run->s.regs.gprs[0] & 0x0fffff00
            || vcpu->run->s.regs.gprs[1] & 0xffff0000)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        if (fc == 0) {
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                kvm_s390_set_psw_cc(vcpu, 0);
                return 0;
        }

        operand2 = kvm_s390_get_base_disp_s(vcpu, &ar);

        if (operand2 & 0xfff)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_no_data;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_no_data;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_no_data;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        }

        rc = write_guest(vcpu, operand2, ar, (void *)mem, PAGE_SIZE);
        if (rc) {
                rc = kvm_s390_inject_prog_cond(vcpu, rc);
                goto out;
        }
        if (vcpu->kvm->arch.user_stsi) {
                insert_stsi_usr_data(vcpu, operand2, ar, fc, sel1, sel2);
                rc = -EREMOTE;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        kvm_s390_set_psw_cc(vcpu, 0);
        vcpu->run->s.regs.gprs[0] = 0;
        return rc;
out_no_data:
        kvm_s390_set_psw_cc(vcpu, 3);
out:
        free_page(mem);
        return rc;
}

static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x04] = handle_set_clock,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x14] = kvm_s390_handle_vsie,
        [0x21] = handle_ipte_interlock,
        [0x29] = handle_iske,
        [0x2a] = handle_rrbe,
        [0x2b] = handle_sske,
        [0x2c] = handle_test_block,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x50] = handle_ipte_interlock,
        [0x56] = handle_sthyi,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. Here we check for
         * the privileged ones that we can handle in the kernel.
         * Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

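/* Handle EPSW (EXTRACT PSW) interception */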
static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000UL;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000UL;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffffUL;
        }
        return 0;
}

#define PFMF_RESERVED	0xfffc0101UL
#define PFMF_SK		0x00020000UL
#define PFMF_CF		0x00010000UL
#define PFMF_UI		0x00008000UL
#define PFMF_FSC	0x00007000UL
#define PFMF_NQ		0x00000800UL
#define PFMF_MR		0x00000400UL
#define PFMF_MC		0x00000200UL
#define PFMF_KEY	0x000000feUL

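/* Handle PFMF (PERFORM FRAME MANAGEMENT FUNCTION) interception */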
static int handle_pfmf(struct kvm_vcpu *vcpu)
{
        bool mr = false, mc = false, nq;
        int reg1, reg2;
        unsigned long start, end;
        unsigned char key;

        vcpu->stat.instruction_pfmf++;

        kvm_s390_get_regs_rre(vcpu, &reg1, &reg2);

        if (!test_kvm_facility(vcpu->kvm, 8))
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_RESERVED)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide non-quiescing support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_NQ &&
            !test_kvm_facility(vcpu->kvm, 14))
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Only provide conditional-SSKE support if enabled for the guest */
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK &&
            test_kvm_facility(vcpu->kvm, 10)) {
                mr = vcpu->run->s.regs.gprs[reg1] & PFMF_MR;
                mc = vcpu->run->s.regs.gprs[reg1] & PFMF_MC;
        }

        nq = vcpu->run->s.regs.gprs[reg1] & PFMF_NQ;
        key = vcpu->run->s.regs.gprs[reg1] & PFMF_KEY;
        start = vcpu->run->s.regs.gprs[reg2] & PAGE_MASK;
        start = kvm_s390_logical_to_effective(vcpu, start);

        if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                if (kvm_s390_check_low_addr_prot_real(vcpu, start))
                        return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
        }

        switch (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
        case 0x00000000:
                /* only 4k frames specify a real address */
                start = kvm_s390_real_to_abs(vcpu, start);
                end = (start + (1UL << 12)) & ~((1UL << 12) - 1);
                break;
        case 0x00001000:
                end = (start + (1UL << 20)) & ~((1UL << 20) - 1);
                break;
        case 0x00002000:
                /*
                 * only support 2G frame size if EDAT2 is available and we are
                 * not in 24-bit addressing mode
                 */
                if (!test_kvm_facility(vcpu->kvm, 78) ||
                    psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_24BIT)
                        return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                end = (start + (1UL << 31)) & ~((1UL << 31) - 1);
                break;
        default:
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
        }

        while (start != end) {
                unsigned long useraddr;

                /* Translate guest address to host address */
                useraddr = gfn_to_hva(vcpu->kvm, gpa_to_gfn(start));
                if (kvm_is_error_hva(useraddr))
                        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_CF) {
                        if (clear_user((void __user *)useraddr, PAGE_SIZE))
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                if (vcpu->run->s.regs.gprs[reg1] & PFMF_SK) {
                        int rc = __skey_check_enable(vcpu);

                        if (rc)
                                return rc;
                        down_read(&current->mm->mmap_sem);
                        rc = cond_set_guest_storage_key(current->mm, useraddr,
                                                        key, NULL, nq, mr, mc);
                        up_read(&current->mm->mmap_sem);
                        if (rc < 0)
                                return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                }

                start += PAGE_SIZE;
        }
        if (vcpu->run->s.regs.gprs[reg1] & PFMF_FSC) {
                if (psw_bits(vcpu->arch.sie_block->gpsw).eaba == PSW_AMODE_64BIT) {
                        vcpu->run->s.regs.gprs[reg2] = end;
                } else {
                        vcpu->run->s.regs.gprs[reg2] &= ~0xffffffffUL;
                        end = kvm_s390_logical_to_effective(vcpu, end);
                        vcpu->run->s.regs.gprs[reg2] |= end;
                }
        }
        return 0;
}

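/* Handle ESSA (EXTRACT AND SET STORAGE ATTRIBUTES) for CMMA */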
static int handle_essa(struct kvm_vcpu *vcpu)
{
        /* entries expected to be 1FF */
        int entries = (vcpu->arch.sie_block->cbrlo & ~PAGE_MASK) >> 3;
        unsigned long *cbrlo;
        struct gmap *gmap;
        int i;

        VCPU_EVENT(vcpu, 4, "ESSA: release %d pages", entries);
        gmap = vcpu->arch.gmap;
        vcpu->stat.instruction_essa++;
        if (!vcpu->kvm->arch.use_cmma)
                return kvm_s390_inject_program_int(vcpu, PGM_OPERATION);

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (((vcpu->arch.sie_block->ipb & 0xf0000000) >> 28) > 6)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        /* Retry the ESSA instruction */
        kvm_s390_retry_instr(vcpu);
        vcpu->arch.sie_block->cbrlo &= PAGE_MASK;	/* reset nceo */
        cbrlo = phys_to_virt(vcpu->arch.sie_block->cbrlo);
        down_read(&gmap->mm->mmap_sem);
        for (i = 0; i < entries; ++i)
                __gmap_zap(gmap, cbrlo[i]);
        up_read(&gmap->mm->mmap_sem);
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8a] = handle_ipte_interlock,
        [0x8d] = handle_epsw,
        [0x8e] = handle_ipte_interlock,
        [0x8f] = handle_ipte_interlock,
        [0xab] = handle_essa,
        [0xaf] = handle_pfmf,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);

        return -EOPNOTSUPP;
}

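/* Handle LCTL (LOAD CONTROL) interception */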
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_lctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTL: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 0, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] &= 0xffffffff00000000ul;
                vcpu->arch.sie_block->gcr[reg] |= ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

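/* Handle STCTL (STORE CONTROL) interception */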
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u32 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_stctl++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rs(vcpu, &ar);

        if (ga & 3)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTL r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 0, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u32));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

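/* Handle LCTLG (LOAD CONTROL, 64-bit) interception */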
static int handle_lctlg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_lctlg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "LCTLG: r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_lctl(vcpu, 1, reg1, reg3, ga);

        nr_regs = ((reg3 - reg1) & 0xf) + 1;
        rc = read_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        if (rc)
                return kvm_s390_inject_prog_cond(vcpu, rc);
        reg = reg1;
        nr_regs = 0;
        do {
                vcpu->arch.sie_block->gcr[reg] = ctl_array[nr_regs++];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
        return 0;
}

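/* Handle STCTG (STORE CONTROL, 64-bit) interception */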
static int handle_stctg(struct kvm_vcpu *vcpu)
{
        int reg1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int reg3 = vcpu->arch.sie_block->ipa & 0x000f;
        int reg, rc, nr_regs;
        u64 ctl_array[16];
        u64 ga;
        u8 ar;

        vcpu->stat.instruction_stctg++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        ga = kvm_s390_get_base_disp_rsy(vcpu, &ar);

        if (ga & 7)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        VCPU_EVENT(vcpu, 4, "STCTG r1:%d, r3:%d, addr: 0x%llx", reg1, reg3, ga);
        trace_kvm_s390_handle_stctl(vcpu, 1, reg1, reg3, ga);

        reg = reg1;
        nr_regs = 0;
        do {
                ctl_array[nr_regs++] = vcpu->arch.sie_block->gcr[reg];
                if (reg == reg3)
                        break;
                reg = (reg + 1) % 16;
        } while (1);
        rc = write_guest(vcpu, ga, ar, ctl_array, nr_regs * sizeof(u64));
        return rc ? kvm_s390_inject_prog_cond(vcpu, rc) : 0;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x2f] = handle_lctlg,
        [0x25] = handle_stctg,
        [0x60] = handle_ri,
        [0x61] = handle_ri,
        [0x62] = handle_ri,
};

int kvm_s390_handle_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

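/* Handle TPROT (TEST PROTECTION) interception */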
static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        unsigned long hva, gpa;
        int ret = 0, cc = 0;
        bool writable;
        u8 ar;

        vcpu->stat.instruction_tprot++;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2, &ar, NULL);

        /*
         * We only handle the Linux memory detection case:
         * access key == 0
         * everything else goes to userspace.
         */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_lock(vcpu);
        ret = guest_translate_address(vcpu, address1, ar, &gpa, GACC_STORE);
        if (ret == PGM_PROTECTION) {
                /* Write protected? Try again with read-only... */
                cc = 1;
                ret = guest_translate_address(vcpu, address1, ar, &gpa,
                                              GACC_FETCH);
        }
        if (ret) {
                if (ret == PGM_ADDRESSING || ret == PGM_TRANSLATION_SPEC) {
                        ret = kvm_s390_inject_program_int(vcpu, ret);
                } else if (ret > 0) {
                        /* Translation not available */
                        kvm_s390_set_psw_cc(vcpu, 3);
                        ret = 0;
                }
                goto out_unlock;
        }

        hva = gfn_to_hva_prot(vcpu->kvm, gpa_to_gfn(gpa), &writable);
        if (kvm_is_error_hva(hva)) {
                ret = kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        } else {
                if (!writable)
                        cc = 1;	/* Write not permitted ==> read-only */
                kvm_s390_set_psw_cc(vcpu, cc);
                /* Note: CC2 only occurs for storage keys (not supported yet) */
        }
out_unlock:
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                ipte_unlock(vcpu);
        return ret;
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

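/* Handle SCKPF (SET CLOCK PROGRAMMABLE FIELD) interception */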
static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

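/* Handle PTFF (PERFORM TIMING FACILITY FUNCTION): not emulated, report cc 3 */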
static int handle_ptff(struct kvm_vcpu *vcpu)
{
        /* we don't emulate any control instructions yet */
        kvm_s390_set_psw_cc(vcpu, 3);
        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x04] = handle_ptff,
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}