/*
 * handling privileged instructions
 *
 * Copyright IBM Corp. 2008
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/gfp.h>
#include <linux/errno.h>
#include <asm/asm-offsets.h>
#include <asm/current.h>
#include <asm/debug.h>
#include <asm/ebcdic.h>
#include <asm/sysinfo.h>
#include <asm/ptrace.h>
#include <asm/compat.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

static int handle_set_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address = 0;
        u8 tmp;

        vcpu->stat.instruction_spx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        /* get the value */
        if (get_guest(vcpu, address, (u32 __user *) operand2)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

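        /*
         * Only the 8K-aligned part of the word below 2G becomes the
         * new prefix (hence the 0x7fffe000 mask); the 8K prefix area
         * spans two pages, both of which must be addressable.
         */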
        address = address & 0x7fffe000u;

        /* make sure that the new value is valid memory */
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
           (copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        kvm_s390_set_prefix(vcpu, address);

        VCPU_EVENT(vcpu, 5, "setting prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 1, address);
out:
        return 0;
}

static int handle_store_prefix(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        u32 address;

        vcpu->stat.instruction_stpx++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        /* must be word boundary */
        if (operand2 & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        address = vcpu->arch.sie_block->prefix;
        address = address & 0x7fffe000u;

        /* get the value */
        if (put_guest(vcpu, address, (u32 __user *)operand2)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing prefix to %x", address);
        trace_kvm_s390_handle_prefix(vcpu, 0, address);
out:
        return 0;
}

static int handle_store_cpu_address(struct kvm_vcpu *vcpu)
{
        u64 useraddr;
        int rc;

        vcpu->stat.instruction_stap++;

        useraddr = kvm_s390_get_base_disp_s(vcpu);

        if (useraddr & 1) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest(vcpu, vcpu->vcpu_id, (u16 __user *)useraddr);
        if (rc) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "storing cpu address to %llx", useraddr);
        trace_kvm_s390_handle_stap(vcpu, useraddr);
out:
        return 0;
}

static int handle_skey(struct kvm_vcpu *vcpu)
{
        vcpu->stat.instruction_storage_key++;
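        /* Rewind the PSW by the instruction length (4 bytes) so the
         * guest will retry the storage key operation. */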
        vcpu->arch.sie_block->gpsw.addr -= 4;
        VCPU_EVENT(vcpu, 4, "%s", "retrying storage key operation");
        return 0;
}

static int handle_tpi(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;
        u64 addr;
        int cc;

        addr = kvm_s390_get_base_disp_s(vcpu);
        if (addr & 3) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }
        cc = 0;
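        /* CR6 holds the I/O interruption subclass mask; only pending
         * interrupts matching it are dequeued. */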
        inti = kvm_s390_get_io_int(vcpu->kvm, vcpu->run->s.regs.crs[6], 0);
        if (!inti)
                goto no_interrupt;
        cc = 1;
        if (addr) {
                /*
                 * Store the two-word I/O interruption code into the
                 * provided area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) addr);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) (addr + 2));
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) (addr + 4));
        } else {
                /*
                 * Store the three-word I/O interruption code into
                 * the appropriate lowcore area.
                 */
                put_guest(vcpu, inti->io.subchannel_id, (u16 __user *) __LC_SUBCHANNEL_ID);
                put_guest(vcpu, inti->io.subchannel_nr, (u16 __user *) __LC_SUBCHANNEL_NR);
                put_guest(vcpu, inti->io.io_int_parm, (u32 __user *) __LC_IO_INT_PARM);
                put_guest(vcpu, inti->io.io_int_word, (u32 __user *) __LC_IO_INT_WORD);
        }
        kfree(inti);
no_interrupt:
        /* Set condition code and we're done. */
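        /* The condition code occupies PSW bits 18-19, i.e. bits 45:44
         * of the mask quadword. */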
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->arch.sie_block->gpsw.mask |= (cc & 3ul) << 44;
out:
        return 0;
}

static int handle_tsch(struct kvm_vcpu *vcpu)
{
        struct kvm_s390_interrupt_info *inti;

        inti = kvm_s390_get_io_int(vcpu->kvm, 0,
                                   vcpu->run->s.regs.gprs[1]);

        /*
         * Prepare exit to userspace.
         * We indicate whether we dequeued a pending I/O interrupt
         * so that userspace can re-inject it if the instruction gets
         * a program check. While this may re-order the pending I/O
         * interrupts, this is no problem since the priority is kept
         * intact.
         */
        vcpu->run->exit_reason = KVM_EXIT_S390_TSCH;
        vcpu->run->s390_tsch.dequeued = !!inti;
        if (inti) {
                vcpu->run->s390_tsch.subchannel_id = inti->io.subchannel_id;
                vcpu->run->s390_tsch.subchannel_nr = inti->io.subchannel_nr;
                vcpu->run->s390_tsch.io_int_parm = inti->io.io_int_parm;
                vcpu->run->s390_tsch.io_int_word = inti->io.io_int_word;
        }
        vcpu->run->s390_tsch.ipb = vcpu->arch.sie_block->ipb;
        kfree(inti);
        return -EREMOTE;
}

static int handle_io_inst(struct kvm_vcpu *vcpu)
{
        VCPU_EVENT(vcpu, 4, "%s", "I/O instruction");

        if (vcpu->kvm->arch.css_support) {
                /*
                 * Most I/O instructions will be handled by userspace.
                 * Exceptions are tpi and the interrupt portion of tsch.
                 */
                if (vcpu->arch.sie_block->ipa == 0xb236)
                        return handle_tpi(vcpu);
                if (vcpu->arch.sie_block->ipa == 0xb235)
                        return handle_tsch(vcpu);
                /* Handle in userspace. */
                return -EOPNOTSUPP;
        } else {
                /*
                 * Set condition code 3 to stop the guest from issuing
                 * channel I/O instructions.
                 */
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                vcpu->arch.sie_block->gpsw.mask |= (3 & 3ul) << 44;
                return 0;
        }
}

static int handle_stfl(struct kvm_vcpu *vcpu)
{
        unsigned int facility_list;
        int rc;

        vcpu->stat.instruction_stfl++;
        /* only pass the facility bits that we can handle */
        facility_list = S390_lowcore.stfl_fac_list & 0xff00fff3;

        rc = copy_to_guest(vcpu, offsetof(struct _lowcore, stfl_fac_list),
                           &facility_list, sizeof(facility_list));
        if (rc)
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
        else {
                VCPU_EVENT(vcpu, 5, "store facility list value %x",
                           facility_list);
                trace_kvm_s390_handle_stfl(vcpu, facility_list);
        }
        return 0;
}

static void handle_new_psw(struct kvm_vcpu *vcpu)
{
        /* Check whether the new psw is enabled for machine checks. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_MCHECK)
                kvm_s390_deliver_pending_machine_checks(vcpu);
}

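/*
 * Masks used to validate a newly loaded PSW: no bit in
 * PSW_MASK_UNASSIGNED may be set, and the instruction address must fit
 * the addressing mode selected by the EA/BA bits.
 */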
#define PSW_MASK_ADDR_MODE (PSW_MASK_EA | PSW_MASK_BA)
#define PSW_MASK_UNASSIGNED 0xb80800fe7fffffffUL
#define PSW_ADDR_24 0x0000000000ffffffUL
#define PSW_ADDR_31 0x000000007fffffffUL

int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_compat_t new_psw;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        if (!(new_psw.mask & PSW32_MASK_BASE)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

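        /*
         * Expand the short (ESA/390) PSW into the quadword format: the
         * mask, minus its base bit, moves into the high half, and the
         * addressing mode bit moves from the address word into the
         * mask's BA position.
         */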
        vcpu->arch.sie_block->gpsw.mask =
                (new_psw.mask & ~PSW32_MASK_BASE) << 32;
        vcpu->arch.sie_block->gpsw.mask |= new_psw.addr & PSW32_ADDR_AMODE;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr & ~PSW32_ADDR_AMODE;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_lpswe(struct kvm_vcpu *vcpu)
{
        u64 addr;
        psw_t new_psw;

        addr = kvm_s390_get_base_disp_s(vcpu);

        if (addr & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        if (copy_from_guest(vcpu, &new_psw, addr, sizeof(new_psw))) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        vcpu->arch.sie_block->gpsw.mask = new_psw.mask;
        vcpu->arch.sie_block->gpsw.addr = new_psw.addr;

        if ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_UNASSIGNED) ||
            (((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
              PSW_MASK_BA) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_31)) ||
            (!(vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) &&
             (vcpu->arch.sie_block->gpsw.addr & ~PSW_ADDR_24)) ||
            ((vcpu->arch.sie_block->gpsw.mask & PSW_MASK_ADDR_MODE) ==
             PSW_MASK_EA)) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        handle_new_psw(vcpu);
out:
        return 0;
}

static int handle_stidp(struct kvm_vcpu *vcpu)
{
        u64 operand2;
        int rc;

        vcpu->stat.instruction_stidp++;

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 7) {
                kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);
                goto out;
        }

        rc = put_guest(vcpu, vcpu->arch.stidp_data, (u64 __user *)operand2);
        if (rc) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out;
        }

        VCPU_EVENT(vcpu, 5, "%s", "store cpu id");
out:
        return 0;
}

static void handle_stsi_3_2_2(struct kvm_vcpu *vcpu, struct sysinfo_3_2_2 *mem)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int cpus = 0;
        int n;

        spin_lock(&fi->lock);
        for (n = 0; n < KVM_MAX_VCPUS; n++)
                if (fi->local_int[n])
                        cpus++;
        spin_unlock(&fi->lock);

        /* deal with other level 3 hypervisors */
        if (stsi(mem, 3, 2, 2))
                mem->count = 0;
        if (mem->count < 8)
                mem->count++;
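        /* Shift any existing hypervisor entries up one slot; KVM's own
         * entry is filled in at index 0 below. */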
        for (n = mem->count - 1; n > 0 ; n--)
                memcpy(&mem->vm[n], &mem->vm[n - 1], sizeof(mem->vm[0]));

        mem->vm[0].cpus_total = cpus;
        mem->vm[0].cpus_configured = cpus;
        mem->vm[0].cpus_standby = 0;
        mem->vm[0].cpus_reserved = 0;
        mem->vm[0].caf = 1000;
        memcpy(mem->vm[0].name, "KVMguest", 8);
        ASCEBC(mem->vm[0].name, 8);
        memcpy(mem->vm[0].cpi, "KVM/Linux       ", 16);
        ASCEBC(mem->vm[0].cpi, 16);
}

static int handle_stsi(struct kvm_vcpu *vcpu)
{
        int fc = (vcpu->run->s.regs.gprs[0] & 0xf0000000) >> 28;
        int sel1 = vcpu->run->s.regs.gprs[0] & 0xff;
        int sel2 = vcpu->run->s.regs.gprs[1] & 0xffff;
        u64 operand2;
        unsigned long mem;

        vcpu->stat.instruction_stsi++;
        VCPU_EVENT(vcpu, 4, "stsi: fc: %x sel1: %x sel2: %x", fc, sel1, sel2);

        operand2 = kvm_s390_get_base_disp_s(vcpu);

        if (operand2 & 0xfff && fc > 0)
                return kvm_s390_inject_program_int(vcpu, PGM_SPECIFICATION);

        switch (fc) {
        case 0:
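                /* Function code 0 queries the current configuration
                 * level: report level 3 (virtual machine) in gr0 and
                 * set condition code 0. */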
                vcpu->run->s.regs.gprs[0] = 3 << 28;
                vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
                return 0;
        case 1: /* same handling for 1 and 2 */
        case 2:
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                if (stsi((void *) mem, fc, sel1, sel2))
                        goto out_mem;
                break;
        case 3:
                if (sel1 != 2 || sel2 != 2)
                        goto out_fail;
                mem = get_zeroed_page(GFP_KERNEL);
                if (!mem)
                        goto out_fail;
                handle_stsi_3_2_2(vcpu, (void *) mem);
                break;
        default:
                goto out_fail;
        }

        if (copy_to_guest_absolute(vcpu, operand2, (void *) mem, PAGE_SIZE)) {
                kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
                goto out_mem;
        }
        trace_kvm_s390_handle_stsi(vcpu, fc, sel1, sel2, operand2);
        free_page(mem);
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        vcpu->run->s.regs.gprs[0] = 0;
        return 0;
out_mem:
        free_page(mem);
out_fail:
        /* condition code 3 */
        vcpu->arch.sie_block->gpsw.mask |= 3ul << 44;
        return 0;
}

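/* The handler for a 0xb2xx instruction is selected by its second
 * opcode byte. */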
static const intercept_handler_t b2_handlers[256] = {
        [0x02] = handle_stidp,
        [0x10] = handle_set_prefix,
        [0x11] = handle_store_prefix,
        [0x12] = handle_store_cpu_address,
        [0x29] = handle_skey,
        [0x2a] = handle_skey,
        [0x2b] = handle_skey,
        [0x30] = handle_io_inst,
        [0x31] = handle_io_inst,
        [0x32] = handle_io_inst,
        [0x33] = handle_io_inst,
        [0x34] = handle_io_inst,
        [0x35] = handle_io_inst,
        [0x36] = handle_io_inst,
        [0x37] = handle_io_inst,
        [0x38] = handle_io_inst,
        [0x39] = handle_io_inst,
        [0x3a] = handle_io_inst,
        [0x3b] = handle_io_inst,
        [0x3c] = handle_io_inst,
        [0x5f] = handle_io_inst,
        [0x74] = handle_io_inst,
        [0x76] = handle_io_inst,
        [0x7d] = handle_stsi,
        [0xb1] = handle_stfl,
        [0xb2] = handle_lpswe,
};

int kvm_s390_handle_b2(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /*
         * A lot of B2 instructions are privileged. We first check for
         * the privileged ones that we can handle in the kernel. If the
         * kernel can handle this instruction, we check for the problem
         * state bit and (a) handle the instruction or (b) send a code 2
         * program check. Anything else goes to userspace.
         */
        handler = b2_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                        return kvm_s390_inject_program_int(vcpu,
                                                           PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

static int handle_epsw(struct kvm_vcpu *vcpu)
{
        int reg1, reg2;

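        /* EPSW is an RRE instruction; extract R1 and R2 from the two
         * register nibbles of the IPB. */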
        reg1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
        reg2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;

        /* This basically extracts the mask half of the psw. */
        vcpu->run->s.regs.gprs[reg1] &= 0xffffffff00000000;
        vcpu->run->s.regs.gprs[reg1] |= vcpu->arch.sie_block->gpsw.mask >> 32;
        if (reg2) {
                vcpu->run->s.regs.gprs[reg2] &= 0xffffffff00000000;
                vcpu->run->s.regs.gprs[reg2] |=
                        vcpu->arch.sie_block->gpsw.mask & 0x00000000ffffffff;
        }
        return 0;
}

static const intercept_handler_t b9_handlers[256] = {
        [0x8d] = handle_epsw,
        [0x9c] = handle_io_inst,
};

int kvm_s390_handle_b9(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* This is handled just as for the B2 instructions. */
        handler = b9_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler) {
                if ((handler != handle_epsw) &&
                    (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE))
                        return kvm_s390_inject_program_int(vcpu,
                                                           PGM_PRIVILEGED_OPERATION);
                else
                        return handler(vcpu);
        }
        return -EOPNOTSUPP;
}

static const intercept_handler_t eb_handlers[256] = {
        [0x8a] = handle_io_inst,
};

int kvm_s390_handle_priv_eb(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        /* All eb instructions that end up here are privileged. */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);
        handler = eb_handlers[vcpu->arch.sie_block->ipb & 0xff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}

static int handle_tprot(struct kvm_vcpu *vcpu)
{
        u64 address1, address2;
        struct vm_area_struct *vma;
        unsigned long user_address;

        vcpu->stat.instruction_tprot++;

        kvm_s390_get_base_disp_sse(vcpu, &address1, &address2);

        /* we only handle the Linux memory detection case:
         * access key == 0
         * guest DAT == off
         * everything else goes to userspace. */
        if (address2 & 0xf0)
                return -EOPNOTSUPP;
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_DAT)
                return -EOPNOTSUPP;

        down_read(&current->mm->mmap_sem);
        user_address = __gmap_translate(address1, vcpu->arch.gmap);
        if (IS_ERR_VALUE(user_address))
                goto out_inject;
        vma = find_vma(current->mm, user_address);
        if (!vma)
                goto out_inject;
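        /* TPROT condition codes: 0 = fetch and store permitted,
         * 1 = fetch permitted, store protected, 2 = neither fetch nor
         * store permitted. */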
        vcpu->arch.sie_block->gpsw.mask &= ~(3ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && (vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (1ul << 44);
        if (!(vma->vm_flags & VM_WRITE) && !(vma->vm_flags & VM_READ))
                vcpu->arch.sie_block->gpsw.mask |= (2ul << 44);

        up_read(&current->mm->mmap_sem);
        return 0;

out_inject:
        up_read(&current->mm->mmap_sem);
        return kvm_s390_inject_program_int(vcpu, PGM_ADDRESSING);
}

int kvm_s390_handle_e5(struct kvm_vcpu *vcpu)
{
        /* For e5xx... instructions we only handle TPROT */
        if ((vcpu->arch.sie_block->ipa & 0x00ff) == 0x01)
                return handle_tprot(vcpu);
        return -EOPNOTSUPP;
}

static int handle_sckpf(struct kvm_vcpu *vcpu)
{
        u32 value;

        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_PRIVILEGED_OPERATION);

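        /* The new TOD programmable field comes from the low 16 bits of
         * gr0; the next 16 bits must be zero. */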
        if (vcpu->run->s.regs.gprs[0] & 0x00000000ffff0000)
                return kvm_s390_inject_program_int(vcpu,
                                                   PGM_SPECIFICATION);

        value = vcpu->run->s.regs.gprs[0] & 0x000000000000ffff;
        vcpu->arch.sie_block->todpr = value;

        return 0;
}

static const intercept_handler_t x01_handlers[256] = {
        [0x07] = handle_sckpf,
};

int kvm_s390_handle_01(struct kvm_vcpu *vcpu)
{
        intercept_handler_t handler;

        handler = x01_handlers[vcpu->arch.sie_block->ipa & 0x00ff];
        if (handler)
                return handler(vcpu);
        return -EOPNOTSUPP;
}