KVM: s390: Fixed CC of SIGP SET_PREFIX handler
arch/s390/kvm/sigp.c
/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2013
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/slab.h>
#include <asm/sigp.h>
#include "gaccess.h"
#include "kvm-s390.h"
#include "trace.h"

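/*
 * SIGP SENSE: report the status of the addressed CPU. The order is
 * accepted (CC 0) if the CPU is neither stopped nor has an external
 * call pending; otherwise the stopped/external-call-pending indicators
 * are stored in the caller's register and CC 1 (status stored) is
 * returned.
 */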
static int __sigp_sense(struct kvm_vcpu *vcpu, u16 cpu_addr,
                        u64 *reg)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = SIGP_CC_NOT_OPERATIONAL;
        else if (!(atomic_read(fi->local_int[cpu_addr]->cpuflags)
                   & (CPUSTAT_ECALL_PEND | CPUSTAT_STOPPED)))
                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        else {
                *reg &= 0xffffffff00000000UL;
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_ECALL_PEND)
                        *reg |= SIGP_STATUS_EXT_CALL_PENDING;
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_STOPPED)
                        *reg |= SIGP_STATUS_STOPPED;
                rc = SIGP_CC_STATUS_STORED;
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed status of cpu %x rc %x", cpu_addr, rc);
        return rc;
}

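/*
 * SIGP EMERGENCY SIGNAL: queue an emergency external interrupt, tagged
 * with the sender's CPU id, on the destination CPU's local interrupt
 * list and wake the CPU if it is waiting.
 */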
static int __sigp_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EMERGENCY;
        inti->emerg.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp emerg to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}

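/*
 * SIGP CONDITIONAL EMERGENCY SIGNAL: deliver the emergency signal only
 * if the state of the destination CPU calls for it, i.e. it is running,
 * disabled for I/O and external interrupts, waiting with a non-zero PSW
 * address, or running with the given ASN as its primary or secondary
 * ASN; otherwise SIGP_STATUS_INCORRECT_STATE is stored.
 */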
static int __sigp_conditional_emergency(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                        u16 asn, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        const u64 psw_int_mask = PSW_MASK_IO | PSW_MASK_EXT;
        u16 p_asn, s_asn;
        psw_t *psw;
        u32 flags;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        flags = atomic_read(&dst_vcpu->arch.sie_block->cpuflags);
        psw = &dst_vcpu->arch.sie_block->gpsw;
        p_asn = dst_vcpu->arch.sie_block->gcr[4] & 0xffff;  /* Primary ASN */
        s_asn = dst_vcpu->arch.sie_block->gcr[3] & 0xffff;  /* Secondary ASN */

        /* Deliver the emergency signal? */
        if (!(flags & CPUSTAT_STOPPED)
            || (psw->mask & psw_int_mask) != psw_int_mask
            || ((flags & CPUSTAT_WAIT) && psw->addr != 0)
            || (!(flags & CPUSTAT_WAIT) && (asn == p_asn || asn == s_asn))) {
                return __sigp_emergency(vcpu, cpu_addr);
        } else {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }
}

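/*
 * SIGP EXTERNAL CALL: queue an external call interrupt carrying the
 * sender's CPU id on the destination CPU and wake it if necessary.
 */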
static int __sigp_external_call(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        struct kvm_s390_interrupt_info *inti;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return -ENOMEM;

        inti->type = KVM_S390_INT_EXTERNAL_CALL;
        inti->extcall.code = vcpu->vcpu_id;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                kfree(inti);
                goto unlock;
        }
        spin_lock_bh(&li->lock);
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_EXT_INT, li->cpuflags);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        spin_unlock_bh(&li->lock);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
        VCPU_EVENT(vcpu, 4, "sent sigp ext call to cpu %x", cpu_addr);
unlock:
        spin_unlock(&fi->lock);
        return rc;
}

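/*
 * Queue a stop interrupt on a local interrupt list. Called with the
 * floating interrupt lock held. If the CPU is already stopped, the
 * interrupt is dropped; for stop-and-store this is signalled with
 * -ESHUTDOWN so that the caller can still store the status.
 */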
static int __inject_sigp_stop(struct kvm_s390_local_interrupt *li, int action)
{
        struct kvm_s390_interrupt_info *inti;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        inti = kzalloc(sizeof(*inti), GFP_ATOMIC);
        if (!inti)
                return -ENOMEM;
        inti->type = KVM_S390_SIGP_STOP;

        spin_lock_bh(&li->lock);
        if ((atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                kfree(inti);
                if ((action & ACTION_STORE_ON_STOP) != 0)
                        rc = -ESHUTDOWN;
                goto out;
        }
        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        atomic_set_mask(CPUSTAT_STOP_INT, li->cpuflags);
        li->action_bits |= action;
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
out:
        spin_unlock_bh(&li->lock);

        return rc;
}

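/*
 * SIGP STOP / SIGP STOP AND STORE STATUS: request that the destination
 * CPU stop. For stop-and-store on an already stopped CPU, the status is
 * saved here instead, after all spinlocks have been dropped.
 */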
static int __sigp_stop(struct kvm_vcpu *vcpu, u16 cpu_addr, int action)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                goto unlock;
        }

        rc = __inject_sigp_stop(li, action);

unlock:
        spin_unlock(&fi->lock);
        VCPU_EVENT(vcpu, 4, "sent sigp stop to cpu %x", cpu_addr);

        if ((action & ACTION_STORE_ON_STOP) != 0 && rc == -ESHUTDOWN) {
                /* If the CPU has already been stopped, we still have
                 * to save the status when doing stop-and-store. This
                 * has to be done after unlocking all spinlocks. */
                struct kvm_vcpu *dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
                rc = kvm_s390_store_status_unloaded(dst_vcpu,
                                                KVM_S390_STORE_STATUS_NOADDR);
        }

        return rc;
}

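/*
 * SIGP SET ARCHITECTURE: parameter 0 (switch back to ESA/390 mode) is
 * reported as not operational; for modes 1 and 2 (z/Architecture) any
 * outstanding pfault tokens and async page faults are cancelled on all
 * vcpus before the order is accepted.
 */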
static int __sigp_set_arch(struct kvm_vcpu *vcpu, u32 parameter)
{
        int rc;
        unsigned int i;
        struct kvm_vcpu *v;

        switch (parameter & 0xff) {
        case 0:
                rc = SIGP_CC_NOT_OPERATIONAL;
                break;
        case 1:
        case 2:
                kvm_for_each_vcpu(i, v, vcpu->kvm) {
                        v->arch.pfault_token = KVM_S390_PFAULT_TOKEN_INVALID;
                        kvm_clear_async_pf_completion_queue(v);
                }

                rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                break;
        default:
                rc = -EOPNOTSUPP;
        }
        return rc;
}

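/*
 * SIGP SET PREFIX: check that the new 8k prefix area is backed by guest
 * memory and that the destination CPU is stopped, then queue a
 * set-prefix interrupt for it. An unmapped address or a running CPU
 * results in CC 1 with the matching status bit stored.
 */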
static int __sigp_set_prefix(struct kvm_vcpu *vcpu, u16 cpu_addr, u32 address,
                             u64 *reg)
{
        struct kvm_s390_local_interrupt *li;
        struct kvm_vcpu *dst_vcpu = NULL;
        struct kvm_s390_interrupt_info *inti;
        int rc;
        u8 tmp;

        if (cpu_addr < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_addr);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;
        li = &dst_vcpu->arch.local_int;

        /* make sure that the new value is valid memory */
        address = address & 0x7fffe000u;
        if (copy_from_guest_absolute(vcpu, &tmp, address, 1) ||
            copy_from_guest_absolute(vcpu, &tmp, address + PAGE_SIZE, 1)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                return SIGP_CC_STATUS_STORED;
        }

        inti = kzalloc(sizeof(*inti), GFP_KERNEL);
        if (!inti)
                return SIGP_CC_BUSY;

        spin_lock_bh(&li->lock);
        /* cpu must be in stopped state */
        if (!(atomic_read(li->cpuflags) & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                rc = SIGP_CC_STATUS_STORED;
                kfree(inti);
                goto out_li;
        }

        inti->type = KVM_S390_SIGP_SET_PREFIX;
        inti->prefix.address = address;

        list_add_tail(&inti->list, &li->list);
        atomic_set(&li->active, 1);
        if (waitqueue_active(li->wq))
                wake_up_interruptible(li->wq);
        rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        VCPU_EVENT(vcpu, 4, "set prefix of cpu %02x to %x", cpu_addr, address);
out_li:
        spin_unlock_bh(&li->lock);
        return rc;
}

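/*
 * SIGP STORE STATUS AT ADDRESS: store the status of the (stopped)
 * destination CPU at the given 512-byte aligned absolute address;
 * a running CPU or an inaccessible address is reported via CC 1.
 */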
static int __sigp_store_status_at_addr(struct kvm_vcpu *vcpu, u16 cpu_id,
                                       u32 addr, u64 *reg)
{
        struct kvm_vcpu *dst_vcpu = NULL;
        int flags;
        int rc;

        if (cpu_id < KVM_MAX_VCPUS)
                dst_vcpu = kvm_get_vcpu(vcpu->kvm, cpu_id);
        if (!dst_vcpu)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock_bh(&dst_vcpu->arch.local_int.lock);
        flags = atomic_read(dst_vcpu->arch.local_int.cpuflags);
        spin_unlock_bh(&dst_vcpu->arch.local_int.lock);
        if (!(flags & CPUSTAT_STOPPED)) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INCORRECT_STATE;
                return SIGP_CC_STATUS_STORED;
        }

        addr &= 0x7ffffe00;
        rc = kvm_s390_store_status_unloaded(dst_vcpu, addr);
        if (rc == -EFAULT) {
                *reg &= 0xffffffff00000000UL;
                *reg |= SIGP_STATUS_INVALID_PARAMETER;
                rc = SIGP_CC_STATUS_STORED;
        }
        return rc;
}

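/*
 * SIGP SENSE RUNNING: CC 0 if the destination CPU is currently running;
 * otherwise SIGP_STATUS_NOT_RUNNING is stored and CC 1 returned.
 */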
static int __sigp_sense_running(struct kvm_vcpu *vcpu, u16 cpu_addr,
                                u64 *reg)
{
        int rc;
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        if (fi->local_int[cpu_addr] == NULL)
                rc = SIGP_CC_NOT_OPERATIONAL;
        else {
                if (atomic_read(fi->local_int[cpu_addr]->cpuflags)
                    & CPUSTAT_RUNNING) {
                        /* running */
                        rc = SIGP_CC_ORDER_CODE_ACCEPTED;
                } else {
                        /* not running */
                        *reg &= 0xffffffff00000000UL;
                        *reg |= SIGP_STATUS_NOT_RUNNING;
                        rc = SIGP_CC_STATUS_STORED;
                }
        }
        spin_unlock(&fi->lock);

        VCPU_EVENT(vcpu, 4, "sensed running status of cpu %x rc %x", cpu_addr,
                   rc);

        return rc;
}

/* Test whether the destination CPU is available and not busy */
static int sigp_check_callable(struct kvm_vcpu *vcpu, u16 cpu_addr)
{
        struct kvm_s390_float_interrupt *fi = &vcpu->kvm->arch.float_int;
        struct kvm_s390_local_interrupt *li;
        int rc = SIGP_CC_ORDER_CODE_ACCEPTED;

        if (cpu_addr >= KVM_MAX_VCPUS)
                return SIGP_CC_NOT_OPERATIONAL;

        spin_lock(&fi->lock);
        li = fi->local_int[cpu_addr];
        if (li == NULL) {
                rc = SIGP_CC_NOT_OPERATIONAL;
                goto out;
        }

        spin_lock_bh(&li->lock);
        if (li->action_bits & ACTION_STOP_ON_STOP)
                rc = SIGP_CC_BUSY;
        spin_unlock_bh(&li->lock);
out:
        spin_unlock(&fi->lock);
        return rc;
}

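/*
 * Intercept handler for the SIGP instruction: decode the order code,
 * CPU address and parameter from the guest's registers, dispatch to the
 * matching handler above and translate the result into the guest's
 * condition code. Orders handled in user space (e.g. START, RESTART)
 * return -EOPNOTSUPP instead.
 */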
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu)
{
        int r1 = (vcpu->arch.sie_block->ipa & 0x00f0) >> 4;
        int r3 = vcpu->arch.sie_block->ipa & 0x000f;
        u32 parameter;
        u16 cpu_addr = vcpu->run->s.regs.gprs[r3];
        u8 order_code;
        int rc;

        /* sigp in userspace can exit */
        if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
                return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);

        order_code = kvm_s390_get_base_disp_rs(vcpu);

        if (r1 % 2)
                parameter = vcpu->run->s.regs.gprs[r1];
        else
                parameter = vcpu->run->s.regs.gprs[r1 + 1];

        trace_kvm_s390_handle_sigp(vcpu, order_code, cpu_addr, parameter);
        switch (order_code) {
        case SIGP_SENSE:
                vcpu->stat.instruction_sigp_sense++;
                rc = __sigp_sense(vcpu, cpu_addr,
                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_EXTERNAL_CALL:
                vcpu->stat.instruction_sigp_external_call++;
                rc = __sigp_external_call(vcpu, cpu_addr);
                break;
        case SIGP_EMERGENCY_SIGNAL:
                vcpu->stat.instruction_sigp_emergency++;
                rc = __sigp_emergency(vcpu, cpu_addr);
                break;
        case SIGP_STOP:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STOP_ON_STOP);
                break;
        case SIGP_STOP_AND_STORE_STATUS:
                vcpu->stat.instruction_sigp_stop++;
                rc = __sigp_stop(vcpu, cpu_addr, ACTION_STORE_ON_STOP |
                                                 ACTION_STOP_ON_STOP);
                break;
        case SIGP_STORE_STATUS_AT_ADDRESS:
                rc = __sigp_store_status_at_addr(vcpu, cpu_addr, parameter,
                                                 &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SET_ARCHITECTURE:
                vcpu->stat.instruction_sigp_arch++;
                rc = __sigp_set_arch(vcpu, parameter);
                break;
        case SIGP_SET_PREFIX:
                vcpu->stat.instruction_sigp_prefix++;
                rc = __sigp_set_prefix(vcpu, cpu_addr, parameter,
                                       &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_COND_EMERGENCY_SIGNAL:
                rc = __sigp_conditional_emergency(vcpu, cpu_addr, parameter,
                                                  &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_SENSE_RUNNING:
                vcpu->stat.instruction_sigp_sense_running++;
                rc = __sigp_sense_running(vcpu, cpu_addr,
                                          &vcpu->run->s.regs.gprs[r1]);
                break;
        case SIGP_START:
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED)
                        rc = -EOPNOTSUPP;    /* Handle START in user space */
                break;
        case SIGP_RESTART:
                vcpu->stat.instruction_sigp_restart++;
                rc = sigp_check_callable(vcpu, cpu_addr);
                if (rc == SIGP_CC_ORDER_CODE_ACCEPTED) {
                        VCPU_EVENT(vcpu, 4,
                                   "sigp restart %x to handle userspace",
                                   cpu_addr);
                        /* user space must know about restart */
                        rc = -EOPNOTSUPP;
                }
                break;
        default:
                return -EOPNOTSUPP;
        }

        if (rc < 0)
                return rc;

        kvm_s390_set_psw_cc(vcpu, rc);
        return 0;
}