/*
 * handling interprocessor communication
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */
15 #include <linux/kvm.h>
16 #include <linux/kvm_host.h>
17 #include <linux/slab.h>
22 static int __sigp_sense(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
25 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
28 if (cpu_addr
>= KVM_MAX_VCPUS
)
29 return 3; /* not operational */
32 if (fi
->local_int
[cpu_addr
] == NULL
)
33 rc
= 3; /* not operational */
34 else if (!(atomic_read(fi
->local_int
[cpu_addr
]->cpuflags
)
36 *reg
&= 0xffffffff00000000UL
;
37 rc
= 1; /* status stored */
39 *reg
&= 0xffffffff00000000UL
;
40 *reg
|= SIGP_STATUS_STOPPED
;
41 rc
= 1; /* status stored */
43 spin_unlock(&fi
->lock
);
45 VCPU_EVENT(vcpu
, 4, "sensed status of cpu %x rc %x", cpu_addr
, rc
);
49 static int __sigp_emergency(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
51 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
52 struct kvm_s390_local_interrupt
*li
;
53 struct kvm_s390_interrupt_info
*inti
;
56 if (cpu_addr
>= KVM_MAX_VCPUS
)
57 return 3; /* not operational */
59 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
63 inti
->type
= KVM_S390_INT_EMERGENCY
;
64 inti
->emerg
.code
= vcpu
->vcpu_id
;
67 li
= fi
->local_int
[cpu_addr
];
69 rc
= 3; /* not operational */
73 spin_lock_bh(&li
->lock
);
74 list_add_tail(&inti
->list
, &li
->list
);
75 atomic_set(&li
->active
, 1);
76 atomic_set_mask(CPUSTAT_EXT_INT
, li
->cpuflags
);
77 if (waitqueue_active(&li
->wq
))
78 wake_up_interruptible(&li
->wq
);
79 spin_unlock_bh(&li
->lock
);
80 rc
= 0; /* order accepted */
81 VCPU_EVENT(vcpu
, 4, "sent sigp emerg to cpu %x", cpu_addr
);
83 spin_unlock(&fi
->lock
);
87 static int __sigp_external_call(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
89 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
90 struct kvm_s390_local_interrupt
*li
;
91 struct kvm_s390_interrupt_info
*inti
;
94 if (cpu_addr
>= KVM_MAX_VCPUS
)
95 return 3; /* not operational */
97 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
101 inti
->type
= KVM_S390_INT_EXTERNAL_CALL
;
102 inti
->extcall
.code
= vcpu
->vcpu_id
;
104 spin_lock(&fi
->lock
);
105 li
= fi
->local_int
[cpu_addr
];
107 rc
= 3; /* not operational */
111 spin_lock_bh(&li
->lock
);
112 list_add_tail(&inti
->list
, &li
->list
);
113 atomic_set(&li
->active
, 1);
114 atomic_set_mask(CPUSTAT_EXT_INT
, li
->cpuflags
);
115 if (waitqueue_active(&li
->wq
))
116 wake_up_interruptible(&li
->wq
);
117 spin_unlock_bh(&li
->lock
);
118 rc
= 0; /* order accepted */
119 VCPU_EVENT(vcpu
, 4, "sent sigp ext call to cpu %x", cpu_addr
);
121 spin_unlock(&fi
->lock
);
125 static int __inject_sigp_stop(struct kvm_s390_local_interrupt
*li
, int action
)
127 struct kvm_s390_interrupt_info
*inti
;
129 inti
= kzalloc(sizeof(*inti
), GFP_ATOMIC
);
132 inti
->type
= KVM_S390_SIGP_STOP
;
134 spin_lock_bh(&li
->lock
);
135 if ((atomic_read(li
->cpuflags
) & CPUSTAT_STOPPED
))
137 list_add_tail(&inti
->list
, &li
->list
);
138 atomic_set(&li
->active
, 1);
139 atomic_set_mask(CPUSTAT_STOP_INT
, li
->cpuflags
);
140 li
->action_bits
|= action
;
141 if (waitqueue_active(&li
->wq
))
142 wake_up_interruptible(&li
->wq
);
144 spin_unlock_bh(&li
->lock
);
146 return 0; /* order accepted */
149 static int __sigp_stop(struct kvm_vcpu
*vcpu
, u16 cpu_addr
, int action
)
151 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
152 struct kvm_s390_local_interrupt
*li
;
155 if (cpu_addr
>= KVM_MAX_VCPUS
)
156 return 3; /* not operational */
158 spin_lock(&fi
->lock
);
159 li
= fi
->local_int
[cpu_addr
];
161 rc
= 3; /* not operational */
165 rc
= __inject_sigp_stop(li
, action
);
168 spin_unlock(&fi
->lock
);
169 VCPU_EVENT(vcpu
, 4, "sent sigp stop to cpu %x", cpu_addr
);
173 int kvm_s390_inject_sigp_stop(struct kvm_vcpu
*vcpu
, int action
)
175 struct kvm_s390_local_interrupt
*li
= &vcpu
->arch
.local_int
;
176 return __inject_sigp_stop(li
, action
);
179 static int __sigp_set_arch(struct kvm_vcpu
*vcpu
, u32 parameter
)
183 switch (parameter
& 0xff) {
185 rc
= 3; /* not operational */
189 rc
= 0; /* order accepted */
197 static int __sigp_set_prefix(struct kvm_vcpu
*vcpu
, u16 cpu_addr
, u32 address
,
200 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
201 struct kvm_s390_local_interrupt
*li
= NULL
;
202 struct kvm_s390_interrupt_info
*inti
;
206 /* make sure that the new value is valid memory */
207 address
= address
& 0x7fffe000u
;
208 if (copy_from_guest_absolute(vcpu
, &tmp
, address
, 1) ||
209 copy_from_guest_absolute(vcpu
, &tmp
, address
+ PAGE_SIZE
, 1)) {
210 *reg
|= SIGP_STATUS_INVALID_PARAMETER
;
211 return 1; /* invalid parameter */
214 inti
= kzalloc(sizeof(*inti
), GFP_KERNEL
);
218 spin_lock(&fi
->lock
);
219 if (cpu_addr
< KVM_MAX_VCPUS
)
220 li
= fi
->local_int
[cpu_addr
];
223 rc
= 1; /* incorrect state */
224 *reg
&= SIGP_STATUS_INCORRECT_STATE
;
229 spin_lock_bh(&li
->lock
);
230 /* cpu must be in stopped state */
231 if (!(atomic_read(li
->cpuflags
) & CPUSTAT_STOPPED
)) {
232 rc
= 1; /* incorrect state */
233 *reg
&= SIGP_STATUS_INCORRECT_STATE
;
238 inti
->type
= KVM_S390_SIGP_SET_PREFIX
;
239 inti
->prefix
.address
= address
;
241 list_add_tail(&inti
->list
, &li
->list
);
242 atomic_set(&li
->active
, 1);
243 if (waitqueue_active(&li
->wq
))
244 wake_up_interruptible(&li
->wq
);
245 rc
= 0; /* order accepted */
247 VCPU_EVENT(vcpu
, 4, "set prefix of cpu %02x to %x", cpu_addr
, address
);
249 spin_unlock_bh(&li
->lock
);
251 spin_unlock(&fi
->lock
);
255 static int __sigp_sense_running(struct kvm_vcpu
*vcpu
, u16 cpu_addr
,
259 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
261 if (cpu_addr
>= KVM_MAX_VCPUS
)
262 return 3; /* not operational */
264 spin_lock(&fi
->lock
);
265 if (fi
->local_int
[cpu_addr
] == NULL
)
266 rc
= 3; /* not operational */
268 if (atomic_read(fi
->local_int
[cpu_addr
]->cpuflags
)
274 *reg
&= 0xffffffff00000000UL
;
275 *reg
|= SIGP_STATUS_NOT_RUNNING
;
279 spin_unlock(&fi
->lock
);
281 VCPU_EVENT(vcpu
, 4, "sensed running status of cpu %x rc %x", cpu_addr
,
287 static int __sigp_restart(struct kvm_vcpu
*vcpu
, u16 cpu_addr
)
290 struct kvm_s390_float_interrupt
*fi
= &vcpu
->kvm
->arch
.float_int
;
291 struct kvm_s390_local_interrupt
*li
;
293 if (cpu_addr
>= KVM_MAX_VCPUS
)
294 return 3; /* not operational */
296 spin_lock(&fi
->lock
);
297 li
= fi
->local_int
[cpu_addr
];
299 rc
= 3; /* not operational */
303 spin_lock_bh(&li
->lock
);
304 if (li
->action_bits
& ACTION_STOP_ON_STOP
)
307 VCPU_EVENT(vcpu
, 4, "sigp restart %x to handle userspace",
309 spin_unlock_bh(&li
->lock
);
311 spin_unlock(&fi
->lock
);
315 int kvm_s390_handle_sigp(struct kvm_vcpu
*vcpu
)
317 int r1
= (vcpu
->arch
.sie_block
->ipa
& 0x00f0) >> 4;
318 int r3
= vcpu
->arch
.sie_block
->ipa
& 0x000f;
319 int base2
= vcpu
->arch
.sie_block
->ipb
>> 28;
320 int disp2
= ((vcpu
->arch
.sie_block
->ipb
& 0x0fff0000) >> 16);
322 u16 cpu_addr
= vcpu
->run
->s
.regs
.gprs
[r3
];
326 /* sigp in userspace can exit */
327 if (vcpu
->arch
.sie_block
->gpsw
.mask
& PSW_MASK_PSTATE
)
328 return kvm_s390_inject_program_int(vcpu
,
329 PGM_PRIVILEGED_OPERATION
);
333 order_code
+= vcpu
->run
->s
.regs
.gprs
[base2
];
336 parameter
= vcpu
->run
->s
.regs
.gprs
[r1
];
338 parameter
= vcpu
->run
->s
.regs
.gprs
[r1
+ 1];
340 switch (order_code
) {
342 vcpu
->stat
.instruction_sigp_sense
++;
343 rc
= __sigp_sense(vcpu
, cpu_addr
,
344 &vcpu
->run
->s
.regs
.gprs
[r1
]);
346 case SIGP_EXTERNAL_CALL
:
347 vcpu
->stat
.instruction_sigp_external_call
++;
348 rc
= __sigp_external_call(vcpu
, cpu_addr
);
350 case SIGP_EMERGENCY_SIGNAL
:
351 vcpu
->stat
.instruction_sigp_emergency
++;
352 rc
= __sigp_emergency(vcpu
, cpu_addr
);
355 vcpu
->stat
.instruction_sigp_stop
++;
356 rc
= __sigp_stop(vcpu
, cpu_addr
, ACTION_STOP_ON_STOP
);
358 case SIGP_STOP_AND_STORE_STATUS
:
359 vcpu
->stat
.instruction_sigp_stop
++;
360 rc
= __sigp_stop(vcpu
, cpu_addr
, ACTION_STORE_ON_STOP
|
361 ACTION_STOP_ON_STOP
);
363 case SIGP_SET_ARCHITECTURE
:
364 vcpu
->stat
.instruction_sigp_arch
++;
365 rc
= __sigp_set_arch(vcpu
, parameter
);
367 case SIGP_SET_PREFIX
:
368 vcpu
->stat
.instruction_sigp_prefix
++;
369 rc
= __sigp_set_prefix(vcpu
, cpu_addr
, parameter
,
370 &vcpu
->run
->s
.regs
.gprs
[r1
]);
372 case SIGP_SENSE_RUNNING
:
373 vcpu
->stat
.instruction_sigp_sense_running
++;
374 rc
= __sigp_sense_running(vcpu
, cpu_addr
,
375 &vcpu
->run
->s
.regs
.gprs
[r1
]);
378 vcpu
->stat
.instruction_sigp_restart
++;
379 rc
= __sigp_restart(vcpu
, cpu_addr
);
380 if (rc
== 2) /* busy */
382 /* user space must know about restart */
390 vcpu
->arch
.sie_block
->gpsw
.mask
&= ~(3ul << 44);
391 vcpu
->arch
.sie_block
->gpsw
.mask
|= (rc
& 3ul) << 44;