/*
 * definitions for kvm on s390
 *
 * Copyright IBM Corp. 2008, 2009
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License (version 2 only)
 * as published by the Free Software Foundation.
 *
 *    Author(s): Carsten Otte <cotte@de.ibm.com>
 *               Christian Borntraeger <borntraeger@de.ibm.com>
 *               Christian Ehrhardt <ehrhardt@de.ibm.com>
 */

#ifndef ARCH_S390_KVM_S390_H
#define ARCH_S390_KVM_S390_H

#include <linux/hrtimer.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <asm/facility.h>
#include <asm/processor.h>
#include <asm/sclp.h>

typedef int (*intercept_handler_t)(struct kvm_vcpu *vcpu);
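
/*
 * Interception handlers share this signature so they can be dispatched
 * from function-pointer tables indexed by instruction code. A minimal
 * sketch of that pattern (table contents are illustrative; the real
 * tables live in intercept.c and priv.c):
 *
 *	static const intercept_handler_t handlers[256] = {
 *		[0x10] = handle_set_prefix,
 *	};
 *	intercept_handler_t h = handlers[vcpu->arch.sie_block->ipa & 0xff];
 *	return h ? h(vcpu) : -EOPNOTSUPP;
 */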

/* Transactional Memory Execution related macros */
#define IS_TE_ENABLED(vcpu)	((vcpu->arch.sie_block->ecb & 0x10))
#define TDB_FORMAT1		1
#define IS_ITDB_VALID(vcpu)	((*(char *)vcpu->arch.sie_block->itdba == TDB_FORMAT1))
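
/*
 * Background for the macros above: ECB bit 0x10 is the control that lets
 * the SIE interpret transactional execution for the guest, and the
 * interception TDB at sie_block->itdba starts with a format byte that
 * must read TDB_FORMAT1 before its contents may be trusted.
 */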

extern debug_info_t *kvm_s390_dbf;
#define KVM_EVENT(d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(kvm_s390_dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_kvm->arch.dbf, d_loglevel, d_string "\n", \
	  d_args); \
} while (0)

#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...)\
do { \
	debug_sprintf_event(d_vcpu->kvm->arch.dbf, d_loglevel, \
	  "%02d[%016lx-%016lx]: " d_string "\n", d_vcpu->vcpu_id, \
	  d_vcpu->arch.sie_block->gpsw.mask, d_vcpu->arch.sie_block->gpsw.addr,\
	  d_args); \
} while (0)
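
/*
 * All three event macros log through the s390 debug feature: KVM_EVENT
 * to the module-global kvm_s390_dbf, VM_EVENT to the per-guest
 * kvm->arch.dbf, and VCPU_EVENT additionally prefixes the vcpu id and
 * the guest PSW. Typical use (this same call appears in
 * kvm_s390_set_prefix() below):
 *
 *	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x",
 *		   vcpu->vcpu_id, prefix);
 */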

static inline int is_vcpu_stopped(struct kvm_vcpu *vcpu)
{
	return atomic_read(&vcpu->arch.sie_block->cpuflags) & CPUSTAT_STOPPED;
}

static inline int is_vcpu_idle(struct kvm_vcpu *vcpu)
{
	return test_bit(vcpu->vcpu_id, vcpu->arch.local_int.float_int->idle_mask);
}

static inline int kvm_is_ucontrol(struct kvm *kvm)
{
#ifdef CONFIG_KVM_S390_UCONTROL
	if (kvm->arch.gmap)
		return 0;
	return 1;
#else
	return 0;
#endif
}
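
/*
 * A user-controlled ("ucontrol") VM manages guest address translation
 * from user space and therefore has no kernel-maintained gmap; the NULL
 * gmap is what the helper above keys off. Without
 * CONFIG_KVM_S390_UCONTROL no VM can be user controlled.
 */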

#define GUEST_PREFIX_SHIFT 13
static inline u32 kvm_s390_get_prefix(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.sie_block->prefix << GUEST_PREFIX_SHIFT;
}

static inline void kvm_s390_set_prefix(struct kvm_vcpu *vcpu, u32 prefix)
{
	VCPU_EVENT(vcpu, 3, "set prefix of cpu %03u to 0x%x", vcpu->vcpu_id,
		   prefix);
	vcpu->arch.sie_block->prefix = prefix >> GUEST_PREFIX_SHIFT;
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_make_request(KVM_REQ_MMU_RELOAD, vcpu);
}
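
/*
 * The 13-bit shift matches the 8K (two-page) prefix area of
 * z/Architecture: the SIE block stores the prefix in 8K units. Moving
 * the prefix remaps guest absolute 0..8191, so a TLB flush is requested
 * and an MMU reload re-validates the mapping of the new prefix pages.
 */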

typedef u8 __bitwise ar_t;

static inline u64 kvm_s390_get_base_disp_s(struct kvm_vcpu *vcpu, ar_t *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}
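
/*
 * sie_block->ipb holds bytes 2..5 of the intercepted instruction text,
 * so for an S-format instruction its top halfword is B2||D2. With a
 * made-up ipb of 0x50230000 this decodes to base2 = 5 and disp2 = 0x023,
 * i.e. an effective address of gprs[5] + 0x23. The base register number
 * doubles as the access register number, hence *ar = base2.
 */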

static inline void kvm_s390_get_base_disp_sse(struct kvm_vcpu *vcpu,
					      u64 *address1, u64 *address2,
					      ar_t *ar_b1, ar_t *ar_b2)
{
	u32 base1 = (vcpu->arch.sie_block->ipb & 0xf0000000) >> 28;
	u32 disp1 = (vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16;
	u32 base2 = (vcpu->arch.sie_block->ipb & 0xf000) >> 12;
	u32 disp2 = vcpu->arch.sie_block->ipb & 0x0fff;

	*address1 = (base1 ? vcpu->run->s.regs.gprs[base1] : 0) + disp1;
	*address2 = (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;

	if (ar_b1)
		*ar_b1 = base1;
	if (ar_b2)
		*ar_b2 = base2;
}

static inline void kvm_s390_get_regs_rre(struct kvm_vcpu *vcpu, int *r1, int *r2)
{
	if (r1)
		*r1 = (vcpu->arch.sie_block->ipb & 0x00f00000) >> 20;
	if (r2)
		*r2 = (vcpu->arch.sie_block->ipb & 0x000f0000) >> 16;
}

static inline u64 kvm_s390_get_base_disp_rsy(struct kvm_vcpu *vcpu, ar_t *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16) +
			((vcpu->arch.sie_block->ipb & 0xff00) << 4);
	/* The displacement is a 20-bit _SIGNED_ value */
	if (disp2 & 0x80000)
		disp2 += 0xfff00000;

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + (long)(int)disp2;
}
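
/*
 * Worked example for the sign extension above: DL2 = 0xfff together
 * with DH2 = 0xff yields disp2 = 0xfffff; bit 0x80000 is set, adding
 * 0xfff00000 gives 0xffffffff, and the (long)(int) cast turns that
 * into a displacement of -1.
 */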

static inline u64 kvm_s390_get_base_disp_rs(struct kvm_vcpu *vcpu, ar_t *ar)
{
	u32 base2 = vcpu->arch.sie_block->ipb >> 28;
	u32 disp2 = ((vcpu->arch.sie_block->ipb & 0x0fff0000) >> 16);

	if (ar)
		*ar = base2;

	return (base2 ? vcpu->run->s.regs.gprs[base2] : 0) + disp2;
}

/* Set the condition code in the guest program status word */
static inline void kvm_s390_set_psw_cc(struct kvm_vcpu *vcpu, unsigned long cc)
{
	vcpu->arch.sie_block->gpsw.mask &= ~(3UL << 44);
	vcpu->arch.sie_block->gpsw.mask |= cc << 44;
}
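
/*
 * The condition code occupies PSW bits 18-19 (big-endian bit numbering),
 * i.e. bit positions 45-44 counted from the right of the 64-bit mask
 * word, which is why both the clear mask and the new value are shifted
 * by 44.
 */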

/* test availability of facility in a kvm instance */
static inline int test_kvm_facility(struct kvm *kvm, unsigned long nr)
{
	return __test_facility(nr, kvm->arch.model.fac_mask) &&
		__test_facility(nr, kvm->arch.model.fac_list);
}

static inline int set_kvm_facility(u64 *fac_list, unsigned long nr)
{
	unsigned char *ptr;

	if (nr >= MAX_FACILITY_BIT)
		return -EINVAL;
	ptr = (unsigned char *) fac_list + (nr >> 3);
	*ptr |= (0x80UL >> (nr & 7));
	return 0;
}
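
/*
 * Facility bits are numbered MSB-first within each byte (bit 0 is the
 * 0x80 bit of byte 0), matching the STFLE layout. Example: nr = 3
 * selects byte 3 >> 3 = 0 and mask 0x80 >> 3 = 0x10.
 */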

static inline int test_kvm_cpu_feat(struct kvm *kvm, unsigned long nr)
{
	WARN_ON_ONCE(nr >= KVM_S390_VM_CPU_FEAT_NR_BITS);
	return test_bit_inv(nr, kvm->arch.cpu_feat);
}

/* are cpu states controlled by user space? */
static inline int kvm_s390_user_cpu_state_ctrl(struct kvm *kvm)
{
	return kvm->arch.user_cpu_state_ctrl != 0;
}

/* implemented in interrupt.c */
int kvm_s390_handle_wait(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_wakeup(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_s390_idle_wakeup(struct hrtimer *timer);
int __must_check kvm_s390_deliver_pending_interrupts(struct kvm_vcpu *vcpu);
void kvm_s390_clear_local_irqs(struct kvm_vcpu *vcpu);
void kvm_s390_clear_float_irqs(struct kvm *kvm);
int __must_check kvm_s390_inject_vm(struct kvm *kvm,
				    struct kvm_s390_interrupt *s390int);
int __must_check kvm_s390_inject_vcpu(struct kvm_vcpu *vcpu,
				      struct kvm_s390_irq *irq);
static inline int kvm_s390_inject_prog_irq(struct kvm_vcpu *vcpu,
					   struct kvm_s390_pgm_info *pgm_info)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm = *pgm_info,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
static inline int kvm_s390_inject_program_int(struct kvm_vcpu *vcpu, u16 code)
{
	struct kvm_s390_irq irq = {
		.type = KVM_S390_PROGRAM_INT,
		.u.pgm.code = code,
	};

	return kvm_s390_inject_vcpu(vcpu, &irq);
}
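
/*
 * Instruction handlers typically bail out through these helpers, e.g.
 * (this guard appears throughout priv.c; the PGM_* program-interruption
 * codes come from the s390 asm headers):
 *
 *	if (vcpu->arch.sie_block->gpsw.mask & PSW_MASK_PSTATE)
 *		return kvm_s390_inject_program_int(vcpu, PGM_PRIVILEGED_OP);
 */
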
struct kvm_s390_interrupt_info *kvm_s390_get_io_int(struct kvm *kvm,
						    u64 isc_mask, u32 schid);
int kvm_s390_reinject_io_int(struct kvm *kvm,
			     struct kvm_s390_interrupt_info *inti);
int kvm_s390_mask_adapter(struct kvm *kvm, unsigned int id, bool masked);

/* implemented in intercept.c */
u8 kvm_s390_get_ilen(struct kvm_vcpu *vcpu);
int kvm_handle_sie_intercept(struct kvm_vcpu *vcpu);
static inline void kvm_s390_rewind_psw(struct kvm_vcpu *vcpu, int ilen)
{
	struct kvm_s390_sie_block *sie_block = vcpu->arch.sie_block;

	sie_block->gpsw.addr = __rewind_psw(sie_block->gpsw, ilen);
}
static inline void kvm_s390_forward_psw(struct kvm_vcpu *vcpu, int ilen)
{
	kvm_s390_rewind_psw(vcpu, -ilen);
}
static inline void kvm_s390_retry_instr(struct kvm_vcpu *vcpu)
{
	/* don't inject PER events if we re-execute the instruction */
	vcpu->arch.sie_block->icptstatus &= ~0x02;
	kvm_s390_rewind_psw(vcpu, kvm_s390_get_ilen(vcpu));
}
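
/*
 * s390 instructions are 2, 4 or 6 bytes long and kvm_s390_get_ilen()
 * reports the length of the last intercepted instruction, so retrying
 * steps the PSW back by exactly one instruction, while forwarding
 * (a negative rewind) moves past an instruction emulated to completion.
 */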

/* implemented in priv.c */
int is_valid_psw(psw_t *psw);
int kvm_s390_handle_aa(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b2(struct kvm_vcpu *vcpu);
int kvm_s390_handle_e5(struct kvm_vcpu *vcpu);
int kvm_s390_handle_01(struct kvm_vcpu *vcpu);
int kvm_s390_handle_b9(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lpsw(struct kvm_vcpu *vcpu);
int kvm_s390_handle_stctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_lctl(struct kvm_vcpu *vcpu);
int kvm_s390_handle_eb(struct kvm_vcpu *vcpu);

/* implemented in vsie.c */
int kvm_s390_handle_vsie(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_kick(struct kvm_vcpu *vcpu);
void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, unsigned long start,
				 unsigned long end);
void kvm_s390_vsie_init(struct kvm *kvm);
void kvm_s390_vsie_destroy(struct kvm *kvm);

/* implemented in sigp.c */
int kvm_s390_handle_sigp(struct kvm_vcpu *vcpu);
int kvm_s390_handle_sigp_pei(struct kvm_vcpu *vcpu);

/* implemented in sthyi.c */
int handle_sthyi(struct kvm_vcpu *vcpu);

/* implemented in kvm-s390.c */
void kvm_s390_set_tod_clock(struct kvm *kvm, u64 tod);
long kvm_arch_fault_in_page(struct kvm_vcpu *vcpu, gpa_t gpa, int writable);
int kvm_s390_store_status_unloaded(struct kvm_vcpu *vcpu, unsigned long addr);
int kvm_s390_vcpu_store_status(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_s390_vcpu_start(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_stop(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unblock(struct kvm_vcpu *vcpu);
void exit_sie(struct kvm_vcpu *vcpu);
void kvm_s390_sync_request(int req, struct kvm_vcpu *vcpu);
int kvm_s390_vcpu_setup_cmma(struct kvm_vcpu *vcpu);
void kvm_s390_vcpu_unsetup_cmma(struct kvm_vcpu *vcpu);
unsigned long kvm_s390_fac_list_mask_size(void);
extern unsigned long kvm_s390_fac_list_mask[];
void kvm_s390_set_cpu_timer(struct kvm_vcpu *vcpu, __u64 cputm);
__u64 kvm_s390_get_cpu_timer(struct kvm_vcpu *vcpu);

/* implemented in diag.c */
int kvm_s390_handle_diag(struct kvm_vcpu *vcpu);

static inline void kvm_s390_vcpu_block_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	WARN_ON(!mutex_is_locked(&kvm->lock));
	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_block(vcpu);
}

static inline void kvm_s390_vcpu_unblock_all(struct kvm *kvm)
{
	int i;
	struct kvm_vcpu *vcpu;

	kvm_for_each_vcpu(i, vcpu, kvm)
		kvm_s390_vcpu_unblock(vcpu);
}

static inline u64 kvm_s390_get_tod_clock_fast(struct kvm *kvm)
{
	u64 rc;

	preempt_disable();
	rc = get_tod_clock_fast() + kvm->arch.epoch;
	preempt_enable();
	return rc;
}
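
/*
 * The guest TOD is the host TOD plus the per-VM epoch; preemption is
 * disabled so both values are read as a consistent pair on one CPU.
 */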

/**
 * kvm_s390_inject_prog_cond - conditionally inject a program check
 * @vcpu: virtual cpu
 * @rc: original return/error code
 *
 * This function is supposed to be used after regular guest access functions
 * failed, to conditionally inject a program check to a vcpu. The typical
 * pattern would look like
 *
 * rc = write_guest(vcpu, addr, data, len);
 * if (rc)
 *	return kvm_s390_inject_prog_cond(vcpu, rc);
 *
 * A negative return code from guest access functions implies an internal error
 * such as out of memory. In these cases no program check should be injected
 * to the guest.
 * A positive value implies that an exception happened while accessing a guest's
 * memory. In this case all data belonging to the corresponding program check
 * has been stored in vcpu->arch.pgm and can be injected with
 * kvm_s390_inject_prog_irq().
 *
 * Returns: - the original @rc value if @rc was negative (internal error)
 *	    - zero if @rc was already zero
 *	    - zero or error code from injecting if @rc was positive
 *	      (program check injected to @vcpu)
 */
static inline int kvm_s390_inject_prog_cond(struct kvm_vcpu *vcpu, int rc)
{
	if (rc <= 0)
		return rc;
	return kvm_s390_inject_prog_irq(vcpu, &vcpu->arch.pgm);
}

int s390int_to_s390irq(struct kvm_s390_interrupt *s390int,
		       struct kvm_s390_irq *s390irq);

/* implemented in interrupt.c */
int kvm_s390_vcpu_has_irq(struct kvm_vcpu *vcpu, int exclude_stop);
int psw_extint_disabled(struct kvm_vcpu *vcpu);
void kvm_s390_destroy_adapters(struct kvm *kvm);
int kvm_s390_ext_call_pending(struct kvm_vcpu *vcpu);
extern struct kvm_device_ops kvm_flic_ops;
int kvm_s390_is_stop_irq_pending(struct kvm_vcpu *vcpu);
void kvm_s390_clear_stop_irq(struct kvm_vcpu *vcpu);
int kvm_s390_set_irq_state(struct kvm_vcpu *vcpu,
			   void __user *buf, int len);
int kvm_s390_get_irq_state(struct kvm_vcpu *vcpu,
			   __u8 __user *buf, int len);

/* implemented in guestdbg.c */
void kvm_s390_backup_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_restore_guest_per_regs(struct kvm_vcpu *vcpu);
void kvm_s390_patch_guest_per_regs(struct kvm_vcpu *vcpu);
int kvm_s390_import_bp_data(struct kvm_vcpu *vcpu,
			    struct kvm_guest_debug *dbg);
void kvm_s390_clear_bp_data(struct kvm_vcpu *vcpu);
void kvm_s390_prepare_debug_exit(struct kvm_vcpu *vcpu);
int kvm_s390_handle_per_ifetch_icpt(struct kvm_vcpu *vcpu);
void kvm_s390_handle_per_event(struct kvm_vcpu *vcpu);

/* support for Basic/Extended SCA handling */
static inline union ipte_control *kvm_s390_get_ipte_control(struct kvm *kvm)
{
	struct bsca_block *sca = kvm->arch.sca; /* SCA version doesn't matter */

	return &sca->ipte_control;
}
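
/*
 * The cast above is fine for both SCA formats: ipte_control sits at the
 * same offset in the basic and the extended SCA, which is what the
 * "SCA version doesn't matter" comment relies on.
 */
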
static inline int kvm_s390_use_sca_entries(void)
{
	/*
	 * Without SIGP interpretation, only SRS interpretation (if available)
	 * might use the entries. By not setting the entries and keeping them
	 * invalid, hardware will not access them but intercept.
	 */
	return sclp.has_sigpif;
}
#endif /* ARCH_S390_KVM_S390_H */