arch/powerpc/include/asm/kvm_ppc.h
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT 0x00dddd00

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong. go again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
        INST_GENERIC,
        INST_SC,              /* system call */
};

enum xlate_instdata {
        XLATE_INST,           /* translate instruction address */
        XLATE_DATA            /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,           /* check for read permissions */
        XLATE_WRITE           /* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                unsigned int rt, unsigned int bytes,
                int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                unsigned int rt, unsigned int bytes,
                int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                u64 val, unsigned int bytes,
                int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
                ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                ulong dear_flags,
                ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
                struct kvm *kvm, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                unsigned long ioba, unsigned long npages);
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
                unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
                unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                struct kvm_memory_slot *free,
                struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                struct kvm_memory_slot *slot,
                unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                struct kvm_memory_slot *memslot,
                const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                const struct kvm_userspace_memory_region *mem,
                const struct kvm_memory_slot *old,
                const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
        u32 wval;
        u64 dval;
        vector128 vval;
        u64 vsxval[2];
        struct {
                u64 addr;
                u64 length;
        } vpaval;
};

struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                        union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                        union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                        struct kvm_memory_slot *memslot,
                        const struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                        const struct kvm_userspace_memory_region *mem,
                        const struct kvm_memory_slot *old,
                        const struct kvm_memory_slot *new);
        int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                        unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                        struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                        unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                        unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                        unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
        int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
                        struct irq_bypass_producer *);
        void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
                        struct irq_bypass_producer *);
        int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
        int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                enum instruction_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if the exit path failed to do so */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                                swab32(vcpu->arch.last_inst) :
                                vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}
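
/*
 * Illustrative usage (a sketch, not part of this header): an emulation
 * handler would typically fetch the last guest instruction and bail out on
 * anything other than EMULATE_DONE, e.g.:
 *
 *	u32 inst;
 *	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *	if (ret != EMULATE_DONE)
 *		return ret;	// e.g. EMULATE_AGAIN: take the exit again and retry
 *
 * and only then decode and emulate "inst".
 */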

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extract a bit field from an instruction, using PowerPC (big-endian) bit
 * numbering: bit 0 is the most significant bit, and both the msb and lsb
 * bits are included in the result.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}
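
/*
 * Worked example (illustrative only): with a 32-bit instruction held in the
 * low word of "inst", IBM bit 32 is the instruction's most significant bit,
 * so the 6-bit primary opcode occupies bits 32-37:
 *
 *	u32 op = kvmppc_get_field(inst, 32, 37);	// == (inst >> 26) & 0x3f
 */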

/*
 * Replace a bit field in an instruction, using the same PowerPC (big-endian)
 * bit numbering as kvmppc_get_field().
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}

#define one_reg_size(id) \
        (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg) ({ \
        union kvmppc_one_reg __u; \
        switch (one_reg_size(id)) { \
        case 4: __u.wval = (reg); break; \
        case 8: __u.dval = (reg); break; \
        default: BUG(); \
        } \
        __u; \
})


#define set_reg_val(id, val) ({ \
        u64 __v; \
        switch (one_reg_size(id)) { \
        case 4: __v = (val).wval; break; \
        case 8: __v = (val).dval; break; \
        default: BUG(); \
        } \
        __v; \
})
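
/*
 * Illustrative sketch: a get_one_reg/set_one_reg handler would use these
 * macros to move a value between a vcpu field and the ONE_REG union, with the
 * access width taken from the register id.  KVM_REG_PPC_SOME_SPR and
 * vcpu->arch.some_spr below are hypothetical names, not part of the API:
 *
 *	case KVM_REG_PPC_SOME_SPR:
 *		*val = get_reg_val(id, vcpu->arch.some_spr);	// read out
 *		break;
 *	...
 *	case KVM_REG_PPC_SOME_SPR:
 *		vcpu->arch.some_spr = set_reg_val(id, *val);	// write back
 *		break;
 */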

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                struct kvm *kvm)
{
        if (kvm && kvm_irq_bypass)
                return kvm->arch.pimap;
        return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
                struct kvmppc_irq_map *irq_map,
                struct kvmppc_passthru_irqmap *pimap,
                bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                struct kvm *kvm)
{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
                unsigned long server)
{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
                struct kvm_irq_level *args)
{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{ return 0; }
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                unsigned long liobn, unsigned long ioba,
                unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                unsigned long pte_index, unsigned long avpn,
                unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations we want to set up while running in real mode
 * in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
        unsigned long raw;
        struct {
                u32 in_host;
                u32 rm_action;
        };
};

struct kvmppc_host_rm_core {
        union kvmppc_rm_state rm_state;
        void *rm_data;
        char pad[112];
};

struct kvmppc_host_rm_ops {
        struct kvmppc_host_rm_core *rm_core;
        void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}

/*
 * Shared struct helpers.  The shared struct can be little or big endian,
 * depending on the guest endianness, so provide helpers that handle both
 * layouts.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr) \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
        return mfspr(bookehv_spr); \
} \

#define SPRNG_WRAPPER_SET(reg, bookehv_spr) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val) \
{ \
        mtspr(bookehv_spr, val); \
} \

#define SHARED_WRAPPER_GET(reg, size) \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu) \
{ \
        if (kvmppc_shared_big_endian(vcpu)) \
                return be##size##_to_cpu(vcpu->arch.shared->reg); \
        else \
                return le##size##_to_cpu(vcpu->arch.shared->reg); \
} \

#define SHARED_WRAPPER_SET(reg, size) \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{ \
        if (kvmppc_shared_big_endian(vcpu)) \
                vcpu->arch.shared->reg = cpu_to_be##size(val); \
        else \
                vcpu->arch.shared->reg = cpu_to_le##size(val); \
} \

#define SHARED_WRAPPER(reg, size) \
        SHARED_WRAPPER_GET(reg, size) \
        SHARED_WRAPPER_SET(reg, size) \

#define SPRNG_WRAPPER(reg, bookehv_spr) \
        SPRNG_WRAPPER_GET(reg, bookehv_spr) \
        SPRNG_WRAPPER_SET(reg, bookehv_spr) \

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
        SPRNG_WRAPPER(reg, bookehv_spr) \

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr) \
        SHARED_WRAPPER(reg, size) \

#endif

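/*
 * The instantiations below generate the register accessors used throughout
 * KVM PPC; for example, SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0) produces
 * kvmppc_get_sprg0() and kvmppc_set_sprg0().  Under CONFIG_KVM_BOOKE_HV these
 * read/write the guest SPR directly via mfspr()/mtspr(); otherwise they go
 * through the shared page using the endianness helpers above.
 */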
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
                return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
                vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Call this after prepare_to_enter.  It puts the lazy-EE and IRQ-disabled
 * tracking state back to normal mode, without actually enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}
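
/*
 * Illustrative example: for an indexed-form access such as "lwzx rt,ra,rb",
 * an emulation path would compute the effective address as
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * where get_ra()/get_rb() name the usual instruction-field extractors
 * (assumed here); the result is truncated to 32 bits when the guest is not
 * in 64-bit mode (MSR_SF/MSR_CM clear).
 */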

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */