/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to
 * support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			struct kvm_memory_slot *memslot, unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm_vcpu *vcpu, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
		unsigned long ioba, unsigned long npages);
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
		unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
		unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
		unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_list, unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
		unsigned long liobn, unsigned long ioba,
		unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
					enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

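/*
 * Illustrative use from an emulation path (a sketch, not a real caller):
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;    (fetch failed; let the caller retry)
 *	... decode inst ...
 */
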
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts instruction bits, with bits numbered according to the spec:
 * the leftmost (most significant) bit is bit zero. Both given bits are
 * included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces instruction bits, with the same spec ordering (leftmost bit
 * is bit zero).
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

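/*
 * Worked example (illustrative): with a 32-bit instruction image held in
 * the low word of the u64, instruction bit N (0 = MSB of the 32-bit word)
 * sits at 64-bit position N + 32. So for a D-form instruction,
 * kvmppc_get_field(inst, 32 + 6, 32 + 10) extracts the RT field:
 * the shift is 63 - 42 = 21 and the mask is 0x1f, picking out bits 6..10.
 */
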
#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})


#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

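/*
 * For example (illustrative): a KVM_REG_SIZE_U64 ONE_REG id makes
 * one_reg_size(id) evaluate to 8, so get_reg_val(id, some_u64) fills in
 * and yields __u.dval, while set_reg_val(id, val) hands back val.dval.
 */
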
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn,
		      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations we want to set up while running in real mode
 * in the guest, operating on the XICS.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness, so expose helpers for both layouts.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

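/*
 * For example (illustrative), SHARED_WRAPPER(sprg4, 64) below defines
 * kvmppc_get_sprg4() and kvmppc_set_sprg4(), which byte-swap
 * vcpu->arch.shared->sprg4 to and from guest endianness, while
 * SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0) instead reads and writes
 * the SPRN_GSRR0 register directly on Booke-HV.
 */
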
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

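/*
 * Typical call sequence (an illustrative sketch of a caller, assuming the
 * usual kvmppc_prepare_to_enter() convention of returning <= 0 when the
 * guest must not be entered):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);
 *	if (r <= 0)
 *		goto out;
 *	kvmppc_fix_ee_before_entry();
 *	... enter the guest ...
 */
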
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

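/*
 * Illustrative: for a guest in 32-bit mode (the msr_64bit bit clear) with
 * GPR(ra) = 0xfffffff0 and GPR(rb) = 0x20, the sum 0x100000010 is
 * truncated to its low 32 bits, so kvmppc_get_ea_indexed() returns 0x10.
 */
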
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */