/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

enum emulation_result {
	EMULATE_DONE,		/* no further processing */
	EMULATE_DO_MMIO,	/* kvm_run filled with MMIO request */
	EMULATE_FAIL,		/* can't emulate this instruction */
	EMULATE_AGAIN,		/* something went wrong. go again */
	EMULATE_EXIT_USER,	/* emulation requires exit to user-space */
};

enum instruction_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

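/*
 * For illustration, a sketch of how these enums pair with kvmppc_xlate()
 * (declared below) when translating a guest data address for a read.
 * The error-handling convention shown is an assumption, not code from
 * this file:
 *
 *	struct kvmppc_pte pte;
 *
 *	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte) < 0)
 *		;	// assumed: no valid, readable mapping exists
 *	else
 *		;	// pte now describes the translated address
 */
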
extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
				      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
						unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags,
					   ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot,
			    unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
					  struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *free,
				     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *slot,
				      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	struct {
		u64	addr;
		u64	length;
	}	vpaval;
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
	struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     struct kvm_userspace_memory_region *mem);
	void (*commit_memory_region)(struct kvm *kvm,
				     struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old);
	int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
	int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
			       unsigned long end);
	int (*age_hva)(struct kvm *kvm, unsigned long hva);
	int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
	void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
	void (*mmu_destroy)(struct kvm_vcpu *vcpu);
	void (*free_memslot)(struct kvm_memory_slot *free,
			     struct kvm_memory_slot *dont);
	int (*create_memslot)(struct kvm_memory_slot *slot,
			      unsigned long npages);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

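/*
 * An illustrative sketch of how a backend publishes its kvmppc_ops.
 * This lives in the backend module, not in this header, and the
 * callback names below are hypothetical:
 *
 *	static struct kvmppc_ops kvm_ops_pr = {
 *		.get_sregs	= kvmppc_get_sregs_pr,	// hypothetical
 *		.set_sregs	= kvmppc_set_sregs_pr,	// hypothetical
 *		.vcpu_run	= kvmppc_vcpu_run_pr,	// hypothetical
 *		// ... remaining callbacks ...
 *	};
 *
 *	kvm_ops_pr.owner = THIS_MODULE;
 *	kvmppc_pr_ops = &kvm_ops_pr;
 */
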
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				       enum instruction_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
			       swab32(vcpu->arch.last_inst) :
			       vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

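/*
 * Sketch of the expected caller pattern (illustrative, not part of this
 * header): an emulation path fetches the trapping instruction and backs
 * out if the fetch itself could not complete.
 *
 *	u32 inst;
 *	int ret = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *	if (ret != EMULATE_DONE)
 *		return ret;	// e.g. EMULATE_AGAIN: re-enter the guest
 *	// decode and emulate "inst" here
 */
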
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is numbered zero (MSB-0 ordering).
 * All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}

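/*
 * Worked example: the helpers number the full u64 in MSB-0 order, so
 * spec bit n of a 32-bit instruction held in the low word corresponds
 * to bit n + 32 here. The primary opcode (spec bits 0-5) is therefore:
 *
 *	u32 op = kvmppc_get_field(inst, 32, 37);  // == (inst >> 26) & 0x3f
 */
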
#define one_reg_size(id) \
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})

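/*
 * Sketch of typical use when servicing the ONE_REG ioctls (illustrative;
 * the register id and vcpu field below are examples, not mandated by
 * this header):
 *
 *	case KVM_REG_PPC_DABR:
 *		*val = get_reg_val(id, vcpu->arch.dabr);
 *		break;
 *
 * and, in the set direction:
 *
 *	vcpu->arch.dabr = set_reg_val(id, *val);
 */
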
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
	paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void) { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
					 unsigned long server)
	{ return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
					struct kvm_irq_level *args)
	{ return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
			     u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
{
	return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
					       struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_arch_1, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_arch_1, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(bookehv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(bookehv_spr, val);					\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, bookehv_spr)					\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)				\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SPRNG_WRAPPER(reg, bookehv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

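/*
 * For illustration, SHARED_WRAPPER(sprg4, 64) below expands to:
 *
 *	static inline u64 kvmppc_get_sprg4(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be64_to_cpu(vcpu->arch.shared->sprg4);
 *		else
 *			return le64_to_cpu(vcpu->arch.shared->sprg4);
 *	}
 *
 * plus the matching kvmppc_set_sprg4(). With CONFIG_KVM_BOOKE_HV,
 * SHARED_SPRNG_WRAPPER instead generates accessors that hit the guest
 * SPR directly via mfspr()/mtspr().
 */
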
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	local_paca->soft_enabled = 1;
#endif
}

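/*
 * Illustrative ordering on the guest-entry path (a sketch under the
 * assumption that the surrounding loop lives in the arch code, not in
 * this header):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);	// hard-disables interrupts
 *	if (r <= 0)
 *		return r;			// e.g. a signal is pending
 *	kvmppc_fix_ee_before_entry();		// fix lazy-EE state; IRQs stay off
 *	r = __kvmppc_vcpu_run(run, vcpu);
 */
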
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}

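/*
 * Worked example: for an indexed load such as "lwzx rt, ra, rb" the ISA
 * defines EA = (ra ? GPR(ra) : 0) + GPR(rb), which is what the helper
 * above computes; when the guest is not in 64-bit mode (MSR[CM] clear
 * on Book3E, MSR[SF] clear on Book3S), the EA is truncated to 32 bits.
 */
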
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */