/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * include order weirdness.
 */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#include <asm/xive.h>
#include <asm/cpu_has_feature.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used
 * to support software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT	0x00dddd00

enum emulation_result {
	EMULATE_DONE,         /* no further processing */
	EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
	EMULATE_FAIL,         /* can't emulate this instruction */
	EMULATE_AGAIN,        /* something went wrong. go again */
	EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_fetch_type {
	INST_GENERIC,
	INST_SC,		/* system call */
};

enum xlate_instdata {
	XLATE_INST,		/* translate instruction address */
	XLATE_DATA		/* translate data address */
};

enum xlate_readwrite {
	XLATE_READ,		/* check for read permissions */
	XLATE_WRITE		/* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_vcpu *vcpu,
			      unsigned int rt, unsigned int bytes,
			      int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_vcpu *vcpu,
			       unsigned int rt, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian, int mmio_sign_extend);
extern int kvmppc_handle_vmx_load(struct kvm_vcpu *vcpu,
				  unsigned int rt, unsigned int bytes,
				  int is_default_endian);
extern int kvmppc_handle_vmx_store(struct kvm_vcpu *vcpu,
				   unsigned int rs, unsigned int bytes,
				   int is_default_endian);
extern int kvmppc_handle_store(struct kvm_vcpu *vcpu,
			       u64 val, unsigned int bytes,
			       int is_default_endian);
extern int kvmppc_handle_vsx_store(struct kvm_vcpu *vcpu,
				   int rs, unsigned int bytes,
				   int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
				 enum instruction_fetch_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
		     bool data);

extern int kvmppc_emulate_instruction(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
			   unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
			      gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
			enum xlate_instdata xlid, enum xlate_readwrite xlrw,
			struct kvmppc_pte *pte);

extern int kvmppc_core_vcpu_create(struct kvm_vcpu *vcpu);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
				      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_machine_check(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_syscall(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_vsx_unavail(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern int kvmppc_allocate_hpt(struct kvm_hpt_info *info, u32 order);
extern void kvmppc_set_hpt(struct kvm *kvm, struct kvm_hpt_info *info);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, int order);
extern void kvmppc_free_hpt(struct kvm_hpt_info *info);
extern void kvmppc_rmap_reset(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
			    struct kvm_memory_slot *memslot,
			    unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
			struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
			struct iommu_group *grp);
extern int kvmppc_switch_mmu_to_hpt(struct kvm *kvm);
extern int kvmppc_switch_mmu_to_radix(struct kvm *kvm);
extern void kvmppc_setup_partition_table(struct kvm *kvm);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
				struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
		struct kvm *kvm, unsigned long liobn);
#define kvmppc_ioba_validate(stt, ioba, npages)                         \
		(iommu_tce_check_ioba((stt)->page_shift, (stt)->offset, \
				(stt)->size, (ioba), (npages)) ?        \
				H_PARAMETER : H_SUCCESS)
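
/*
 * Illustrative sketch (not a new API): a TCE hypercall handler is
 * expected to validate the I/O bus address range before touching the
 * table and to propagate the resulting hcall status, e.g.:
 *
 *	long ret = kvmppc_ioba_validate(stt, ioba, npages);
 *
 *	if (ret != H_SUCCESS)
 *		return ret;
 */
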
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				      unsigned long liobn, unsigned long ioba,
				      unsigned long tce_list,
				      unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
			       unsigned long liobn, unsigned long ioba,
			       unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			     unsigned long ioba);
extern struct page *kvm_alloc_hpt_cma(unsigned long nr_pages);
extern void kvm_free_hpt_cma(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
				     struct kvm_memory_slot *slot);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem,
				enum kvm_mr_change change);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new,
				enum kvm_mr_change change);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
				      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
				      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);
extern long kvm_vm_ioctl_resize_hpt_prepare(struct kvm *kvm,
					    struct kvm_ppc_resize_hpt *rhpt);
extern long kvm_vm_ioctl_resize_hpt_commit(struct kvm *kvm,
					   struct kvm_ppc_resize_hpt *rhpt);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);

extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
	u32	wval;
	u64	dval;
	vector128 vval;
	u64	vsxval[2];
	u32	vsx32val[4];
	u16	vsx16val[8];
	u8	vsx8val[16];
	struct {
		u64	a;
		u64	b;
	} vpaval;
	u64	xive_timaval[2];
};

struct kvmppc_ops {
	struct module *owner;
	int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
	int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
			   union kvmppc_one_reg *val);
	void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
	void (*vcpu_put)(struct kvm_vcpu *vcpu);
	void (*inject_interrupt)(struct kvm_vcpu *vcpu, int vec, u64 srr1_flags);
	void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
	int (*vcpu_run)(struct kvm_vcpu *vcpu);
	int (*vcpu_create)(struct kvm_vcpu *vcpu);
	void (*vcpu_free)(struct kvm_vcpu *vcpu);
	int (*check_requests)(struct kvm_vcpu *vcpu);
	int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
	void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
	int (*prepare_memory_region)(struct kvm *kvm,
				     struct kvm_memory_slot *memslot,
				     const struct kvm_userspace_memory_region *mem,
				     enum kvm_mr_change change);
	void (*commit_memory_region)(struct kvm *kvm,
				     const struct kvm_userspace_memory_region *mem,
				     const struct kvm_memory_slot *old,
				     const struct kvm_memory_slot *new,
				     enum kvm_mr_change change);
	bool (*unmap_gfn_range)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*test_age_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	bool (*set_spte_gfn)(struct kvm *kvm, struct kvm_gfn_range *range);
	void (*free_memslot)(struct kvm_memory_slot *slot);
	int (*init_vm)(struct kvm *kvm);
	void (*destroy_vm)(struct kvm *kvm);
	int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
	int (*emulate_op)(struct kvm_vcpu *vcpu,
			  unsigned int inst, int *advance);
	int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
	int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
	void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
	long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
			      unsigned long arg);
	int (*hcall_implemented)(unsigned long hcall);
	int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
				       struct irq_bypass_producer *);
	void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
					struct irq_bypass_producer *);
	int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
	int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
	int (*set_smt_mode)(struct kvm *kvm, unsigned long mode,
			    unsigned long flags);
	void (*giveup_ext)(struct kvm_vcpu *vcpu, ulong msr);
	int (*enable_nested)(struct kvm *kvm);
	int (*load_from_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			       int size);
	int (*store_to_eaddr)(struct kvm_vcpu *vcpu, ulong *eaddr, void *ptr,
			      int size);
	int (*enable_svm)(struct kvm *kvm);
	int (*svm_off)(struct kvm *kvm);
	int (*enable_dawr1)(struct kvm *kvm);
	bool (*hash_v3_possible)(void);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
				enum instruction_fetch_type type, u32 *inst)
{
	int ret = EMULATE_DONE;
	u32 fetched_inst;

	/* Load the instruction manually if it failed to do so in the
	 * exit path */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
		ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

	/* Write fetch_failed unswapped if the fetch failed */
	if (ret == EMULATE_DONE)
		fetched_inst = kvmppc_need_byteswap(vcpu) ?
				swab32(vcpu->arch.last_inst) :
				vcpu->arch.last_inst;
	else
		fetched_inst = vcpu->arch.last_inst;

	*inst = fetched_inst;
	return ret;
}

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
	return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts inst bits, ordered according to the spec's IBM bit numbering:
 * the leftmost (most significant) bit is bit zero. Both the msb and lsb
 * bits are included in the extracted field.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = (1 << (lsb - msb + 1)) - 1;
	r = (inst >> (63 - lsb)) & mask;

	return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
	u32 r;
	u32 mask;

	BUG_ON(msb > lsb);

	mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
	r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

	return r;
}
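
/*
 * Worked example (illustrative only): a 32-bit instruction word held in
 * the low half of a u64 occupies IBM bits 32-63, so instruction field
 * bits n..m map to arguments n+32..m+32. The primary opcode
 * (instruction bits 0-5) of inst = 0x7c002a14 ("add") is:
 *
 *	kvmppc_get_field(inst, 32, 37)
 *		mask = (1 << 6) - 1        = 0x3f
 *		r    = (inst >> 26) & 0x3f = 31
 *
 * and kvmppc_set_field(inst, 32, 37, 31) writes the same field back.
 */
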
#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({		\
	union kvmppc_one_reg __u;		\
	switch (one_reg_size(id)) {		\
	case 4: __u.wval = (reg); break;	\
	case 8: __u.dval = (reg); break;	\
	default: BUG();				\
	}					\
	__u;					\
})

#define set_reg_val(id, val)	({		\
	u64 __v;				\
	switch (one_reg_size(id)) {		\
	case 4: __v = (val).wval; break;	\
	case 8: __v = (val).dval; break;	\
	default: BUG();				\
	}					\
	__v;					\
})
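
/*
 * Illustrative sketch of how a ONE_REG accessor uses the helpers above
 * (the register chosen here is just an example): the register id
 * encodes its size, so the same union round-trips 32- and 64-bit
 * registers. In the "get" direction:
 *
 *	case KVM_REG_PPC_DABR:
 *		*val = get_reg_val(id, vcpu->arch.dabr);
 *		break;
 *
 * and in the "set" direction:
 *
 *	case KVM_REG_PPC_DABR:
 *		vcpu->arch.dabr = set_reg_val(id, *val);
 *		break;
 */
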
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
	paca_ptrs[cpu]->kvm_hstate.xics_phys = (void __iomem *)addr;
}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{
	paca_ptrs[cpu]->kvm_hstate.xive_tima_phys = (void __iomem *)phys_addr;
	paca_ptrs[cpu]->kvm_hstate.xive_tima_virt = virt_addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
	u32 xirr;

	xirr = get_paca()->kvm_hstate.saved_xirr;
	get_paca()->kvm_hstate.saved_xirr = 0;
	return xirr;
}

/*
 * To avoid the need to unnecessarily exit fully to the host kernel, an IPI to
 * a CPU thread that's running/napping inside of a guest is by default regarded
 * as a request to wake the CPU (if needed) and continue execution within the
 * guest, potentially to process new state like externally-generated
 * interrupts or IPIs sent from within the guest itself (e.g. H_PROD/H_IPI).
 *
 * To force an exit to the host kernel, kvmppc_set_host_ipi() must be called
 * prior to issuing the IPI to set the corresponding 'host_ipi' flag in the
 * target CPU's PACA. To avoid unnecessary exits to the host, this flag should
 * be immediately cleared via kvmppc_clear_host_ipi() by the IPI handler on
 * the receiving side prior to processing the IPI work.
 *
 * NOTE:
 *
 * We currently issue an smp_mb() at the beginning of kvmppc_set_host_ipi().
 * This is to guard against sequences such as the following:
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   msgsync
 *      105: smp_muxed_ipi_set_message():
 *      105:   smp_mb()
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    --105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |  42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    |  42: // returns to executing guest
 *    |      // RE-ORDERED STORE COMPLETES
 *    ->105:   message[CALL_FUNCTION] = 1
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 *
 * We also issue an smp_mb() at the end of kvmppc_clear_host_ipi(). This is
 * to guard against sequences such as the following (as well as to create
 * a read-side pairing with the barrier in kvmppc_set_host_ipi()):
 *
 *      CPU
 *        X: smp_muxed_ipi_set_message():
 *        X:   smp_mb()
 *        X:   message[RESCHEDULE] = 1
 *        X: doorbell_global_ipi(42):
 *        X:   kvmppc_set_host_ipi(42)
 *        X:   ppc_msgsnd_sync()/smp_mb()
 *        X:   ppc_msgsnd() -> 42
 *       42: doorbell_exception(): // from CPU X
 *       42:   msgsync
 *           // STORE DEFERRED DUE TO RE-ORDERING
 *    -- 42:   kvmppc_clear_host_ipi(42)
 *    |  42: smp_ipi_demux_relaxed()
 *    | 105: smp_muxed_ipi_set_message():
 *    | 105:   smp_mb()
 *    | 105:   message[CALL_FUNCTION] = 1
 *    | 105: doorbell_global_ipi(42):
 *    | 105:   kvmppc_set_host_ipi(42)
 *    |      // RE-ORDERED STORE COMPLETES
 *    -> 42:   kvmppc_clear_host_ipi(42)
 *       42: // returns to executing guest
 *      105:   ppc_msgsnd_sync()/smp_mb()
 *      105:   ppc_msgsnd() -> 42
 *       42: local_paca->kvm_hstate.host_ipi == 0 // IPI ignored
 *      105: // hangs waiting on 42 to process messages/call_single_queue
 */
static inline void kvmppc_set_host_ipi(int cpu)
{
	/*
	 * order stores of IPI messages vs. setting of host_ipi flag
	 *
	 * pairs with the barrier in kvmppc_clear_host_ipi()
	 */
	smp_mb();
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 1;
}

static inline void kvmppc_clear_host_ipi(int cpu)
{
	paca_ptrs[cpu]->kvm_hstate.host_ipi = 0;
	/*
	 * order clearing of host_ipi flag vs. processing of IPI messages
	 *
	 * pairs with the barrier in kvmppc_set_host_ipi()
	 */
	smp_mb();
}
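
/*
 * Illustrative sender/receiver pairing (a condensed version of the
 * doorbell path described in the comment above, not a new API):
 *
 *	// sender, e.g. doorbell_global_ipi(cpu):
 *	kvmppc_set_host_ipi(cpu);	// flag the exit-to-host request
 *	ppc_msgsnd_sync();		// then send the actual IPI
 *
 *	// receiver, e.g. doorbell_exception():
 *	kvmppc_clear_host_ipi(smp_processor_id());
 *	smp_ipi_demux_relaxed();	// then process the IPI messages
 */
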
static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

extern void kvmppc_check_need_tlb_flush(struct kvm *kvm, int pcpu,
					struct kvm_nested_guest *nested);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline void kvmppc_set_xive_tima(int cpu,
					unsigned long phys_addr,
					void __iomem *virt_addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
	return 0;
}

static inline void kvmppc_set_host_ipi(int cpu)
{}

static inline void kvmppc_clear_host_ipi(int cpu)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
	kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)		{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
{
	if (kvm && kvm_irq_bypass)
		return kvm->arch.pimap;
	return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
					struct kvmppc_irq_map *irq_map,
					struct kvmppc_passthru_irqmap *pimap,
					bool *again);

extern int kvmppc_xics_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);

extern int h_ipi_redirect;

#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
				struct kvm *kvm)
	{ return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {}
static inline void kvmppc_free_host_rm_ops(void) {}
static inline void kvmppc_free_pimap(struct kvm *kvm) {}
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
	{ return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
	{ return 0; }
static inline int kvmppc_xive_xics_hcall(struct kvm_vcpu *vcpu, u32 req)
	{ return 0; }
#endif

#ifdef CONFIG_KVM_XIVE
/*
 * Below, the first "xive" is the "eXternal Interrupt Virtualization Engine",
 * i.e. the new P9 interrupt controller, while the second "xive" is the
 * legacy "eXternal Interrupt Vector Entry", which is the configuration of
 * an interrupt on the "xics" interrupt controller on P8 and earlier. These
 * two functions consume or produce a legacy "XIVE" state from the
 * new "XIVE" interrupt controller.
 */
extern int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				u32 priority);
extern int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				u32 *priority);
extern int kvmppc_xive_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xive_int_off(struct kvm *kvm, u32 irq);

extern int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
				    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern int kvmppc_xive_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
				  unsigned long host_irq);
extern u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval);

extern int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id, u32 irq,
			       int level, bool line_status);
extern void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu);
extern void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu);

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.irq_type == KVMPPC_IRQ_XIVE;
}

extern int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
				     union kvmppc_one_reg *val);
extern bool kvmppc_xive_native_supported(void);

#else
static inline int kvmppc_xive_set_xive(struct kvm *kvm, u32 irq, u32 server,
				       u32 priority) { return -1; }
static inline int kvmppc_xive_get_xive(struct kvm *kvm, u32 irq, u32 *server,
				       u32 *priority) { return -1; }
static inline int kvmppc_xive_int_on(struct kvm *kvm, u32 irq) { return -1; }
static inline int kvmppc_xive_int_off(struct kvm *kvm, u32 irq) { return -1; }

static inline int kvmppc_xive_connect_vcpu(struct kvm_device *dev,
					   struct kvm_vcpu *vcpu, u32 cpu)
					   { return -EBUSY; }
static inline void kvmppc_xive_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_set_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 unsigned long host_irq)
					 { return -ENODEV; }
static inline int kvmppc_xive_clr_mapped(struct kvm *kvm,
					 unsigned long guest_irq,
					 unsigned long host_irq)
					 { return -ENODEV; }
static inline u64 kvmppc_xive_get_icp(struct kvm_vcpu *vcpu) { return 0; }
static inline int kvmppc_xive_set_icp(struct kvm_vcpu *vcpu, u64 icpval)
	{ return -ENOENT; }

static inline int kvmppc_xive_set_irq(struct kvm *kvm, int irq_source_id,
				      u32 irq, int level, bool line_status)
				      { return -ENODEV; }
static inline void kvmppc_xive_push_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_pull_vcpu(struct kvm_vcpu *vcpu) { }
static inline void kvmppc_xive_rearm_escalation(struct kvm_vcpu *vcpu) { }

static inline int kvmppc_xive_enabled(struct kvm_vcpu *vcpu)
	{ return 0; }

static inline int kvmppc_xive_native_connect_vcpu(struct kvm_device *dev,
			struct kvm_vcpu *vcpu, u32 cpu) { return -EBUSY; }
static inline void kvmppc_xive_native_cleanup_vcpu(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xive_native_get_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return 0; }
static inline int kvmppc_xive_native_set_vp(struct kvm_vcpu *vcpu,
					    union kvmppc_one_reg *val)
{ return -ENOENT; }

#endif /* CONFIG_KVM_XIVE */

#if defined(CONFIG_PPC_POWERNV) && defined(CONFIG_KVM_BOOK3S_64_HANDLER)
static inline bool xics_on_xive(void)
{
	return xive_enabled() && cpu_has_feature(CPU_FTR_HVMODE);
}
#else
static inline bool xics_on_xive(void)
{
	return false;
}
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
			 unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
				  unsigned long liobn, unsigned long ioba,
				  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
			   unsigned long liobn, unsigned long ioba,
			   unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
			    unsigned int yield_count);
long kvmppc_rm_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
void kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
		    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
		     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
		      unsigned long pte_index, unsigned long avpn);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
		   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
			unsigned long pte_index);
long kvmppc_rm_h_page_init(struct kvm_vcpu *vcpu, unsigned long flags,
			   unsigned long dest, unsigned long src);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
			  unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_xirr_x(struct kvm_vcpu *vcpu);
unsigned long kvmppc_rm_h_ipoll(struct kvm_vcpu *vcpu, unsigned long server);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
		    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);
void kvmppc_guest_entry_inject_int(struct kvm_vcpu *vcpu);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
	unsigned long raw;
	struct {
		u32 in_host;
		u32 rm_action;
	};
};

struct kvmppc_host_rm_core {
	union kvmppc_rm_state rm_state;
	void *rm_data;
	char pad[112];
};

struct kvmppc_host_rm_ops {
	struct kvmppc_host_rm_core	*rm_core;
	void		(*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
	return vcpu->arch.epr;
#else
	return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
	vcpu->arch.epr = epr;
#endif
}
838 void kvmppc_mpic_set_epr(struct kvm_vcpu
*vcpu
);
839 int kvmppc_mpic_connect_vcpu(struct kvm_device
*dev
, struct kvm_vcpu
*vcpu
,
841 void kvmppc_mpic_disconnect_vcpu(struct openpic
*opp
, struct kvm_vcpu
*vcpu
);
845 static inline void kvmppc_mpic_set_epr(struct kvm_vcpu
*vcpu
)
849 static inline int kvmppc_mpic_connect_vcpu(struct kvm_device
*dev
,
850 struct kvm_vcpu
*vcpu
, u32 cpu
)
855 static inline void kvmppc_mpic_disconnect_vcpu(struct openpic
*opp
,
856 struct kvm_vcpu
*vcpu
)
860 #endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
			      struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
			     struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
	struct page *page;
	/*
	 * We can only access pages that the kernel maps
	 * as memory. Bail out for unmapped ones.
	 */
	if (!pfn_valid(pfn))
		return;

	/* Clear i-cache for new pages */
	page = pfn_to_page(pfn);
	if (!test_bit(PG_dcache_clean, &page->flags)) {
		flush_dcache_icache_page(page);
		set_bit(PG_dcache_clean, &page->flags);
	}
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
	/* Only Book3S_64 PR supports bi-endian for now */
	return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
	/* Book3s_64 HV on little endian is always little endian */
	return false;
#else
	return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
{								\
	return mfspr(bookehv_spr);				\
}								\

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)			\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{								\
	mtspr(bookehv_spr, val);				\
}								\

#define SHARED_WRAPPER_GET(reg, size)				\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)	\
{								\
	if (kvmppc_shared_big_endian(vcpu))			\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else							\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}								\

#define SHARED_WRAPPER_SET(reg, size)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{								\
	if (kvmppc_shared_big_endian(vcpu))			\
		vcpu->arch.shared->reg = cpu_to_be##size(val);	\
	else							\
		vcpu->arch.shared->reg = cpu_to_le##size(val);	\
}								\

#define SHARED_WRAPPER(reg, size)				\
	SHARED_WRAPPER_GET(reg, size)				\
	SHARED_WRAPPER_SET(reg, size)				\

#define SPRNG_WRAPPER(reg, bookehv_spr)				\
	SPRNG_WRAPPER_GET(reg, bookehv_spr)			\
	SPRNG_WRAPPER_SET(reg, bookehv_spr)			\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
	SPRNG_WRAPPER(reg, bookehv_spr)				\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)		\
	SHARED_WRAPPER(reg, size)				\

#endif

SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->msr = cpu_to_be64(val);
	else
		vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)
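
/*
 * For reference, a hand-expanded sketch of what one invocation above
 * produces on a non-BOOKE-HV build (illustrative only):
 *
 *	SHARED_WRAPPER(dsisr, 32)
 *
 * generates accessors that honour the guest's shared-page endianness:
 *
 *	static inline u32 kvmppc_get_dsisr(struct kvm_vcpu *vcpu)
 *	{
 *		if (kvmppc_shared_big_endian(vcpu))
 *			return be32_to_cpu(vcpu->arch.shared->dsisr);
 *		else
 *			return le32_to_cpu(vcpu->arch.shared->dsisr);
 *	}
 *
 * plus the matching kvmppc_set_dsisr(). On BOOKE-HV builds, the
 * SHARED_SPRNG_WRAPPER variants instead read/write the guest SPR
 * directly via mfspr()/mtspr().
 */
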
static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
	if (kvmppc_shared_big_endian(vcpu))
		return be32_to_cpu(vcpu->arch.shared->sr[nr]);
	else
		return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
	if (kvmppc_shared_big_endian(vcpu))
		vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
	else
		vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
	trace_hardirqs_on();

#ifdef CONFIG_PPC64
	/*
	 * To avoid races, the caller must have gone directly from having
	 * interrupts fully-enabled to hard-disabled.
	 */
	WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

	/* Only need to enable IRQs by hard enabling them after this */
	local_paca->irq_happened = 0;
	irq_soft_mask_set(IRQS_ENABLED);
#endif
}
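
/*
 * Illustrative call sequence on the guest-entry path (a condensed
 * version of what the entry code does; error handling omitted):
 *
 *	r = kvmppc_prepare_to_enter(vcpu);	// interrupts hard-disabled
 *	if (r <= 0)
 *		return r;			// exit request pending
 *	kvmppc_fix_ee_before_entry();		// reset lazy-EE tracking
 *	r = __kvmppc_vcpu_run(vcpu);		// enter the guest
 */
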
static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
	ulong ea;
	ulong msr_64bit = 0;

	ea = kvmppc_get_gpr(vcpu, rb);
	if (ra)
		ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
	msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
	msr_64bit = MSR_SF;
#endif

	if (!(kvmppc_get_msr(vcpu) & msr_64bit))
		ea = (uint32_t)ea;

	return ea;
}
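
/*
 * This mirrors the ISA's effective-address computation for X-form
 * (indexed) storage instructions: EA = (RA ? GPR[RA] : 0) + GPR[RB],
 * truncated to 32 bits when the guest MSR says it is in 32-bit mode.
 * For example (illustrative), emulating "lwzx r5, r3, r4" would use:
 *
 *	ea = kvmppc_get_ea_indexed(vcpu, 3, 4);
 */
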
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */