/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

/*
 * KVMPPC_INST_SW_BREAKPOINT is the debug instruction used to support
 * software breakpoints.
 */
#define KVMPPC_INST_SW_BREAKPOINT       0x00dddd00

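/*
 * Illustrative sketch (not part of this header): emulation code can recognise
 * the breakpoint instruction and bounce the vcpu out to userspace with a
 * debug exit, roughly along these lines:
 *
 *      if (inst == KVMPPC_INST_SW_BREAKPOINT) {
 *              run->exit_reason = KVM_EXIT_DEBUG;
 *              run->debug.arch.address = kvmppc_get_pc(vcpu);
 *              emulated = EMULATE_EXIT_USER;
 *      }
 */
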
enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong. go again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

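/*
 * Illustrative sketch (simplified, assumed handling) of how an exit handler
 * might act on these results; the real handlers carry more state:
 *
 *      int er = kvmppc_emulate_instruction(run, vcpu);
 *
 *      switch (er) {
 *      case EMULATE_DONE:
 *              r = RESUME_GUEST;
 *              break;
 *      case EMULATE_DO_MMIO:
 *              run->exit_reason = KVM_EXIT_MMIO;
 *              r = RESUME_HOST;
 *              break;
 *      case EMULATE_FAIL:
 *              run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
 *              r = RESUME_HOST;
 *              break;
 *      }
 */
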
enum instruction_type {
        INST_GENERIC,
        INST_SC,                /* system call */
};

enum xlate_instdata {
        XLATE_INST,             /* translate instruction address */
        XLATE_DATA              /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,             /* check for read permissions */
        XLATE_WRITE             /* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
                              int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               u64 val, unsigned int bytes,
                               int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                                 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(struct kvm_vcpu *vcpu);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                        enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                        struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags);
extern void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags,
                                           ulong esr_flags);
extern void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                            struct kvm_memory_slot *memslot,
                            unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);
extern long kvm_spapr_tce_attach_iommu_group(struct kvm *kvm, int tablefd,
                                             struct iommu_group *grp);
extern void kvm_spapr_tce_release_iommu_group(struct kvm *kvm,
                                              struct iommu_group *grp);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                          struct kvm_create_spapr_tce_64 *args);
extern struct kvmppc_spapr_tce_table *kvmppc_find_table(
                struct kvm *kvm, unsigned long liobn);
extern long kvmppc_ioba_validate(struct kvmppc_spapr_tce_table *stt,
                                 unsigned long ioba, unsigned long npages);
extern long kvmppc_tce_validate(struct kvmppc_spapr_tce_table *tt,
                                unsigned long tce);
extern long kvmppc_gpa_to_ua(struct kvm *kvm, unsigned long gpa,
                             unsigned long *ua, unsigned long **prmap);
extern void kvmppc_tce_put(struct kvmppc_spapr_tce_table *tt,
                           unsigned long idx, unsigned long tce);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba, unsigned long tce);
extern long kvmppc_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                      unsigned long liobn, unsigned long ioba,
                                      unsigned long tce_list,
                                      unsigned long npages);
extern long kvmppc_h_stuff_tce(struct kvm_vcpu *vcpu,
                               unsigned long liobn, unsigned long ioba,
                               unsigned long tce_value, unsigned long npages);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *slot,
                                      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                                      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu);
void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu);

union kvmppc_one_reg {
        u32     wval;
        u64     dval;
        vector128 vval;
        u64     vsxval[2];
        struct {
                u64     addr;
                u64     length;
        }       vpaval;
};

struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                const struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                                const struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old,
                                const struct kvm_memory_slot *new);
        int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                               unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long start, unsigned long end);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                              unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                              unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
        int (*irq_bypass_add_producer)(struct irq_bypass_consumer *,
                                       struct irq_bypass_producer *);
        void (*irq_bypass_del_producer)(struct irq_bypass_consumer *,
                                        struct irq_bypass_producer *);
        int (*configure_mmu)(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg);
        int (*get_rmmu_info)(struct kvm *kvm, struct kvm_ppc_rmmu_info *info);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

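/*
 * Illustrative sketch (hypothetical names, simplified wiring): a backend
 * module fills in a struct kvmppc_ops and publishes it through one of the
 * pointers above, e.g.
 *
 *      static struct kvmppc_ops kvm_ops_example = {
 *              .owner       = THIS_MODULE,
 *              .vcpu_create = example_vcpu_create,
 *              .vcpu_run    = example_vcpu_run,
 *      };
 *
 *      kvmppc_hv_ops = &kvm_ops_example;
 *
 * Generic code then dispatches through vcpu->kvm->arch.kvm_ops, as
 * kvmppc_fast_vcpu_kick() below does.
 */
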
static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                       enum instruction_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                               swab32(vcpu->arch.last_inst) :
                               vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}

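/*
 * Typical caller pattern (illustrative sketch): fetch the faulting
 * instruction, bail out if the fetch itself needs another round trip, then
 * decode and emulate the returned word:
 *
 *      u32 inst;
 *      int emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *      if (emulated != EMULATE_DONE)
 *              return emulated;
 */
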
static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

extern int kvmppc_hwrng_present(void);

/*
 * Extracts the bit field inst[msb:lsb], using the ISA convention that bit 0
 * is the leftmost (most significant) bit; both msb and lsb are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}

/*
 * Replaces the bit field inst[msb:lsb] with value, using the same bit
 * ordering as kvmppc_get_field().
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}

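/*
 * Illustrative example: callers that work on 32-bit instruction words keep
 * the instruction in the low half of the u64, so ISA bit i of the instruction
 * is bit i + 32 here. Extracting and rewriting the RT field (bits 6..10)
 * would look like:
 *
 *      u32 rt    = kvmppc_get_field(inst, 6 + 32, 10 + 32);
 *      u32 inst2 = kvmppc_set_field(inst, 6 + 32, 10 + 32, new_rt);
 *
 * (new_rt is a stand-in for whatever value the caller wants to insert.)
 */
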
#define one_reg_size(id) \
        (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg) ({                 \
        union kvmppc_one_reg __u;               \
        switch (one_reg_size(id)) {             \
        case 4: __u.wval = (reg); break;        \
        case 8: __u.dval = (reg); break;        \
        default: BUG();                         \
        }                                       \
        __u;                                    \
})


#define set_reg_val(id, val) ({                 \
        u64 __v;                                \
        switch (one_reg_size(id)) {             \
        case 4: __v = (val).wval; break;        \
        case 8: __v = (val).dval; break;        \
        default: BUG();                         \
        }                                       \
        __v;                                    \
})

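/*
 * Illustrative use (sketch, with a made-up register id): a get_one_reg
 * backend hands a value back to the generic ONE_REG code with
 *
 *      case KVM_REG_PPC_EXAMPLE:
 *              *val = get_reg_val(id, vcpu->arch.example_reg);
 *              break;
 *
 * and a set_one_reg backend reads it back with
 *
 *      vcpu->arch.example_reg = set_reg_val(id, *val);
 *
 * where one_reg_size(id) decides whether the 32-bit or 64-bit union member
 * is used.
 */
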
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)             { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}

static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
{
        if (kvm && kvm_irq_bypass)
                return kvm->arch.pimap;
        return NULL;
}

extern void kvmppc_alloc_host_rm_ops(void);
extern void kvmppc_free_host_rm_ops(void);
extern void kvmppc_free_pimap(struct kvm *kvm);
extern int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall);
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
extern void kvmppc_xics_ipi_action(void);
extern void kvmppc_xics_set_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern void kvmppc_xics_clr_mapped(struct kvm *kvm, unsigned long guest_irq,
                                   unsigned long host_irq);
extern long kvmppc_deliver_irq_passthru(struct kvm_vcpu *vcpu, __be32 xirr,
                                        struct kvmppc_irq_map *irq_map,
                                        struct kvmppc_passthru_irqmap *pimap,
                                        bool *again);
extern int h_ipi_redirect;
#else
static inline struct kvmppc_passthru_irqmap *kvmppc_get_passthru_irqmap(
                                struct kvm *kvm)
        { return NULL; }
static inline void kvmppc_alloc_host_rm_ops(void) {};
static inline void kvmppc_free_host_rm_ops(void) {};
static inline void kvmppc_free_pimap(struct kvm *kvm) {};
static inline int kvmppc_xics_rm_complete(struct kvm_vcpu *vcpu, u32 hcall)
        { return 0; }
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
                                         unsigned long server)
        { return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
                                        struct kvm_irq_level *args)
        { return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
        { return 0; }
#endif

/*
 * Prototypes for functions called only from assembler code.
 * Having prototypes reduces sparse errors.
 */
long kvmppc_rm_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                         unsigned long ioba, unsigned long tce);
long kvmppc_rm_h_put_tce_indirect(struct kvm_vcpu *vcpu,
                                  unsigned long liobn, unsigned long ioba,
                                  unsigned long tce_list, unsigned long npages);
long kvmppc_rm_h_stuff_tce(struct kvm_vcpu *vcpu,
                           unsigned long liobn, unsigned long ioba,
                           unsigned long tce_value, unsigned long npages);
long int kvmppc_rm_h_confer(struct kvm_vcpu *vcpu, int target,
                            unsigned int yield_count);
long kvmppc_h_random(struct kvm_vcpu *vcpu);
void kvmhv_commence_exit(int trap);
long kvmppc_realmode_machine_check(struct kvm_vcpu *vcpu);
void kvmppc_subcore_enter_guest(void);
void kvmppc_subcore_exit_guest(void);
long kvmppc_realmode_hmi_handler(void);
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel);
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn);
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu);
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va);
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index);
long kvmppc_h_clear_ref(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_h_clear_mod(struct kvm_vcpu *vcpu, unsigned long flags,
                        unsigned long pte_index);
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data);
unsigned long kvmppc_rm_h_xirr(struct kvm_vcpu *vcpu);
int kvmppc_rm_h_ipi(struct kvm_vcpu *vcpu, unsigned long server,
                    unsigned long mfrr);
int kvmppc_rm_h_cppr(struct kvm_vcpu *vcpu, unsigned long cppr);
int kvmppc_rm_h_eoi(struct kvm_vcpu *vcpu, unsigned long xirr);

/*
 * Host-side operations we want to set up while running in real
 * mode in the guest operating on the xics.
 * Currently only VCPU wakeup is supported.
 */

union kvmppc_rm_state {
        unsigned long raw;
        struct {
                u32 in_host;
                u32 rm_action;
        };
};

struct kvmppc_host_rm_core {
        union kvmppc_rm_state rm_state;
        void *rm_data;
        char pad[112];
};

struct kvmppc_host_rm_ops {
        struct kvmppc_host_rm_core      *rm_core;
        void            (*vcpu_kick)(struct kvm_vcpu *vcpu);
};

extern struct kvmppc_host_rm_ops *kvmppc_host_rm_ops_hv;

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                                               struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(kvm_pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)             \
{                                                                       \
        return mfspr(bookehv_spr);                                      \
}                                                                       \

#define SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)   \
{                                                                       \
        mtspr(bookehv_spr, val);                                        \
}                                                                       \

#define SHARED_WRAPPER_GET(reg, size)                                   \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                return be##size##_to_cpu(vcpu->arch.shared->reg);       \
        else                                                            \
                return le##size##_to_cpu(vcpu->arch.shared->reg);       \
}                                                                       \

#define SHARED_WRAPPER_SET(reg, size)                                   \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                vcpu->arch.shared->reg = cpu_to_be##size(val);          \
        else                                                            \
                vcpu->arch.shared->reg = cpu_to_le##size(val);          \
}                                                                       \

#define SHARED_WRAPPER(reg, size)                                       \
        SHARED_WRAPPER_GET(reg, size)                                   \
        SHARED_WRAPPER_SET(reg, size)                                   \

#define SPRNG_WRAPPER(reg, bookehv_spr)                                 \
        SPRNG_WRAPPER_GET(reg, bookehv_spr)                             \
        SPRNG_WRAPPER_SET(reg, bookehv_spr)                             \

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SPRNG_WRAPPER(reg, bookehv_spr)                                 \

#else

#define SHARED_SPRNG_WRAPPER(reg, size, bookehv_spr)                    \
        SHARED_WRAPPER(reg, size)                                       \

#endif

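/*
 * Illustrative expansion (sketch): in the !CONFIG_KVM_BOOKE_HV case,
 * SHARED_WRAPPER(dsisr, 32) below produces accessors equivalent to
 *
 *      static inline u32 kvmppc_get_dsisr(struct kvm_vcpu *vcpu)
 *      {
 *              if (kvmppc_shared_big_endian(vcpu))
 *                      return be32_to_cpu(vcpu->arch.shared->dsisr);
 *              else
 *                      return le32_to_cpu(vcpu->arch.shared->dsisr);
 *      }
 *
 * while on BOOKE-HV the SHARED_SPRNG_WRAPPER() variants read and write the
 * corresponding guest SPR directly via mfspr()/mtspr().
 */
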
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
                return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
                vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}

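/*
 * Illustrative example: for an indexed access such as "lwzx rt, ra, rb" the
 * effective address is GPR[rb], plus GPR[ra] unless ra is 0, truncated to
 * 32 bits when the guest is not in 64-bit mode:
 *
 *      ulong ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * (get_ra()/get_rb() stand for whatever instruction-field decoding the
 * caller uses.)
 */
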
extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */