/*
 * Copyright 2008 IBM Corporation.
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *
 * This work is licensed under the GNU GPL license version 2 or later.
 *
 */

#ifndef KVM_PPC_H
#define KVM_PPC_H

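/*
 * Declarations for the PowerPC-specific KVM helpers implemented in
 * target/ppc/kvm.c, together with inline fallbacks used when QEMU is
 * built without KVM support.
 */
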
#define TYPE_HOST_POWERPC_CPU POWERPC_CPU_TYPE_NAME("host")

#ifdef CONFIG_KVM

uint32_t kvmppc_get_tbfreq(void);
uint64_t kvmppc_get_clockfreq(void);
bool kvmppc_get_host_model(char **buf);
bool kvmppc_get_host_serial(char **buf);
int kvmppc_get_hasidle(CPUPPCState *env);
int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len);
int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level);
void kvmppc_enable_logical_ci_hcalls(void);
void kvmppc_enable_set_mode_hcall(void);
void kvmppc_enable_clear_ref_mod_hcalls(void);
void kvmppc_enable_h_page_init(void);
void kvmppc_set_papr(PowerPCCPU *cpu);
int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr);
void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy);
int kvmppc_smt_threads(void);
void kvmppc_error_append_smt_possible_hint(Error *const *errp);
int kvmppc_set_smt_threads(int smt);
int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits);
int kvmppc_set_tcr(PowerPCCPU *cpu);
int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu);
target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                     bool radix, bool gtse,
                                     uint64_t proc_tbl);
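/*
 * Ask KVM (via the KVM_PPC_SVM_OFF ioctl) to transition a secure (PEF)
 * guest back to normal mode, e.g. when a secure pseries guest is reset.
 */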
void kvmppc_svm_off(Error **errp);
#ifndef CONFIG_USER_ONLY
bool kvmppc_spapr_use_multitce(void);
int kvmppc_spapr_enable_inkernel_multitce(void);
void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                              uint64_t bus_offset, uint32_t nb_table,
                              int *pfd, bool need_vfio);
int kvmppc_remove_spapr_tce(void *table, int pfd, uint32_t window_size);
int kvmppc_reset_htab(int shift_hint);
uint64_t kvmppc_rma_size(uint64_t current_size, unsigned int hash_shift);
bool kvmppc_has_cap_spapr_vfio(void);
#endif /* !CONFIG_USER_ONLY */
bool kvmppc_has_cap_epr(void);
int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function);
int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp);
int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns);
int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                           uint16_t n_valid, uint16_t n_invalid);
void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n);
void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1);
bool kvmppc_has_cap_fixup_hcalls(void);
bool kvmppc_has_cap_htm(void);
bool kvmppc_has_cap_mmu_radix(void);
bool kvmppc_has_cap_mmu_hash_v3(void);
bool kvmppc_has_cap_xive(void);
int kvmppc_get_cap_safe_cache(void);
int kvmppc_get_cap_safe_bounds_check(void);
int kvmppc_get_cap_safe_indirect_branch(void);
int kvmppc_get_cap_count_cache_flush_assist(void);
bool kvmppc_has_cap_nested_kvm_hv(void);
int kvmppc_set_cap_nested_kvm_hv(int enable);
int kvmppc_get_cap_large_decr(void);
int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable);
int kvmppc_enable_hwrng(void);
int kvmppc_put_books_sregs(PowerPCCPU *cpu);
PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void);
void kvmppc_check_papr_resize_hpt(Error **errp);
int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift);
int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift);
bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu);

bool kvmppc_hpt_needs_host_contiguous_pages(void);
void kvm_check_mmu(PowerPCCPU *cpu, Error **errp);
void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online);
void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset);

#else

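/*
 * Fallback definitions used when QEMU is built without KVM support
 * (CONFIG_KVM unset), so that callers do not need #ifdef guards.
 */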
static inline uint32_t kvmppc_get_tbfreq(void)
{
    return 0;
}

static inline bool kvmppc_get_host_model(char **buf)
{
    return false;
}

static inline bool kvmppc_get_host_serial(char **buf)
{
    return false;
}

static inline uint64_t kvmppc_get_clockfreq(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_vmx(void)
{
    return 0;
}

static inline uint32_t kvmppc_get_dfp(void)
{
    return 0;
}

static inline int kvmppc_get_hasidle(CPUPPCState *env)
{
    return 0;
}

static inline int kvmppc_get_hypercall(CPUPPCState *env,
                                       uint8_t *buf, int buf_len)
{
    return -1;
}

static inline int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
{
    return -1;
}

static inline void kvmppc_enable_logical_ci_hcalls(void)
{
}

static inline void kvmppc_enable_set_mode_hcall(void)
{
}

static inline void kvmppc_enable_clear_ref_mod_hcalls(void)
{
}

static inline void kvmppc_enable_h_page_init(void)
{
}

static inline void kvmppc_set_papr(PowerPCCPU *cpu)
{
}

static inline int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
{
    return 0;
}

static inline void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
{
}

static inline int kvmppc_smt_threads(void)
{
    return 1;
}

static inline void kvmppc_error_append_smt_possible_hint(Error *const *errp)
{
    return;
}

static inline int kvmppc_set_smt_threads(int smt)
{
    return 0;
}

static inline int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
{
    return 0;
}

static inline int kvmppc_set_tcr(PowerPCCPU *cpu)
{
    return 0;
}

static inline int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
{
    return -1;
}

static inline target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
                                                   bool radix, bool gtse,
                                                   uint64_t proc_tbl)
{
    return 0;
}

static inline void kvmppc_svm_off(Error **errp)
{
    return;
}

static inline void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu,
                                             unsigned int online)
{
    return;
}

static inline void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
{
}

#ifndef CONFIG_USER_ONLY
static inline bool kvmppc_spapr_use_multitce(void)
{
    return false;
}

static inline int kvmppc_spapr_enable_inkernel_multitce(void)
{
    return -1;
}

static inline void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
                                            uint64_t bus_offset,
                                            uint32_t nb_table,
                                            int *pfd, bool need_vfio)
{
    return NULL;
}

static inline int kvmppc_remove_spapr_tce(void *table, int pfd,
                                          uint32_t nb_table)
{
    return -1;
}

static inline int kvmppc_reset_htab(int shift_hint)
{
    return 0;
}

static inline uint64_t kvmppc_rma_size(uint64_t current_size,
                                       unsigned int hash_shift)
{
    return ram_size;
}

static inline bool kvmppc_hpt_needs_host_contiguous_pages(void)
{
    return false;
}

static inline void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
{
}

static inline bool kvmppc_has_cap_spapr_vfio(void)
{
    return false;
}

#endif /* !CONFIG_USER_ONLY */

static inline bool kvmppc_has_cap_epr(void)
{
    return false;
}

static inline int kvmppc_define_rtas_kernel_token(uint32_t token,
                                                  const char *function)
{
    return -1;
}

static inline int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
{
    return -1;
}

static inline int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize,
                                   int64_t max_ns)
{
    abort();
}

static inline int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
                                         uint16_t n_valid, uint16_t n_invalid)
{
    abort();
}

static inline void kvmppc_read_hptes(ppc_hash_pte64_t *hptes,
                                     hwaddr ptex, int n)
{
    abort();
}

static inline void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
{
    abort();
}

static inline bool kvmppc_has_cap_fixup_hcalls(void)
{
    abort();
}

static inline bool kvmppc_has_cap_htm(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_radix(void)
{
    return false;
}

static inline bool kvmppc_has_cap_mmu_hash_v3(void)
{
    return false;
}

static inline bool kvmppc_has_cap_xive(void)
{
    return false;
}

static inline int kvmppc_get_cap_safe_cache(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_bounds_check(void)
{
    return 0;
}

static inline int kvmppc_get_cap_safe_indirect_branch(void)
{
    return 0;
}

static inline int kvmppc_get_cap_count_cache_flush_assist(void)
{
    return 0;
}

static inline bool kvmppc_has_cap_nested_kvm_hv(void)
{
    return false;
}

static inline int kvmppc_set_cap_nested_kvm_hv(int enable)
{
    return -1;
}

static inline int kvmppc_get_cap_large_decr(void)
{
    return 0;
}

static inline int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
{
    return -1;
}

static inline int kvmppc_enable_hwrng(void)
{
    return -1;
}

static inline int kvmppc_put_books_sregs(PowerPCCPU *cpu)
{
    abort();
}

static inline PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
{
    return NULL;
}

static inline void kvmppc_check_papr_resize_hpt(Error **errp)
{
    return;
}

static inline int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu,
                                            target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline int kvmppc_resize_hpt_commit(PowerPCCPU *cpu,
                                           target_ulong flags, int shift)
{
    return -ENOSYS;
}

static inline bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
{
    return false;
}

#endif

#ifndef CONFIG_KVM

#define kvmppc_eieio() do { } while (0)

static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
}

#else /* CONFIG_KVM */

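/*
 * Emit a real eieio barrier only when KVM is actually in use at run time;
 * a TCG-only run does not need it.
 */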
#define kvmppc_eieio() \
    do { \
        if (kvm_enabled()) { \
            asm volatile("eieio" : : : "memory"); \
        } \
    } while (0)

/* Store data cache blocks back to memory */
static inline void kvmppc_dcbst_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.dcache_line_size) {
        asm volatile("dcbst 0,%0" : : "r"(p) : "memory");
    }
}

/* Invalidate instruction cache blocks */
static inline void kvmppc_icbi_range(PowerPCCPU *cpu, uint8_t *addr, int len)
{
    uint8_t *p;

    for (p = addr; p < addr + len; p += cpu->env.icache_line_size) {
        asm volatile("icbi 0,%0" : : "r"(p));
    }
}

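/*
 * Illustrative (not actual QEMU code) sequence for a caller that copies
 * executable code into guest memory and wants the guest to see it; the
 * names dst, insns and len are made up for the example:
 *
 *     memcpy(dst, insns, len);
 *     kvmppc_dcbst_range(cpu, dst, len);  // push the stores out of the dcache
 *     kvmppc_eieio();                     // order the writebacks
 *     kvmppc_icbi_range(cpu, dst, len);   // discard stale icache lines
 */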
#endif /* CONFIG_KVM */

#endif /* KVM_PPC_H */