1 /*
2 * PowerPC implementation of KVM hooks
3 *
4 * Copyright IBM Corp. 2007
5 * Copyright (C) 2011 Freescale Semiconductor, Inc.
6 *
7 * Authors:
8 * Jerone Young <jyoung5@us.ibm.com>
9 * Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
10 * Hollis Blanchard <hollisb@us.ibm.com>
11 *
12 * This work is licensed under the terms of the GNU GPL, version 2 or later.
13 * See the COPYING file in the top-level directory.
14 *
15 */
16
17 #include "qemu/osdep.h"
18 #include <dirent.h>
19 #include <sys/ioctl.h>
20 #include <sys/vfs.h>
21
22 #include <linux/kvm.h>
23
24 #include "qapi/error.h"
25 #include "qemu/error-report.h"
26 #include "cpu.h"
27 #include "cpu-models.h"
28 #include "qemu/timer.h"
29 #include "sysemu/hw_accel.h"
30 #include "kvm_ppc.h"
31 #include "sysemu/cpus.h"
32 #include "sysemu/device_tree.h"
33 #include "mmu-hash64.h"
34
35 #include "hw/ppc/spapr.h"
36 #include "hw/ppc/spapr_cpu_core.h"
37 #include "hw/hw.h"
38 #include "hw/ppc/ppc.h"
39 #include "migration/qemu-file-types.h"
40 #include "sysemu/watchdog.h"
41 #include "trace.h"
42 #include "exec/gdbstub.h"
43 #include "exec/memattrs.h"
44 #include "exec/ram_addr.h"
45 #include "sysemu/hostmem.h"
46 #include "qemu/cutils.h"
47 #include "qemu/main-loop.h"
48 #include "qemu/mmap-alloc.h"
49 #include "elf.h"
50 #include "sysemu/kvm_int.h"
51
52 #define PROC_DEVTREE_CPU "/proc/device-tree/cpus/"
53
54 #define DEBUG_RETURN_GUEST 0
55 #define DEBUG_RETURN_GDB 1
56
57 const KVMCapabilityInfo kvm_arch_required_capabilities[] = {
58 KVM_CAP_LAST_INFO
59 };
60
61 static int cap_interrupt_unset;
62 static int cap_segstate;
63 static int cap_booke_sregs;
64 static int cap_ppc_smt;
65 static int cap_ppc_smt_possible;
66 static int cap_spapr_tce;
67 static int cap_spapr_tce_64;
68 static int cap_spapr_multitce;
69 static int cap_spapr_vfio;
70 static int cap_hior;
71 static int cap_one_reg;
72 static int cap_epr;
73 static int cap_ppc_watchdog;
74 static int cap_papr;
75 static int cap_htab_fd;
76 static int cap_fixup_hcalls;
77 static int cap_htm; /* Hardware transactional memory support */
78 static int cap_mmu_radix;
79 static int cap_mmu_hash_v3;
80 static int cap_xive;
81 static int cap_resize_hpt;
82 static int cap_ppc_pvr_compat;
83 static int cap_ppc_safe_cache;
84 static int cap_ppc_safe_bounds_check;
85 static int cap_ppc_safe_indirect_branch;
86 static int cap_ppc_count_cache_flush_assist;
87 static int cap_ppc_nested_kvm_hv;
88 static int cap_large_decr;
89 static int cap_fwnmi;
90 static int cap_rpt_invalidate;
91 static int cap_ail_mode_3;
92
93 static uint32_t debug_inst_opcode;
94
95 /*
96 * Check whether we are running with KVM-PR (instead of KVM-HV). This
97 * should only be used for fallback tests - generally we should use
98 * explicit capabilities for the features we want, rather than
99 * assuming what is/isn't available depending on the KVM variant.
100 */
101 static bool kvmppc_is_pr(KVMState *ks)
102 {
103 /* Assume KVM-PR if the GET_PVINFO capability is available */
104 return kvm_vm_check_extension(ks, KVM_CAP_PPC_GET_PVINFO) != 0;
105 }
106
107 static int kvm_ppc_register_host_cpu_type(void);
108 static void kvmppc_get_cpu_characteristics(KVMState *s);
109 static int kvmppc_get_dec_bits(void);
110
111 int kvm_arch_get_default_type(MachineState *ms)
112 {
113 return 0;
114 }
115
116 int kvm_arch_init(MachineState *ms, KVMState *s)
117 {
118 cap_interrupt_unset = kvm_check_extension(s, KVM_CAP_PPC_UNSET_IRQ);
119 cap_segstate = kvm_check_extension(s, KVM_CAP_PPC_SEGSTATE);
120 cap_booke_sregs = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_SREGS);
121 cap_ppc_smt_possible = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT_POSSIBLE);
122 cap_spapr_tce = kvm_check_extension(s, KVM_CAP_SPAPR_TCE);
123 cap_spapr_tce_64 = kvm_check_extension(s, KVM_CAP_SPAPR_TCE_64);
124 cap_spapr_multitce = kvm_check_extension(s, KVM_CAP_SPAPR_MULTITCE);
125 cap_spapr_vfio = kvm_vm_check_extension(s, KVM_CAP_SPAPR_TCE_VFIO);
126 cap_one_reg = kvm_check_extension(s, KVM_CAP_ONE_REG);
127 cap_hior = kvm_check_extension(s, KVM_CAP_PPC_HIOR);
128 cap_epr = kvm_check_extension(s, KVM_CAP_PPC_EPR);
129 cap_ppc_watchdog = kvm_check_extension(s, KVM_CAP_PPC_BOOKE_WATCHDOG);
130 /*
131 * Note: we don't set cap_papr here, because this capability is
132 * only activated after this by kvmppc_set_papr()
133 */
134 cap_htab_fd = kvm_vm_check_extension(s, KVM_CAP_PPC_HTAB_FD);
135 cap_fixup_hcalls = kvm_check_extension(s, KVM_CAP_PPC_FIXUP_HCALL);
136 cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
137 cap_htm = kvm_vm_check_extension(s, KVM_CAP_PPC_HTM);
138 cap_mmu_radix = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_RADIX);
139 cap_mmu_hash_v3 = kvm_vm_check_extension(s, KVM_CAP_PPC_MMU_HASH_V3);
140 cap_xive = kvm_vm_check_extension(s, KVM_CAP_PPC_IRQ_XIVE);
141 cap_resize_hpt = kvm_vm_check_extension(s, KVM_CAP_SPAPR_RESIZE_HPT);
142 kvmppc_get_cpu_characteristics(s);
143 cap_ppc_nested_kvm_hv = kvm_vm_check_extension(s, KVM_CAP_PPC_NESTED_HV);
144 cap_large_decr = kvmppc_get_dec_bits();
145 cap_fwnmi = kvm_vm_check_extension(s, KVM_CAP_PPC_FWNMI);
146 /*
147  * Note: setting it to false because there is no such capability
148 * in KVM at this moment.
149 *
150 * TODO: call kvm_vm_check_extension() with the right capability
151 * after the kernel starts implementing it.
152 */
153 cap_ppc_pvr_compat = false;
154
155 if (!kvm_check_extension(s, KVM_CAP_PPC_IRQ_LEVEL)) {
156 error_report("KVM: Host kernel doesn't have level irq capability");
157 exit(1);
158 }
159
160 cap_rpt_invalidate = kvm_vm_check_extension(s, KVM_CAP_PPC_RPT_INVALIDATE);
161 cap_ail_mode_3 = kvm_vm_check_extension(s, KVM_CAP_PPC_AIL_MODE_3);
162 kvm_ppc_register_host_cpu_type();
163
164 return 0;
165 }
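
/*
 * Illustrative note (not from the original source): kvm_check_extension()
 * probes the global /dev/kvm fd, while kvm_vm_check_extension() probes the
 * VM fd. Some PPC capabilities only report meaningful values per VM, e.g.:
 *
 *     cap_ppc_smt = kvm_vm_check_extension(s, KVM_CAP_PPC_SMT);
 *
 * reports this VM's threads-per-core setting (which differs between
 * KVM-HV and KVM-PR), which is why both probe variants appear above.
 */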
166
167 int kvm_arch_irqchip_create(KVMState *s)
168 {
169 return 0;
170 }
171
172 static int kvm_arch_sync_sregs(PowerPCCPU *cpu)
173 {
174 CPUPPCState *cenv = &cpu->env;
175 CPUState *cs = CPU(cpu);
176 struct kvm_sregs sregs;
177 int ret;
178
179 if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
180 /*
181          * What we're really trying to say is "if we're on BookE, we
182          * use the native PVR for now". This is the only sane way to
183          * check it, though it may mislead users into thinking they
184          * can run BookE guests on BookS. Let's hope nobody dares
185          * enough :)
186 */
187 return 0;
188 } else {
189 if (!cap_segstate) {
190 fprintf(stderr, "kvm error: missing PVR setting capability\n");
191 return -ENOSYS;
192 }
193 }
194
195 ret = kvm_vcpu_ioctl(cs, KVM_GET_SREGS, &sregs);
196 if (ret) {
197 return ret;
198 }
199
200 sregs.pvr = cenv->spr[SPR_PVR];
201 return kvm_vcpu_ioctl(cs, KVM_SET_SREGS, &sregs);
202 }
203
204 /* Set up a shared TLB array with KVM */
205 static int kvm_booke206_tlb_init(PowerPCCPU *cpu)
206 {
207 CPUPPCState *env = &cpu->env;
208 CPUState *cs = CPU(cpu);
209 struct kvm_book3e_206_tlb_params params = {};
210 struct kvm_config_tlb cfg = {};
211 unsigned int entries = 0;
212 int ret, i;
213
214 if (!kvm_enabled() ||
215 !kvm_check_extension(cs->kvm_state, KVM_CAP_SW_TLB)) {
216 return 0;
217 }
218
219 assert(ARRAY_SIZE(params.tlb_sizes) == BOOKE206_MAX_TLBN);
220
221 for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
222 params.tlb_sizes[i] = booke206_tlb_size(env, i);
223 params.tlb_ways[i] = booke206_tlb_ways(env, i);
224 entries += params.tlb_sizes[i];
225 }
226
227 assert(entries == env->nb_tlb);
228 assert(sizeof(struct kvm_book3e_206_tlb_entry) == sizeof(ppcmas_tlb_t));
229
230 env->tlb_dirty = true;
231
232 cfg.array = (uintptr_t)env->tlb.tlbm;
233 cfg.array_len = sizeof(ppcmas_tlb_t) * entries;
234 cfg.params = (uintptr_t)&params;
235 cfg.mmu_type = KVM_MMU_FSL_BOOKE_NOHV;
236
237 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_SW_TLB, 0, (uintptr_t)&cfg);
238 if (ret < 0) {
239 fprintf(stderr, "%s: couldn't enable KVM_CAP_SW_TLB: %s\n",
240 __func__, strerror(-ret));
241 return ret;
242 }
243
244 env->kvm_sw_tlb = true;
245 return 0;
246 }
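
/*
 * Worked example (illustrative figures, not from the source): on an
 * e500mc-style layout with a 512-entry TLB0 and a 64-entry TLB1,
 * 'entries' sums to 576, so:
 *
 *     cfg.array_len = sizeof(ppcmas_tlb_t) * 576;
 *
 * Once KVM_CAP_SW_TLB is enabled, KVM reads and writes guest TLB state
 * directly in env->tlb.tlbm instead of copying entries one by one.
 */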
247
248
249 #if defined(TARGET_PPC64)
250 static void kvm_get_smmu_info(struct kvm_ppc_smmu_info *info, Error **errp)
251 {
252 int ret;
253
254 assert(kvm_state != NULL);
255
256 if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_GET_SMMU_INFO)) {
257 error_setg(errp, "KVM doesn't expose the MMU features it supports");
258 error_append_hint(errp, "Consider switching to a newer KVM\n");
259 return;
260 }
261
262 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_SMMU_INFO, info);
263 if (ret == 0) {
264 return;
265 }
266
267 error_setg_errno(errp, -ret,
268 "KVM failed to provide the MMU features it supports");
269 }
270
271 struct ppc_radix_page_info *kvm_get_radix_page_info(void)
272 {
273 KVMState *s = KVM_STATE(current_accel());
274 struct ppc_radix_page_info *radix_page_info;
275 struct kvm_ppc_rmmu_info rmmu_info = { };
276 int i;
277
278 if (!kvm_check_extension(s, KVM_CAP_PPC_MMU_RADIX)) {
279 return NULL;
280 }
281 if (kvm_vm_ioctl(s, KVM_PPC_GET_RMMU_INFO, &rmmu_info)) {
282 return NULL;
283 }
284 radix_page_info = g_malloc0(sizeof(*radix_page_info));
285 radix_page_info->count = 0;
286 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
287 if (rmmu_info.ap_encodings[i]) {
288 radix_page_info->entries[i] = rmmu_info.ap_encodings[i];
289 radix_page_info->count++;
290 }
291 }
292 return radix_page_info;
293 }
294
295 target_ulong kvmppc_configure_v3_mmu(PowerPCCPU *cpu,
296 bool radix, bool gtse,
297 uint64_t proc_tbl)
298 {
299 CPUState *cs = CPU(cpu);
300 int ret;
301 uint64_t flags = 0;
302 struct kvm_ppc_mmuv3_cfg cfg = {
303 .process_table = proc_tbl,
304 };
305
306 if (radix) {
307 flags |= KVM_PPC_MMUV3_RADIX;
308 }
309 if (gtse) {
310 flags |= KVM_PPC_MMUV3_GTSE;
311 }
312 cfg.flags = flags;
313 ret = kvm_vm_ioctl(cs->kvm_state, KVM_PPC_CONFIGURE_V3_MMU, &cfg);
314 switch (ret) {
315 case 0:
316 return H_SUCCESS;
317 case -EINVAL:
318 return H_PARAMETER;
319 case -ENODEV:
320 return H_NOT_AVAILABLE;
321 default:
322 return H_HARDWARE;
323 }
324 }
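
/*
 * Usage sketch (illustrative, not from the original source; "patb" is a
 * hypothetical variable holding the guest's process-table value): an
 * hcall handler enabling a radix guest with guest translation shootdown
 * enablement (GTSE) might do:
 *
 *     target_ulong rc = kvmppc_configure_v3_mmu(cpu, true, true, patb);
 *     if (rc != H_SUCCESS) {
 *         return rc;
 *     }
 *
 * The switch above maps the ioctl result onto PAPR hcall return codes.
 */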
325
326 bool kvmppc_hpt_needs_host_contiguous_pages(void)
327 {
328 static struct kvm_ppc_smmu_info smmu_info;
329
330 if (!kvm_enabled()) {
331 return false;
332 }
333
334 kvm_get_smmu_info(&smmu_info, &error_fatal);
335 return !!(smmu_info.flags & KVM_PPC_PAGE_SIZES_REAL);
336 }
337
338 void kvm_check_mmu(PowerPCCPU *cpu, Error **errp)
339 {
340 struct kvm_ppc_smmu_info smmu_info;
341 int iq, ik, jq, jk;
342 Error *local_err = NULL;
343
344 /* For now, we only have anything to check on hash64 MMUs */
345 if (!cpu->hash64_opts || !kvm_enabled()) {
346 return;
347 }
348
349 kvm_get_smmu_info(&smmu_info, &local_err);
350 if (local_err) {
351 error_propagate(errp, local_err);
352 return;
353 }
354
355 if (ppc_hash64_has(cpu, PPC_HASH64_1TSEG)
356 && !(smmu_info.flags & KVM_PPC_1T_SEGMENTS)) {
357 error_setg(errp,
358 "KVM does not support 1TiB segments which guest expects");
359 return;
360 }
361
362 if (smmu_info.slb_size < cpu->hash64_opts->slb_size) {
363 error_setg(errp, "KVM only supports %u SLB entries, but guest needs %u",
364 smmu_info.slb_size, cpu->hash64_opts->slb_size);
365 return;
366 }
367
368 /*
369 * Verify that every pagesize supported by the cpu model is
370 * supported by KVM with the same encodings
371 */
372 for (iq = 0; iq < ARRAY_SIZE(cpu->hash64_opts->sps); iq++) {
373 PPCHash64SegmentPageSizes *qsps = &cpu->hash64_opts->sps[iq];
374 struct kvm_ppc_one_seg_page_size *ksps;
375
376 for (ik = 0; ik < ARRAY_SIZE(smmu_info.sps); ik++) {
377 if (qsps->page_shift == smmu_info.sps[ik].page_shift) {
378 break;
379 }
380 }
381 if (ik >= ARRAY_SIZE(smmu_info.sps)) {
382             error_setg(errp, "KVM doesn't support base page shift %u",
383 qsps->page_shift);
384 return;
385 }
386
387 ksps = &smmu_info.sps[ik];
388 if (ksps->slb_enc != qsps->slb_enc) {
389 error_setg(errp,
390 "KVM uses SLB encoding 0x%x for page shift %u, but guest expects 0x%x",
391 ksps->slb_enc, ksps->page_shift, qsps->slb_enc);
392 return;
393 }
394
395 for (jq = 0; jq < ARRAY_SIZE(qsps->enc); jq++) {
396 for (jk = 0; jk < ARRAY_SIZE(ksps->enc); jk++) {
397 if (qsps->enc[jq].page_shift == ksps->enc[jk].page_shift) {
398 break;
399 }
400 }
401
402 if (jk >= ARRAY_SIZE(ksps->enc)) {
403 error_setg(errp, "KVM doesn't support page shift %u/%u",
404 qsps->enc[jq].page_shift, qsps->page_shift);
405 return;
406 }
407 if (qsps->enc[jq].pte_enc != ksps->enc[jk].pte_enc) {
408 error_setg(errp,
409 "KVM uses PTE encoding 0x%x for page shift %u/%u, but guest expects 0x%x",
410 ksps->enc[jk].pte_enc, qsps->enc[jq].page_shift,
411 qsps->page_shift, qsps->enc[jq].pte_enc);
412 return;
413 }
414 }
415 }
416
417 if (ppc_hash64_has(cpu, PPC_HASH64_CI_LARGEPAGE)) {
418 /*
419          * For the most part, the guest page sizes we can use depend on
420          * the host pages used to map guest RAM, which is handled in the
421          * platform code. Cache-Inhibited largepages (64k), however, are
422          * used for I/O, so if they're mapped to the host at all it
423          * will be a normal mapping, not a special hugepage one used
424          * for RAM.
425 */
426 if (qemu_real_host_page_size() < 0x10000) {
427 error_setg(errp,
428 "KVM can't supply 64kiB CI pages, which guest expects");
429 }
430 }
431 }
432 #endif /* defined(TARGET_PPC64) */
433
434 unsigned long kvm_arch_vcpu_id(CPUState *cpu)
435 {
436 return POWERPC_CPU(cpu)->vcpu_id;
437 }
438
439 /*
440  * e500 supports 2 h/w breakpoints and 2 watchpoints. book3s supports
441  * only 1 watchpoint, so an array size of 4 is sufficient for now.
442 */
443 #define MAX_HW_BKPTS 4
444
445 static struct HWBreakpoint {
446 target_ulong addr;
447 int type;
448 } hw_debug_points[MAX_HW_BKPTS];
449
450 static CPUWatchpoint hw_watchpoint;
451
452 /* By default no hardware breakpoints or watchpoints are supported */
453 static int max_hw_breakpoint;
454 static int max_hw_watchpoint;
455 static int nb_hw_breakpoint;
456 static int nb_hw_watchpoint;
457
458 static void kvmppc_hw_debug_points_init(CPUPPCState *cenv)
459 {
460 if (cenv->excp_model == POWERPC_EXCP_BOOKE) {
461 max_hw_breakpoint = 2;
462 max_hw_watchpoint = 2;
463 }
464
465 if ((max_hw_breakpoint + max_hw_watchpoint) > MAX_HW_BKPTS) {
466 fprintf(stderr, "Error initializing h/w breakpoints\n");
467 return;
468 }
469 }
470
471 int kvm_arch_init_vcpu(CPUState *cs)
472 {
473 PowerPCCPU *cpu = POWERPC_CPU(cs);
474 CPUPPCState *cenv = &cpu->env;
475 int ret;
476
477 /* Synchronize sregs with kvm */
478 ret = kvm_arch_sync_sregs(cpu);
479 if (ret) {
480 if (ret == -EINVAL) {
481 error_report("Register sync failed... If you're using kvm-hv.ko,"
482 " only \"-cpu host\" is possible");
483 }
484 return ret;
485 }
486
487 switch (cenv->mmu_model) {
488 case POWERPC_MMU_BOOKE206:
489 /* This target supports access to KVM's guest TLB */
490 ret = kvm_booke206_tlb_init(cpu);
491 break;
492 case POWERPC_MMU_2_07:
493 if (!cap_htm && !kvmppc_is_pr(cs->kvm_state)) {
494 /*
495 * KVM-HV has transactional memory on POWER8 also without
496 * the KVM_CAP_PPC_HTM extension, so enable it here
497 * instead as long as it's available to userspace on the
498 * host.
499 */
500 if (qemu_getauxval(AT_HWCAP2) & PPC_FEATURE2_HAS_HTM) {
501 cap_htm = true;
502 }
503 }
504 break;
505 default:
506 break;
507 }
508
509 kvm_get_one_reg(cs, KVM_REG_PPC_DEBUG_INST, &debug_inst_opcode);
510 kvmppc_hw_debug_points_init(cenv);
511
512 return ret;
513 }
514
515 int kvm_arch_destroy_vcpu(CPUState *cs)
516 {
517 return 0;
518 }
519
520 static void kvm_sw_tlb_put(PowerPCCPU *cpu)
521 {
522 CPUPPCState *env = &cpu->env;
523 CPUState *cs = CPU(cpu);
524 struct kvm_dirty_tlb dirty_tlb;
525 unsigned char *bitmap;
526 int ret;
527
528 if (!env->kvm_sw_tlb) {
529 return;
530 }
531
532 bitmap = g_malloc((env->nb_tlb + 7) / 8);
533 memset(bitmap, 0xFF, (env->nb_tlb + 7) / 8);
534
535 dirty_tlb.bitmap = (uintptr_t)bitmap;
536 dirty_tlb.num_dirty = env->nb_tlb;
537
538 ret = kvm_vcpu_ioctl(cs, KVM_DIRTY_TLB, &dirty_tlb);
539 if (ret) {
540 fprintf(stderr, "%s: KVM_DIRTY_TLB: %s\n",
541 __func__, strerror(-ret));
542 }
543
544 g_free(bitmap);
545 }
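
/*
 * Example (illustrative figures): with env->nb_tlb == 576, the dirty
 * bitmap is (576 + 7) / 8 == 72 bytes; filling it with 0xFF marks every
 * entry dirty, so KVM reloads the complete shadow TLB from env->tlb.tlbm.
 */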
546
547 static void kvm_get_one_spr(CPUState *cs, uint64_t id, int spr)
548 {
549 PowerPCCPU *cpu = POWERPC_CPU(cs);
550 CPUPPCState *env = &cpu->env;
551 /* Init 'val' to avoid "uninitialised value" Valgrind warnings */
552 union {
553 uint32_t u32;
554 uint64_t u64;
555 } val = { };
556 struct kvm_one_reg reg = {
557 .id = id,
558 .addr = (uintptr_t) &val,
559 };
560 int ret;
561
562 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
563 if (ret != 0) {
564 trace_kvm_failed_spr_get(spr, strerror(errno));
565 } else {
566 switch (id & KVM_REG_SIZE_MASK) {
567 case KVM_REG_SIZE_U32:
568 env->spr[spr] = val.u32;
569 break;
570
571 case KVM_REG_SIZE_U64:
572 env->spr[spr] = val.u64;
573 break;
574
575 default:
576 /* Don't handle this size yet */
577 abort();
578 }
579 }
580 }
581
582 static void kvm_put_one_spr(CPUState *cs, uint64_t id, int spr)
583 {
584 PowerPCCPU *cpu = POWERPC_CPU(cs);
585 CPUPPCState *env = &cpu->env;
586 union {
587 uint32_t u32;
588 uint64_t u64;
589 } val;
590 struct kvm_one_reg reg = {
591 .id = id,
592 .addr = (uintptr_t) &val,
593 };
594 int ret;
595
596 switch (id & KVM_REG_SIZE_MASK) {
597 case KVM_REG_SIZE_U32:
598 val.u32 = env->spr[spr];
599 break;
600
601 case KVM_REG_SIZE_U64:
602 val.u64 = env->spr[spr];
603 break;
604
605 default:
606 /* Don't handle this size yet */
607 abort();
608 }
609
610 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
611 if (ret != 0) {
612 trace_kvm_failed_spr_set(spr, strerror(errno));
613 }
614 }
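
/*
 * Illustrative note: a ONE_REG id encodes the transfer size in its
 * KVM_REG_SIZE_MASK bits, so the helpers above pick the union member
 * accordingly, e.g.:
 *
 *     (KVM_REG_PPC_HIOR & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64
 *
 * A 32-bit SPR id selects val.u32 instead; any other size aborts.
 */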
615
616 static int kvm_put_fp(CPUState *cs)
617 {
618 PowerPCCPU *cpu = POWERPC_CPU(cs);
619 CPUPPCState *env = &cpu->env;
620 struct kvm_one_reg reg;
621 int i;
622 int ret;
623
624 if (env->insns_flags & PPC_FLOAT) {
625 uint64_t fpscr = env->fpscr;
626 bool vsx = !!(env->insns_flags2 & PPC2_VSX);
627
628 reg.id = KVM_REG_PPC_FPSCR;
629 reg.addr = (uintptr_t)&fpscr;
630 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
631 if (ret < 0) {
632 trace_kvm_failed_fpscr_set(strerror(errno));
633 return ret;
634 }
635
636 for (i = 0; i < 32; i++) {
637 uint64_t vsr[2];
638 uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
639 uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
640
641 #if HOST_BIG_ENDIAN
642 vsr[0] = float64_val(*fpr);
643 vsr[1] = *vsrl;
644 #else
645 vsr[0] = *vsrl;
646 vsr[1] = float64_val(*fpr);
647 #endif
648 reg.addr = (uintptr_t) &vsr;
649 reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
650
651 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
652 if (ret < 0) {
653 trace_kvm_failed_fp_set(vsx ? "VSR" : "FPR", i,
654 strerror(errno));
655 return ret;
656 }
657 }
658 }
659
660 if (env->insns_flags & PPC_ALTIVEC) {
661 reg.id = KVM_REG_PPC_VSCR;
662 reg.addr = (uintptr_t)&env->vscr;
663 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
664 if (ret < 0) {
665 trace_kvm_failed_vscr_set(strerror(errno));
666 return ret;
667 }
668
669 for (i = 0; i < 32; i++) {
670 reg.id = KVM_REG_PPC_VR(i);
671 reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
672 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
673 if (ret < 0) {
674 trace_kvm_failed_vr_set(i, strerror(errno));
675 return ret;
676 }
677 }
678 }
679
680 return 0;
681 }
682
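/*
 * Layout note (illustrative): KVM transfers each KVM_REG_PPC_VSR(i) as a
 * 128-bit value, i.e. two 64-bit doublewords. The FPR half goes in vsr[0]
 * on big-endian hosts and vsr[1] on little-endian hosts, with the second
 * doubleword (vsrl) in the other slot; kvm_get_fp() below undoes exactly
 * this packing.
 */
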
683 static int kvm_get_fp(CPUState *cs)
684 {
685 PowerPCCPU *cpu = POWERPC_CPU(cs);
686 CPUPPCState *env = &cpu->env;
687 struct kvm_one_reg reg;
688 int i;
689 int ret;
690
691 if (env->insns_flags & PPC_FLOAT) {
692 uint64_t fpscr;
693 bool vsx = !!(env->insns_flags2 & PPC2_VSX);
694
695 reg.id = KVM_REG_PPC_FPSCR;
696 reg.addr = (uintptr_t)&fpscr;
697 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
698 if (ret < 0) {
699 trace_kvm_failed_fpscr_get(strerror(errno));
700 return ret;
701 } else {
702 env->fpscr = fpscr;
703 }
704
705 for (i = 0; i < 32; i++) {
706 uint64_t vsr[2];
707 uint64_t *fpr = cpu_fpr_ptr(&cpu->env, i);
708 uint64_t *vsrl = cpu_vsrl_ptr(&cpu->env, i);
709
710 reg.addr = (uintptr_t) &vsr;
711 reg.id = vsx ? KVM_REG_PPC_VSR(i) : KVM_REG_PPC_FPR(i);
712
713 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
714 if (ret < 0) {
715 trace_kvm_failed_fp_get(vsx ? "VSR" : "FPR", i,
716 strerror(errno));
717 return ret;
718 } else {
719 #if HOST_BIG_ENDIAN
720 *fpr = vsr[0];
721 if (vsx) {
722 *vsrl = vsr[1];
723 }
724 #else
725 *fpr = vsr[1];
726 if (vsx) {
727 *vsrl = vsr[0];
728 }
729 #endif
730 }
731 }
732 }
733
734 if (env->insns_flags & PPC_ALTIVEC) {
735 reg.id = KVM_REG_PPC_VSCR;
736 reg.addr = (uintptr_t)&env->vscr;
737 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
738 if (ret < 0) {
739 trace_kvm_failed_vscr_get(strerror(errno));
740 return ret;
741 }
742
743 for (i = 0; i < 32; i++) {
744 reg.id = KVM_REG_PPC_VR(i);
745 reg.addr = (uintptr_t)cpu_avr_ptr(env, i);
746 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
747 if (ret < 0) {
748 trace_kvm_failed_vr_get(i, strerror(errno));
749 return ret;
750 }
751 }
752 }
753
754 return 0;
755 }
756
757 #if defined(TARGET_PPC64)
758 static int kvm_get_vpa(CPUState *cs)
759 {
760 PowerPCCPU *cpu = POWERPC_CPU(cs);
761 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
762 struct kvm_one_reg reg;
763 int ret;
764
765 reg.id = KVM_REG_PPC_VPA_ADDR;
766 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
767 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
768 if (ret < 0) {
769 trace_kvm_failed_vpa_addr_get(strerror(errno));
770 return ret;
771 }
772
773 assert((uintptr_t)&spapr_cpu->slb_shadow_size
774 == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
775 reg.id = KVM_REG_PPC_VPA_SLB;
776 reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
777 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
778 if (ret < 0) {
779 trace_kvm_failed_slb_get(strerror(errno));
780 return ret;
781 }
782
783 assert((uintptr_t)&spapr_cpu->dtl_size
784 == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
785 reg.id = KVM_REG_PPC_VPA_DTL;
786 reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
787 ret = kvm_vcpu_ioctl(cs, KVM_GET_ONE_REG, &reg);
788 if (ret < 0) {
789 trace_kvm_failed_dtl_get(strerror(errno));
790 return ret;
791 }
792
793 return 0;
794 }
795
796 static int kvm_put_vpa(CPUState *cs)
797 {
798 PowerPCCPU *cpu = POWERPC_CPU(cs);
799 SpaprCpuState *spapr_cpu = spapr_cpu_state(cpu);
800 struct kvm_one_reg reg;
801 int ret;
802
803 /*
804 * SLB shadow or DTL can't be registered unless a master VPA is
805 * registered. That means when restoring state, if a VPA *is*
806 * registered, we need to set that up first. If not, we need to
807      * deregister the others before deregistering the master VPA.
808 */
809 assert(spapr_cpu->vpa_addr
810 || !(spapr_cpu->slb_shadow_addr || spapr_cpu->dtl_addr));
811
812 if (spapr_cpu->vpa_addr) {
813 reg.id = KVM_REG_PPC_VPA_ADDR;
814 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
815 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
816 if (ret < 0) {
817 trace_kvm_failed_vpa_addr_set(strerror(errno));
818 return ret;
819 }
820 }
821
822 assert((uintptr_t)&spapr_cpu->slb_shadow_size
823 == ((uintptr_t)&spapr_cpu->slb_shadow_addr + 8));
824 reg.id = KVM_REG_PPC_VPA_SLB;
825 reg.addr = (uintptr_t)&spapr_cpu->slb_shadow_addr;
826 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
827 if (ret < 0) {
828 trace_kvm_failed_slb_set(strerror(errno));
829 return ret;
830 }
831
832 assert((uintptr_t)&spapr_cpu->dtl_size
833 == ((uintptr_t)&spapr_cpu->dtl_addr + 8));
834 reg.id = KVM_REG_PPC_VPA_DTL;
835 reg.addr = (uintptr_t)&spapr_cpu->dtl_addr;
836 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
837 if (ret < 0) {
838 trace_kvm_failed_dtl_set(strerror(errno));
839 return ret;
840 }
841
842 if (!spapr_cpu->vpa_addr) {
843 reg.id = KVM_REG_PPC_VPA_ADDR;
844 reg.addr = (uintptr_t)&spapr_cpu->vpa_addr;
845 ret = kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
846 if (ret < 0) {
847 trace_kvm_failed_null_vpa_addr_set(strerror(errno));
848 return ret;
849 }
850 }
851
852 return 0;
853 }
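
/*
 * Layout note (illustrative): KVM_REG_PPC_VPA_SLB and KVM_REG_PPC_VPA_DTL
 * are transferred as a 16-byte {addr, size} pair, which is why the
 * asserts above insist that the size field sits exactly 8 bytes after the
 * address field within SpaprCpuState.
 */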
854 #endif /* TARGET_PPC64 */
855
856 int kvmppc_put_books_sregs(PowerPCCPU *cpu)
857 {
858 CPUPPCState *env = &cpu->env;
859 struct kvm_sregs sregs = { };
860 int i;
861
862 sregs.pvr = env->spr[SPR_PVR];
863
864 if (cpu->vhyp) {
865 PPCVirtualHypervisorClass *vhc =
866 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
867 sregs.u.s.sdr1 = vhc->encode_hpt_for_kvm_pr(cpu->vhyp);
868 } else {
869 sregs.u.s.sdr1 = env->spr[SPR_SDR1];
870 }
871
872 /* Sync SLB */
873 #ifdef TARGET_PPC64
874 for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
875 sregs.u.s.ppc64.slb[i].slbe = env->slb[i].esid;
876 if (env->slb[i].esid & SLB_ESID_V) {
877 sregs.u.s.ppc64.slb[i].slbe |= i;
878 }
879 sregs.u.s.ppc64.slb[i].slbv = env->slb[i].vsid;
880 }
881 #endif
882
883 /* Sync SRs */
884 for (i = 0; i < 16; i++) {
885 sregs.u.s.ppc32.sr[i] = env->sr[i];
886 }
887
888 /* Sync BATs */
889 for (i = 0; i < 8; i++) {
890         /* Beware. We have to swap the upper and lower 32-bit halves here */
891 sregs.u.s.ppc32.dbat[i] = ((uint64_t)env->DBAT[0][i] << 32)
892 | env->DBAT[1][i];
893 sregs.u.s.ppc32.ibat[i] = ((uint64_t)env->IBAT[0][i] << 32)
894 | env->IBAT[1][i];
895 }
896
897 return kvm_vcpu_ioctl(CPU(cpu), KVM_SET_SREGS, &sregs);
898 }
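
/*
 * Packing example (illustrative): for BAT 0, env->DBAT[0][0] (the upper
 * BAT word, DBAT0U) lands in the high 32 bits of sregs.u.s.ppc32.dbat[0]
 * and env->DBAT[1][0] (DBAT0L) in the low 32 bits:
 *
 *     dbat[0] = ((uint64_t)DBAT0U << 32) | DBAT0L;
 *
 * kvmppc_get_books_sregs() below performs the inverse unpacking.
 */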
899
900 int kvm_arch_put_registers(CPUState *cs, int level)
901 {
902 PowerPCCPU *cpu = POWERPC_CPU(cs);
903 CPUPPCState *env = &cpu->env;
904 struct kvm_regs regs;
905 int ret;
906 int i;
907
908 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
909 if (ret < 0) {
910 return ret;
911 }
912
913 regs.ctr = env->ctr;
914 regs.lr = env->lr;
915 regs.xer = cpu_read_xer(env);
916 regs.msr = env->msr;
917 regs.pc = env->nip;
918
919 regs.srr0 = env->spr[SPR_SRR0];
920 regs.srr1 = env->spr[SPR_SRR1];
921
922 regs.sprg0 = env->spr[SPR_SPRG0];
923 regs.sprg1 = env->spr[SPR_SPRG1];
924 regs.sprg2 = env->spr[SPR_SPRG2];
925 regs.sprg3 = env->spr[SPR_SPRG3];
926 regs.sprg4 = env->spr[SPR_SPRG4];
927 regs.sprg5 = env->spr[SPR_SPRG5];
928 regs.sprg6 = env->spr[SPR_SPRG6];
929 regs.sprg7 = env->spr[SPR_SPRG7];
930
931 regs.pid = env->spr[SPR_BOOKE_PID];
932
933 for (i = 0; i < 32; i++) {
934 regs.gpr[i] = env->gpr[i];
935 }
936
937 regs.cr = ppc_get_cr(env);
938
939 ret = kvm_vcpu_ioctl(cs, KVM_SET_REGS, &regs);
940 if (ret < 0) {
941 return ret;
942 }
943
944 kvm_put_fp(cs);
945
946 if (env->tlb_dirty) {
947 kvm_sw_tlb_put(cpu);
948 env->tlb_dirty = false;
949 }
950
951 if (cap_segstate && (level >= KVM_PUT_RESET_STATE)) {
952 ret = kvmppc_put_books_sregs(cpu);
953 if (ret < 0) {
954 return ret;
955 }
956 }
957
958 if (cap_hior && (level >= KVM_PUT_RESET_STATE)) {
959 kvm_put_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
960 }
961
962 if (cap_one_reg) {
963 /*
964          * We deliberately ignore errors here: for kernels which have
965          * the ONE_REG calls but don't support the specific
966          * registers, there's a reasonable chance things will still
967 * work, at least until we try to migrate.
968 */
969 for (i = 0; i < 1024; i++) {
970 uint64_t id = env->spr_cb[i].one_reg_id;
971
972 if (id != 0) {
973 kvm_put_one_spr(cs, id, i);
974 }
975 }
976
977 #ifdef TARGET_PPC64
978 if (FIELD_EX64(env->msr, MSR, TS)) {
979 for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
980 kvm_set_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
981 }
982 for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
983 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
984 }
985 kvm_set_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
986 kvm_set_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
987 kvm_set_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
988 kvm_set_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
989 kvm_set_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
990 kvm_set_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
991 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
992 kvm_set_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
993 kvm_set_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
994 kvm_set_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
995 }
996
997 if (cap_papr) {
998 if (kvm_put_vpa(cs) < 0) {
999 trace_kvm_failed_put_vpa();
1000 }
1001 }
1002
1003 kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1004
1005 if (level > KVM_PUT_RUNTIME_STATE) {
1006 kvm_put_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
1007 }
1008 #endif /* TARGET_PPC64 */
1009 }
1010
1011 return ret;
1012 }
1013
1014 static void kvm_sync_excp(CPUPPCState *env, int vector, int ivor)
1015 {
1016 env->excp_vectors[vector] = env->spr[ivor] + env->spr[SPR_BOOKE_IVPR];
1017 }
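
/*
 * Example (illustrative values): on BookE the effective vector address is
 * the IVPR base plus the per-exception IVOR offset, e.g. with
 * IVPR = 0xfff00000 and IVOR2 = 0x60, the data storage (DSI) handler is
 * entered at 0xfff00060. kvm_sync_excp() mirrors that sum into
 * env->excp_vectors[] whenever the sregs are refreshed.
 */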
1018
1019 static int kvmppc_get_booke_sregs(PowerPCCPU *cpu)
1020 {
1021 CPUPPCState *env = &cpu->env;
1022 struct kvm_sregs sregs;
1023 int ret;
1024
1025 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
1026 if (ret < 0) {
1027 return ret;
1028 }
1029
1030 if (sregs.u.e.features & KVM_SREGS_E_BASE) {
1031 env->spr[SPR_BOOKE_CSRR0] = sregs.u.e.csrr0;
1032 env->spr[SPR_BOOKE_CSRR1] = sregs.u.e.csrr1;
1033 env->spr[SPR_BOOKE_ESR] = sregs.u.e.esr;
1034 env->spr[SPR_BOOKE_DEAR] = sregs.u.e.dear;
1035 env->spr[SPR_BOOKE_MCSR] = sregs.u.e.mcsr;
1036 env->spr[SPR_BOOKE_TSR] = sregs.u.e.tsr;
1037 env->spr[SPR_BOOKE_TCR] = sregs.u.e.tcr;
1038 env->spr[SPR_DECR] = sregs.u.e.dec;
1039 env->spr[SPR_TBL] = sregs.u.e.tb & 0xffffffff;
1040 env->spr[SPR_TBU] = sregs.u.e.tb >> 32;
1041 env->spr[SPR_VRSAVE] = sregs.u.e.vrsave;
1042 }
1043
1044 if (sregs.u.e.features & KVM_SREGS_E_ARCH206) {
1045 env->spr[SPR_BOOKE_PIR] = sregs.u.e.pir;
1046 env->spr[SPR_BOOKE_MCSRR0] = sregs.u.e.mcsrr0;
1047 env->spr[SPR_BOOKE_MCSRR1] = sregs.u.e.mcsrr1;
1048 env->spr[SPR_BOOKE_DECAR] = sregs.u.e.decar;
1049 env->spr[SPR_BOOKE_IVPR] = sregs.u.e.ivpr;
1050 }
1051
1052 if (sregs.u.e.features & KVM_SREGS_E_64) {
1053 env->spr[SPR_BOOKE_EPCR] = sregs.u.e.epcr;
1054 }
1055
1056 if (sregs.u.e.features & KVM_SREGS_E_SPRG8) {
1057 env->spr[SPR_BOOKE_SPRG8] = sregs.u.e.sprg8;
1058 }
1059
1060 if (sregs.u.e.features & KVM_SREGS_E_IVOR) {
1061 env->spr[SPR_BOOKE_IVOR0] = sregs.u.e.ivor_low[0];
1062 kvm_sync_excp(env, POWERPC_EXCP_CRITICAL, SPR_BOOKE_IVOR0);
1063 env->spr[SPR_BOOKE_IVOR1] = sregs.u.e.ivor_low[1];
1064 kvm_sync_excp(env, POWERPC_EXCP_MCHECK, SPR_BOOKE_IVOR1);
1065 env->spr[SPR_BOOKE_IVOR2] = sregs.u.e.ivor_low[2];
1066 kvm_sync_excp(env, POWERPC_EXCP_DSI, SPR_BOOKE_IVOR2);
1067 env->spr[SPR_BOOKE_IVOR3] = sregs.u.e.ivor_low[3];
1068 kvm_sync_excp(env, POWERPC_EXCP_ISI, SPR_BOOKE_IVOR3);
1069 env->spr[SPR_BOOKE_IVOR4] = sregs.u.e.ivor_low[4];
1070 kvm_sync_excp(env, POWERPC_EXCP_EXTERNAL, SPR_BOOKE_IVOR4);
1071 env->spr[SPR_BOOKE_IVOR5] = sregs.u.e.ivor_low[5];
1072 kvm_sync_excp(env, POWERPC_EXCP_ALIGN, SPR_BOOKE_IVOR5);
1073 env->spr[SPR_BOOKE_IVOR6] = sregs.u.e.ivor_low[6];
1074 kvm_sync_excp(env, POWERPC_EXCP_PROGRAM, SPR_BOOKE_IVOR6);
1075 env->spr[SPR_BOOKE_IVOR7] = sregs.u.e.ivor_low[7];
1076 kvm_sync_excp(env, POWERPC_EXCP_FPU, SPR_BOOKE_IVOR7);
1077 env->spr[SPR_BOOKE_IVOR8] = sregs.u.e.ivor_low[8];
1078 kvm_sync_excp(env, POWERPC_EXCP_SYSCALL, SPR_BOOKE_IVOR8);
1079 env->spr[SPR_BOOKE_IVOR9] = sregs.u.e.ivor_low[9];
1080 kvm_sync_excp(env, POWERPC_EXCP_APU, SPR_BOOKE_IVOR9);
1081 env->spr[SPR_BOOKE_IVOR10] = sregs.u.e.ivor_low[10];
1082 kvm_sync_excp(env, POWERPC_EXCP_DECR, SPR_BOOKE_IVOR10);
1083 env->spr[SPR_BOOKE_IVOR11] = sregs.u.e.ivor_low[11];
1084 kvm_sync_excp(env, POWERPC_EXCP_FIT, SPR_BOOKE_IVOR11);
1085 env->spr[SPR_BOOKE_IVOR12] = sregs.u.e.ivor_low[12];
1086 kvm_sync_excp(env, POWERPC_EXCP_WDT, SPR_BOOKE_IVOR12);
1087 env->spr[SPR_BOOKE_IVOR13] = sregs.u.e.ivor_low[13];
1088 kvm_sync_excp(env, POWERPC_EXCP_DTLB, SPR_BOOKE_IVOR13);
1089 env->spr[SPR_BOOKE_IVOR14] = sregs.u.e.ivor_low[14];
1090 kvm_sync_excp(env, POWERPC_EXCP_ITLB, SPR_BOOKE_IVOR14);
1091 env->spr[SPR_BOOKE_IVOR15] = sregs.u.e.ivor_low[15];
1092 kvm_sync_excp(env, POWERPC_EXCP_DEBUG, SPR_BOOKE_IVOR15);
1093
1094 if (sregs.u.e.features & KVM_SREGS_E_SPE) {
1095 env->spr[SPR_BOOKE_IVOR32] = sregs.u.e.ivor_high[0];
1096 kvm_sync_excp(env, POWERPC_EXCP_SPEU, SPR_BOOKE_IVOR32);
1097 env->spr[SPR_BOOKE_IVOR33] = sregs.u.e.ivor_high[1];
1098 kvm_sync_excp(env, POWERPC_EXCP_EFPDI, SPR_BOOKE_IVOR33);
1099 env->spr[SPR_BOOKE_IVOR34] = sregs.u.e.ivor_high[2];
1100 kvm_sync_excp(env, POWERPC_EXCP_EFPRI, SPR_BOOKE_IVOR34);
1101 }
1102
1103 if (sregs.u.e.features & KVM_SREGS_E_PM) {
1104 env->spr[SPR_BOOKE_IVOR35] = sregs.u.e.ivor_high[3];
1105 kvm_sync_excp(env, POWERPC_EXCP_EPERFM, SPR_BOOKE_IVOR35);
1106 }
1107
1108 if (sregs.u.e.features & KVM_SREGS_E_PC) {
1109 env->spr[SPR_BOOKE_IVOR36] = sregs.u.e.ivor_high[4];
1110 kvm_sync_excp(env, POWERPC_EXCP_DOORI, SPR_BOOKE_IVOR36);
1111 env->spr[SPR_BOOKE_IVOR37] = sregs.u.e.ivor_high[5];
1112 kvm_sync_excp(env, POWERPC_EXCP_DOORCI, SPR_BOOKE_IVOR37);
1113 }
1114 }
1115
1116 if (sregs.u.e.features & KVM_SREGS_E_ARCH206_MMU) {
1117 env->spr[SPR_BOOKE_MAS0] = sregs.u.e.mas0;
1118 env->spr[SPR_BOOKE_MAS1] = sregs.u.e.mas1;
1119 env->spr[SPR_BOOKE_MAS2] = sregs.u.e.mas2;
1120 env->spr[SPR_BOOKE_MAS3] = sregs.u.e.mas7_3 & 0xffffffff;
1121 env->spr[SPR_BOOKE_MAS4] = sregs.u.e.mas4;
1122 env->spr[SPR_BOOKE_MAS6] = sregs.u.e.mas6;
1123 env->spr[SPR_BOOKE_MAS7] = sregs.u.e.mas7_3 >> 32;
1124 env->spr[SPR_MMUCFG] = sregs.u.e.mmucfg;
1125 env->spr[SPR_BOOKE_TLB0CFG] = sregs.u.e.tlbcfg[0];
1126 env->spr[SPR_BOOKE_TLB1CFG] = sregs.u.e.tlbcfg[1];
1127 }
1128
1129 if (sregs.u.e.features & KVM_SREGS_EXP) {
1130 env->spr[SPR_BOOKE_EPR] = sregs.u.e.epr;
1131 }
1132
1133 if (sregs.u.e.features & KVM_SREGS_E_PD) {
1134 env->spr[SPR_BOOKE_EPLC] = sregs.u.e.eplc;
1135 env->spr[SPR_BOOKE_EPSC] = sregs.u.e.epsc;
1136 }
1137
1138 if (sregs.u.e.impl_id == KVM_SREGS_E_IMPL_FSL) {
1139 env->spr[SPR_E500_SVR] = sregs.u.e.impl.fsl.svr;
1140 env->spr[SPR_Exxx_MCAR] = sregs.u.e.impl.fsl.mcar;
1141 env->spr[SPR_HID0] = sregs.u.e.impl.fsl.hid0;
1142
1143 if (sregs.u.e.impl.fsl.features & KVM_SREGS_E_FSL_PIDn) {
1144 env->spr[SPR_BOOKE_PID1] = sregs.u.e.impl.fsl.pid1;
1145 env->spr[SPR_BOOKE_PID2] = sregs.u.e.impl.fsl.pid2;
1146 }
1147 }
1148
1149 return 0;
1150 }
1151
1152 static int kvmppc_get_books_sregs(PowerPCCPU *cpu)
1153 {
1154 CPUPPCState *env = &cpu->env;
1155 struct kvm_sregs sregs;
1156 int ret;
1157 int i;
1158
1159 ret = kvm_vcpu_ioctl(CPU(cpu), KVM_GET_SREGS, &sregs);
1160 if (ret < 0) {
1161 return ret;
1162 }
1163
1164 if (!cpu->vhyp) {
1165 ppc_store_sdr1(env, sregs.u.s.sdr1);
1166 }
1167
1168 /* Sync SLB */
1169 #ifdef TARGET_PPC64
1170 /*
1171 * The packed SLB array we get from KVM_GET_SREGS only contains
1172 * information about valid entries. So we flush our internal copy
1173 * to get rid of stale ones, then put all valid SLB entries back
1174 * in.
1175 */
1176 memset(env->slb, 0, sizeof(env->slb));
1177 for (i = 0; i < ARRAY_SIZE(env->slb); i++) {
1178 target_ulong rb = sregs.u.s.ppc64.slb[i].slbe;
1179 target_ulong rs = sregs.u.s.ppc64.slb[i].slbv;
1180 /*
1181 * Only restore valid entries
1182 */
1183 if (rb & SLB_ESID_V) {
1184 ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs);
1185 }
1186 }
1187 #endif
1188
1189 /* Sync SRs */
1190 for (i = 0; i < 16; i++) {
1191 env->sr[i] = sregs.u.s.ppc32.sr[i];
1192 }
1193
1194 /* Sync BATs */
1195 for (i = 0; i < 8; i++) {
1196 env->DBAT[0][i] = sregs.u.s.ppc32.dbat[i] & 0xffffffff;
1197 env->DBAT[1][i] = sregs.u.s.ppc32.dbat[i] >> 32;
1198 env->IBAT[0][i] = sregs.u.s.ppc32.ibat[i] & 0xffffffff;
1199 env->IBAT[1][i] = sregs.u.s.ppc32.ibat[i] >> 32;
1200 }
1201
1202 return 0;
1203 }
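
/*
 * Decoding note (illustrative): the slbe word returned by KVM carries the
 * SLB slot index in its low 12 bits (placed there by
 * kvmppc_put_books_sregs() above), so in the loop above rb & 0xfff
 * recovers the slot number while rb & ~0xfffULL recovers the ESID.
 */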
1204
1205 int kvm_arch_get_registers(CPUState *cs)
1206 {
1207 PowerPCCPU *cpu = POWERPC_CPU(cs);
1208 CPUPPCState *env = &cpu->env;
1209 struct kvm_regs regs;
1210 int i, ret;
1211
1212 ret = kvm_vcpu_ioctl(cs, KVM_GET_REGS, &regs);
1213 if (ret < 0) {
1214 return ret;
1215 }
1216
1217 ppc_set_cr(env, regs.cr);
1218 env->ctr = regs.ctr;
1219 env->lr = regs.lr;
1220 cpu_write_xer(env, regs.xer);
1221 env->msr = regs.msr;
1222 env->nip = regs.pc;
1223
1224 env->spr[SPR_SRR0] = regs.srr0;
1225 env->spr[SPR_SRR1] = regs.srr1;
1226
1227 env->spr[SPR_SPRG0] = regs.sprg0;
1228 env->spr[SPR_SPRG1] = regs.sprg1;
1229 env->spr[SPR_SPRG2] = regs.sprg2;
1230 env->spr[SPR_SPRG3] = regs.sprg3;
1231 env->spr[SPR_SPRG4] = regs.sprg4;
1232 env->spr[SPR_SPRG5] = regs.sprg5;
1233 env->spr[SPR_SPRG6] = regs.sprg6;
1234 env->spr[SPR_SPRG7] = regs.sprg7;
1235
1236 env->spr[SPR_BOOKE_PID] = regs.pid;
1237
1238 for (i = 0; i < 32; i++) {
1239 env->gpr[i] = regs.gpr[i];
1240 }
1241
1242 kvm_get_fp(cs);
1243
1244 if (cap_booke_sregs) {
1245 ret = kvmppc_get_booke_sregs(cpu);
1246 if (ret < 0) {
1247 return ret;
1248 }
1249 }
1250
1251 if (cap_segstate) {
1252 ret = kvmppc_get_books_sregs(cpu);
1253 if (ret < 0) {
1254 return ret;
1255 }
1256 }
1257
1258 if (cap_hior) {
1259 kvm_get_one_spr(cs, KVM_REG_PPC_HIOR, SPR_HIOR);
1260 }
1261
1262 if (cap_one_reg) {
1263 /*
1264          * We deliberately ignore errors here: for kernels which have
1265          * the ONE_REG calls but don't support the specific
1266          * registers, there's a reasonable chance things will still
1267 * work, at least until we try to migrate.
1268 */
1269 for (i = 0; i < 1024; i++) {
1270 uint64_t id = env->spr_cb[i].one_reg_id;
1271
1272 if (id != 0) {
1273 kvm_get_one_spr(cs, id, i);
1274 }
1275 }
1276
1277 #ifdef TARGET_PPC64
1278 if (FIELD_EX64(env->msr, MSR, TS)) {
1279 for (i = 0; i < ARRAY_SIZE(env->tm_gpr); i++) {
1280 kvm_get_one_reg(cs, KVM_REG_PPC_TM_GPR(i), &env->tm_gpr[i]);
1281 }
1282 for (i = 0; i < ARRAY_SIZE(env->tm_vsr); i++) {
1283 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSR(i), &env->tm_vsr[i]);
1284 }
1285 kvm_get_one_reg(cs, KVM_REG_PPC_TM_CR, &env->tm_cr);
1286 kvm_get_one_reg(cs, KVM_REG_PPC_TM_LR, &env->tm_lr);
1287 kvm_get_one_reg(cs, KVM_REG_PPC_TM_CTR, &env->tm_ctr);
1288 kvm_get_one_reg(cs, KVM_REG_PPC_TM_FPSCR, &env->tm_fpscr);
1289 kvm_get_one_reg(cs, KVM_REG_PPC_TM_AMR, &env->tm_amr);
1290 kvm_get_one_reg(cs, KVM_REG_PPC_TM_PPR, &env->tm_ppr);
1291 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VRSAVE, &env->tm_vrsave);
1292 kvm_get_one_reg(cs, KVM_REG_PPC_TM_VSCR, &env->tm_vscr);
1293 kvm_get_one_reg(cs, KVM_REG_PPC_TM_DSCR, &env->tm_dscr);
1294 kvm_get_one_reg(cs, KVM_REG_PPC_TM_TAR, &env->tm_tar);
1295 }
1296
1297 if (cap_papr) {
1298 if (kvm_get_vpa(cs) < 0) {
1299 trace_kvm_failed_get_vpa();
1300 }
1301 }
1302
1303 kvm_get_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &env->tb_env->tb_offset);
1304 kvm_get_one_spr(cs, KVM_REG_PPC_DPDES, SPR_DPDES);
1305 #endif
1306 }
1307
1308 return 0;
1309 }
1310
1311 int kvmppc_set_interrupt(PowerPCCPU *cpu, int irq, int level)
1312 {
1313 unsigned virq = level ? KVM_INTERRUPT_SET_LEVEL : KVM_INTERRUPT_UNSET;
1314
1315 if (irq != PPC_INTERRUPT_EXT) {
1316 return 0;
1317 }
1318
1319 if (!cap_interrupt_unset) {
1320 return 0;
1321 }
1322
1323 kvm_vcpu_ioctl(CPU(cpu), KVM_INTERRUPT, &virq);
1324
1325 return 0;
1326 }
1327
1328 void kvm_arch_pre_run(CPUState *cs, struct kvm_run *run)
1329 {
1330 return;
1331 }
1332
1333 MemTxAttrs kvm_arch_post_run(CPUState *cs, struct kvm_run *run)
1334 {
1335 return MEMTXATTRS_UNSPECIFIED;
1336 }
1337
1338 int kvm_arch_process_async_events(CPUState *cs)
1339 {
1340 return cs->halted;
1341 }
1342
1343 static int kvmppc_handle_halt(PowerPCCPU *cpu)
1344 {
1345 CPUState *cs = CPU(cpu);
1346 CPUPPCState *env = &cpu->env;
1347
1348 if (!(cs->interrupt_request & CPU_INTERRUPT_HARD) &&
1349 FIELD_EX64(env->msr, MSR, EE)) {
1350 cs->halted = 1;
1351 cs->exception_index = EXCP_HLT;
1352 }
1353
1354 return 0;
1355 }
1356
1357 /* Map DCR accesses to the existing QEMU DCR emulation */
1358 static int kvmppc_handle_dcr_read(CPUPPCState *env,
1359 uint32_t dcrn, uint32_t *data)
1360 {
1361 if (ppc_dcr_read(env->dcr_env, dcrn, data) < 0) {
1362 fprintf(stderr, "Read to unhandled DCR (0x%x)\n", dcrn);
1363 }
1364
1365 return 0;
1366 }
1367
1368 static int kvmppc_handle_dcr_write(CPUPPCState *env,
1369 uint32_t dcrn, uint32_t data)
1370 {
1371 if (ppc_dcr_write(env->dcr_env, dcrn, data) < 0) {
1372 fprintf(stderr, "Write to unhandled DCR (0x%x)\n", dcrn);
1373 }
1374
1375 return 0;
1376 }
1377
1378 int kvm_arch_insert_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1379 {
1380 /* Mixed endian case is not handled */
1381 uint32_t sc = debug_inst_opcode;
1382
1383 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
1384 sizeof(sc), 0) ||
1385 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 1)) {
1386 return -EINVAL;
1387 }
1388
1389 return 0;
1390 }
1391
1392 int kvm_arch_remove_sw_breakpoint(CPUState *cs, struct kvm_sw_breakpoint *bp)
1393 {
1394 uint32_t sc;
1395
1396 if (cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&sc, sizeof(sc), 0) ||
1397 sc != debug_inst_opcode ||
1398 cpu_memory_rw_debug(cs, bp->pc, (uint8_t *)&bp->saved_insn,
1399 sizeof(sc), 1)) {
1400 return -EINVAL;
1401 }
1402
1403 return 0;
1404 }
1405
1406 static int find_hw_breakpoint(target_ulong addr, int type)
1407 {
1408 int n;
1409
1410 assert((nb_hw_breakpoint + nb_hw_watchpoint)
1411 <= ARRAY_SIZE(hw_debug_points));
1412
1413 for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
1414 if (hw_debug_points[n].addr == addr &&
1415 hw_debug_points[n].type == type) {
1416 return n;
1417 }
1418 }
1419
1420 return -1;
1421 }
1422
1423 static int find_hw_watchpoint(target_ulong addr, int *flag)
1424 {
1425 int n;
1426
1427 n = find_hw_breakpoint(addr, GDB_WATCHPOINT_ACCESS);
1428 if (n >= 0) {
1429 *flag = BP_MEM_ACCESS;
1430 return n;
1431 }
1432
1433 n = find_hw_breakpoint(addr, GDB_WATCHPOINT_WRITE);
1434 if (n >= 0) {
1435 *flag = BP_MEM_WRITE;
1436 return n;
1437 }
1438
1439 n = find_hw_breakpoint(addr, GDB_WATCHPOINT_READ);
1440 if (n >= 0) {
1441 *flag = BP_MEM_READ;
1442 return n;
1443 }
1444
1445 return -1;
1446 }
1447
1448 int kvm_arch_insert_hw_breakpoint(vaddr addr, vaddr len, int type)
1449 {
1450 const unsigned breakpoint_index = nb_hw_breakpoint + nb_hw_watchpoint;
1451 if (breakpoint_index >= ARRAY_SIZE(hw_debug_points)) {
1452 return -ENOBUFS;
1453 }
1454
1455 hw_debug_points[breakpoint_index].addr = addr;
1456 hw_debug_points[breakpoint_index].type = type;
1457
1458 switch (type) {
1459 case GDB_BREAKPOINT_HW:
1460 if (nb_hw_breakpoint >= max_hw_breakpoint) {
1461 return -ENOBUFS;
1462 }
1463
1464 if (find_hw_breakpoint(addr, type) >= 0) {
1465 return -EEXIST;
1466 }
1467
1468 nb_hw_breakpoint++;
1469 break;
1470
1471 case GDB_WATCHPOINT_WRITE:
1472 case GDB_WATCHPOINT_READ:
1473 case GDB_WATCHPOINT_ACCESS:
1474 if (nb_hw_watchpoint >= max_hw_watchpoint) {
1475 return -ENOBUFS;
1476 }
1477
1478 if (find_hw_breakpoint(addr, type) >= 0) {
1479 return -EEXIST;
1480 }
1481
1482 nb_hw_watchpoint++;
1483 break;
1484
1485 default:
1486 return -ENOSYS;
1487 }
1488
1489 return 0;
1490 }
1491
1492 int kvm_arch_remove_hw_breakpoint(vaddr addr, vaddr len, int type)
1493 {
1494 int n;
1495
1496 n = find_hw_breakpoint(addr, type);
1497 if (n < 0) {
1498 return -ENOENT;
1499 }
1500
1501 switch (type) {
1502 case GDB_BREAKPOINT_HW:
1503 nb_hw_breakpoint--;
1504 break;
1505
1506 case GDB_WATCHPOINT_WRITE:
1507 case GDB_WATCHPOINT_READ:
1508 case GDB_WATCHPOINT_ACCESS:
1509 nb_hw_watchpoint--;
1510 break;
1511
1512 default:
1513 return -ENOSYS;
1514 }
1515 hw_debug_points[n] = hw_debug_points[nb_hw_breakpoint + nb_hw_watchpoint];
1516
1517 return 0;
1518 }
1519
1520 void kvm_arch_remove_all_hw_breakpoints(void)
1521 {
1522 nb_hw_breakpoint = nb_hw_watchpoint = 0;
1523 }
1524
1525 void kvm_arch_update_guest_debug(CPUState *cs, struct kvm_guest_debug *dbg)
1526 {
1527 int n;
1528
1529 /* Software Breakpoint updates */
1530 if (kvm_sw_breakpoints_active(cs)) {
1531 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_SW_BP;
1532 }
1533
1534 assert((nb_hw_breakpoint + nb_hw_watchpoint)
1535 <= ARRAY_SIZE(hw_debug_points));
1536 assert((nb_hw_breakpoint + nb_hw_watchpoint) <= ARRAY_SIZE(dbg->arch.bp));
1537
1538 if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
1539 dbg->control |= KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
1540 memset(dbg->arch.bp, 0, sizeof(dbg->arch.bp));
1541 for (n = 0; n < nb_hw_breakpoint + nb_hw_watchpoint; n++) {
1542 switch (hw_debug_points[n].type) {
1543 case GDB_BREAKPOINT_HW:
1544 dbg->arch.bp[n].type = KVMPPC_DEBUG_BREAKPOINT;
1545 break;
1546 case GDB_WATCHPOINT_WRITE:
1547 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE;
1548 break;
1549 case GDB_WATCHPOINT_READ:
1550 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_READ;
1551 break;
1552 case GDB_WATCHPOINT_ACCESS:
1553 dbg->arch.bp[n].type = KVMPPC_DEBUG_WATCH_WRITE |
1554 KVMPPC_DEBUG_WATCH_READ;
1555 break;
1556 default:
1557 cpu_abort(cs, "Unsupported breakpoint type\n");
1558 }
1559 dbg->arch.bp[n].addr = hw_debug_points[n].addr;
1560 }
1561 }
1562 }
1563
1564 static int kvm_handle_hw_breakpoint(CPUState *cs,
1565 struct kvm_debug_exit_arch *arch_info)
1566 {
1567 int handle = DEBUG_RETURN_GUEST;
1568 int n;
1569 int flag = 0;
1570
1571 if (nb_hw_breakpoint + nb_hw_watchpoint > 0) {
1572 if (arch_info->status & KVMPPC_DEBUG_BREAKPOINT) {
1573 n = find_hw_breakpoint(arch_info->address, GDB_BREAKPOINT_HW);
1574 if (n >= 0) {
1575 handle = DEBUG_RETURN_GDB;
1576 }
1577 } else if (arch_info->status & (KVMPPC_DEBUG_WATCH_READ |
1578 KVMPPC_DEBUG_WATCH_WRITE)) {
1579 n = find_hw_watchpoint(arch_info->address, &flag);
1580 if (n >= 0) {
1581 handle = DEBUG_RETURN_GDB;
1582 cs->watchpoint_hit = &hw_watchpoint;
1583 hw_watchpoint.vaddr = hw_debug_points[n].addr;
1584 hw_watchpoint.flags = flag;
1585 }
1586 }
1587 }
1588 return handle;
1589 }
1590
1591 static int kvm_handle_singlestep(void)
1592 {
1593 return DEBUG_RETURN_GDB;
1594 }
1595
1596 static int kvm_handle_sw_breakpoint(void)
1597 {
1598 return DEBUG_RETURN_GDB;
1599 }
1600
1601 static int kvm_handle_debug(PowerPCCPU *cpu, struct kvm_run *run)
1602 {
1603 CPUState *cs = CPU(cpu);
1604 CPUPPCState *env = &cpu->env;
1605 struct kvm_debug_exit_arch *arch_info = &run->debug.arch;
1606
1607 if (cs->singlestep_enabled) {
1608 return kvm_handle_singlestep();
1609 }
1610
1611 if (arch_info->status) {
1612 return kvm_handle_hw_breakpoint(cs, arch_info);
1613 }
1614
1615 if (kvm_find_sw_breakpoint(cs, arch_info->address)) {
1616 return kvm_handle_sw_breakpoint();
1617 }
1618
1619 /*
1620      * QEMU is not able to handle this debug exception, so inject a
1621      * program exception into the guest instead.
1622      * Yes, a program exception, NOT a debug exception!
1623      * When QEMU is using the debug resources, the debug exception
1624      * must always be set; to achieve this we set MSR_DE and also
1625      * set MSRP_DEP so the guest cannot change MSR_DE.
1626      * When emulating debug resources for the guest, we instead want
1627      * the guest to control MSR_DE (to enable/disable the debug
1628      * interrupt as needed).
1629      * Supporting both configurations at once is NOT possible, so
1630      * debug resources cannot be shared between QEMU and the guest
1631      * on the BookE architecture. In the current design QEMU gets
1632      * priority over the guest: if QEMU is using the debug
1633      * resources, then the guest cannot use them.
1634      * For software breakpoints QEMU uses a privileged instruction,
1635      * so there is no way we got here because the guest set a debug
1636      * exception; the only possibility is that the guest executed a
1637      * privileged / illegal instruction, which is why we are
1638      * injecting a program interrupt.
1639 */
1640 cpu_synchronize_state(cs);
1641 /*
1642      * env->nip is the PC, so increment it by 4 before calling
1643      * ppc_cpu_do_interrupt(), which sets srr0 = env->nip - 4.
1644 */
1645 env->nip += 4;
1646 cs->exception_index = POWERPC_EXCP_PROGRAM;
1647 env->error_code = POWERPC_EXCP_INVAL;
1648 ppc_cpu_do_interrupt(cs);
1649
1650 return DEBUG_RETURN_GUEST;
1651 }
1652
1653 int kvm_arch_handle_exit(CPUState *cs, struct kvm_run *run)
1654 {
1655 PowerPCCPU *cpu = POWERPC_CPU(cs);
1656 CPUPPCState *env = &cpu->env;
1657 int ret;
1658
1659 qemu_mutex_lock_iothread();
1660
1661 switch (run->exit_reason) {
1662 case KVM_EXIT_DCR:
1663 if (run->dcr.is_write) {
1664 trace_kvm_handle_dcr_write();
1665 ret = kvmppc_handle_dcr_write(env, run->dcr.dcrn, run->dcr.data);
1666 } else {
1667 trace_kvm_handle_dcr_read();
1668 ret = kvmppc_handle_dcr_read(env, run->dcr.dcrn, &run->dcr.data);
1669 }
1670 break;
1671 case KVM_EXIT_HLT:
1672 trace_kvm_handle_halt();
1673 ret = kvmppc_handle_halt(cpu);
1674 break;
1675 #if defined(TARGET_PPC64)
1676 case KVM_EXIT_PAPR_HCALL:
1677 trace_kvm_handle_papr_hcall(run->papr_hcall.nr);
1678 run->papr_hcall.ret = spapr_hypercall(cpu,
1679 run->papr_hcall.nr,
1680 run->papr_hcall.args);
1681 ret = 0;
1682 break;
1683 #endif
1684 case KVM_EXIT_EPR:
1685 trace_kvm_handle_epr();
1686 run->epr.epr = ldl_phys(cs->as, env->mpic_iack);
1687 ret = 0;
1688 break;
1689 case KVM_EXIT_WATCHDOG:
1690 trace_kvm_handle_watchdog_expiry();
1691 watchdog_perform_action();
1692 ret = 0;
1693 break;
1694
1695 case KVM_EXIT_DEBUG:
1696 trace_kvm_handle_debug_exception();
1697 if (kvm_handle_debug(cpu, run)) {
1698 ret = EXCP_DEBUG;
1699 break;
1700 }
1701 /* re-enter, this exception was guest-internal */
1702 ret = 0;
1703 break;
1704
1705 #if defined(TARGET_PPC64)
1706 case KVM_EXIT_NMI:
1707 trace_kvm_handle_nmi_exception();
1708 ret = kvm_handle_nmi(cpu, run);
1709 break;
1710 #endif
1711
1712 default:
1713 fprintf(stderr, "KVM: unknown exit reason %d\n", run->exit_reason);
1714 ret = -1;
1715 break;
1716 }
1717
1718 qemu_mutex_unlock_iothread();
1719 return ret;
1720 }
1721
1722 int kvmppc_or_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
1723 {
1724 CPUState *cs = CPU(cpu);
1725 uint32_t bits = tsr_bits;
1726 struct kvm_one_reg reg = {
1727 .id = KVM_REG_PPC_OR_TSR,
1728 .addr = (uintptr_t) &bits,
1729 };
1730
1731 if (!kvm_enabled()) {
1732 return 0;
1733 }
1734
1735 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1736 }
1737
1738 int kvmppc_clear_tsr_bits(PowerPCCPU *cpu, uint32_t tsr_bits)
1739 {
1740
1741 CPUState *cs = CPU(cpu);
1742 uint32_t bits = tsr_bits;
1743 struct kvm_one_reg reg = {
1744 .id = KVM_REG_PPC_CLEAR_TSR,
1745 .addr = (uintptr_t) &bits,
1746 };
1747
1748 if (!kvm_enabled()) {
1749 return 0;
1750 }
1751
1752 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1753 }
1754
1755 int kvmppc_set_tcr(PowerPCCPU *cpu)
1756 {
1757 CPUState *cs = CPU(cpu);
1758 CPUPPCState *env = &cpu->env;
1759 uint32_t tcr = env->spr[SPR_BOOKE_TCR];
1760
1761 struct kvm_one_reg reg = {
1762 .id = KVM_REG_PPC_TCR,
1763 .addr = (uintptr_t) &tcr,
1764 };
1765
1766 if (!kvm_enabled()) {
1767 return 0;
1768 }
1769
1770 return kvm_vcpu_ioctl(cs, KVM_SET_ONE_REG, &reg);
1771 }
1772
1773 int kvmppc_booke_watchdog_enable(PowerPCCPU *cpu)
1774 {
1775 CPUState *cs = CPU(cpu);
1776 int ret;
1777
1778 if (!kvm_enabled()) {
1779 return -1;
1780 }
1781
1782 if (!cap_ppc_watchdog) {
1783         printf("warning: KVM does not support watchdog\n");
1784 return -1;
1785 }
1786
1787 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_BOOKE_WATCHDOG, 0);
1788 if (ret < 0) {
1789 fprintf(stderr, "%s: couldn't enable KVM_CAP_PPC_BOOKE_WATCHDOG: %s\n",
1790 __func__, strerror(-ret));
1791 return ret;
1792 }
1793
1794 return ret;
1795 }
1796
1797 static int read_cpuinfo(const char *field, char *value, int len)
1798 {
1799 FILE *f;
1800 int ret = -1;
1801 int field_len = strlen(field);
1802 char line[512];
1803
1804 f = fopen("/proc/cpuinfo", "r");
1805 if (!f) {
1806 return -1;
1807 }
1808
1809 do {
1810 if (!fgets(line, sizeof(line), f)) {
1811 break;
1812 }
1813 if (!strncmp(line, field, field_len)) {
1814 pstrcpy(value, len, line);
1815 ret = 0;
1816 break;
1817 }
1818 } while (*line);
1819
1820 fclose(f);
1821
1822 return ret;
1823 }
1824
1825 static uint32_t kvmppc_get_tbfreq_procfs(void)
1826 {
1827 char line[512];
1828 char *ns;
1829 uint32_t tbfreq_fallback = NANOSECONDS_PER_SECOND;
1830 uint32_t tbfreq_procfs;
1831
1832 if (read_cpuinfo("timebase", line, sizeof(line))) {
1833 return tbfreq_fallback;
1834 }
1835
1836 ns = strchr(line, ':');
1837 if (!ns) {
1838 return tbfreq_fallback;
1839 }
1840
1841 tbfreq_procfs = atoi(++ns);
1842
1843     /* 0 is certainly not acceptable to the guest, return the fallback value */
1844 return tbfreq_procfs ? tbfreq_procfs : tbfreq_fallback;
1845 }
1846
1847 uint32_t kvmppc_get_tbfreq(void)
1848 {
1849 static uint32_t cached_tbfreq;
1850
1851 if (!cached_tbfreq) {
1852 cached_tbfreq = kvmppc_get_tbfreq_procfs();
1853 }
1854
1855 return cached_tbfreq;
1856 }
1857
1858 bool kvmppc_get_host_serial(char **value)
1859 {
1860 return g_file_get_contents("/proc/device-tree/system-id", value, NULL,
1861 NULL);
1862 }
1863
1864 bool kvmppc_get_host_model(char **value)
1865 {
1866 return g_file_get_contents("/proc/device-tree/model", value, NULL, NULL);
1867 }
1868
1869 /* Try to find a device tree node for a CPU with a clock-frequency property */
1870 static int kvmppc_find_cpu_dt(char *buf, int buf_len)
1871 {
1872 struct dirent *dirp;
1873 DIR *dp;
1874
1875 dp = opendir(PROC_DEVTREE_CPU);
1876 if (!dp) {
1877 printf("Can't open directory " PROC_DEVTREE_CPU "\n");
1878 return -1;
1879 }
1880
1881 buf[0] = '\0';
1882 while ((dirp = readdir(dp)) != NULL) {
1883 FILE *f;
1884
1885 /* Don't accidentally read from the current and parent directories */
1886 if (strcmp(dirp->d_name, ".") == 0 || strcmp(dirp->d_name, "..") == 0) {
1887 continue;
1888 }
1889
1890 snprintf(buf, buf_len, "%s%s/clock-frequency", PROC_DEVTREE_CPU,
1891 dirp->d_name);
1892 f = fopen(buf, "r");
1893 if (f) {
1894 snprintf(buf, buf_len, "%s%s", PROC_DEVTREE_CPU, dirp->d_name);
1895 fclose(f);
1896 break;
1897 }
1898 buf[0] = '\0';
1899 }
1900 closedir(dp);
1901 if (buf[0] == '\0') {
1902 printf("Unknown host!\n");
1903 return -1;
1904 }
1905
1906 return 0;
1907 }
1908
1909 static uint64_t kvmppc_read_int_dt(const char *filename)
1910 {
1911 union {
1912 uint32_t v32;
1913 uint64_t v64;
1914 } u;
1915 FILE *f;
1916 int len;
1917
1918 f = fopen(filename, "rb");
1919 if (!f) {
1920 return -1;
1921 }
1922
1923 len = fread(&u, 1, sizeof(u), f);
1924 fclose(f);
1925 switch (len) {
1926 case 4:
1927 /* property is a 32-bit quantity */
1928 return be32_to_cpu(u.v32);
1929 case 8:
1930 return be64_to_cpu(u.v64);
1931 }
1932
1933 return 0;
1934 }
1935
1936 /*
1937 * Read a CPU node property from the host device tree that's a single
1938 * integer (32-bit or 64-bit). Returns 0 if anything goes wrong
1939 * (can't find or open the property, or doesn't understand the format)
1940 */
1941 static uint64_t kvmppc_read_int_cpu_dt(const char *propname)
1942 {
1943 char buf[PATH_MAX], *tmp;
1944 uint64_t val;
1945
1946 if (kvmppc_find_cpu_dt(buf, sizeof(buf))) {
1947 return -1;
1948 }
1949
1950 tmp = g_strdup_printf("%s/%s", buf, propname);
1951 val = kvmppc_read_int_dt(tmp);
1952 g_free(tmp);
1953
1954 return val;
1955 }
1956
1957 uint64_t kvmppc_get_clockfreq(void)
1958 {
1959 return kvmppc_read_int_cpu_dt("clock-frequency");
1960 }
1961
1962 static int kvmppc_get_dec_bits(void)
1963 {
1964 int nr_bits = kvmppc_read_int_cpu_dt("ibm,dec-bits");
1965
1966 if (nr_bits > 0) {
1967 return nr_bits;
1968 }
1969 return 0;
1970 }
1971
1972 static int kvmppc_get_pvinfo(CPUPPCState *env, struct kvm_ppc_pvinfo *pvinfo)
1973 {
1974 CPUState *cs = env_cpu(env);
1975
1976 if (kvm_vm_check_extension(cs->kvm_state, KVM_CAP_PPC_GET_PVINFO) &&
1977 !kvm_vm_ioctl(cs->kvm_state, KVM_PPC_GET_PVINFO, pvinfo)) {
1978 return 0;
1979 }
1980
1981 return 1;
1982 }
1983
1984 int kvmppc_get_hasidle(CPUPPCState *env)
1985 {
1986 struct kvm_ppc_pvinfo pvinfo;
1987
1988 if (!kvmppc_get_pvinfo(env, &pvinfo) &&
1989 (pvinfo.flags & KVM_PPC_PVINFO_FLAGS_EV_IDLE)) {
1990 return 1;
1991 }
1992
1993 return 0;
1994 }
1995
1996 int kvmppc_get_hypercall(CPUPPCState *env, uint8_t *buf, int buf_len)
1997 {
1998 uint32_t *hc = (uint32_t *)buf;
1999 struct kvm_ppc_pvinfo pvinfo;
2000
2001 if (!kvmppc_get_pvinfo(env, &pvinfo)) {
2002 memcpy(buf, pvinfo.hcall, buf_len);
2003 return 0;
2004 }
2005
2006 /*
2007 * Fallback to always fail hypercalls regardless of endianness:
2008 *
2009 * tdi 0,r0,72 (becomes b .+8 in wrong endian, nop in good endian)
2010 * li r3, -1
2011 * b .+8 (becomes nop in wrong endian)
2012 * bswap32(li r3, -1)
2013 */
2014
2015 hc[0] = cpu_to_be32(0x08000048);
2016 hc[1] = cpu_to_be32(0x3860ffff);
2017 hc[2] = cpu_to_be32(0x48000008);
2018 hc[3] = cpu_to_be32(bswap32(0x3860ffff));
2019
2020 return 1;
2021 }
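
/*
 * Decode walk-through (illustrative): 0x08000048 is "tdi 0,r0,72", which
 * never traps (TO == 0), i.e. a nop; its byte-swapped form 0x48000008 is
 * "b .+8". A same-endian guest thus executes hc[1] ("li r3,-1") and
 * branches over hc[3], while an opposite-endian guest branches from hc[0]
 * to hc[2] (a nop for it) and reaches hc[3], which decodes as "li r3,-1"
 * in its byte order. Either way the unimplemented hypercall returns -1.
 */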
2022
2023 static inline int kvmppc_enable_hcall(KVMState *s, target_ulong hcall)
2024 {
2025 return kvm_vm_enable_cap(s, KVM_CAP_PPC_ENABLE_HCALL, 0, hcall, 1);
2026 }
2027
2028 void kvmppc_enable_logical_ci_hcalls(void)
2029 {
2030 /*
2031      * FIXME: it would be nice if we could detect the cases where
2032      * we're using a device which requires the in-kernel
2033      * implementation of these hcalls but the kernel lacks it, and
2034      * produce a warning.
2035 */
2036 kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_LOAD);
2037 kvmppc_enable_hcall(kvm_state, H_LOGICAL_CI_STORE);
2038 }
2039
2040 void kvmppc_enable_set_mode_hcall(void)
2041 {
2042 kvmppc_enable_hcall(kvm_state, H_SET_MODE);
2043 }
2044
2045 void kvmppc_enable_clear_ref_mod_hcalls(void)
2046 {
2047 kvmppc_enable_hcall(kvm_state, H_CLEAR_REF);
2048 kvmppc_enable_hcall(kvm_state, H_CLEAR_MOD);
2049 }
2050
2051 void kvmppc_enable_h_page_init(void)
2052 {
2053 kvmppc_enable_hcall(kvm_state, H_PAGE_INIT);
2054 }
2055
2056 void kvmppc_enable_h_rpt_invalidate(void)
2057 {
2058 kvmppc_enable_hcall(kvm_state, H_RPT_INVALIDATE);
2059 }
2060
2061 void kvmppc_set_papr(PowerPCCPU *cpu)
2062 {
2063 CPUState *cs = CPU(cpu);
2064 int ret;
2065
2066 if (!kvm_enabled()) {
2067 return;
2068 }
2069
2070 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_PAPR, 0);
2071 if (ret) {
2072 error_report("This vCPU type or KVM version does not support PAPR");
2073 exit(1);
2074 }
2075
2076 /*
2077 * Update the capability flag so we sync the right information
2078 * with kvm
2079 */
2080 cap_papr = 1;
2081 }
2082
2083 int kvmppc_set_compat(PowerPCCPU *cpu, uint32_t compat_pvr)
2084 {
2085 return kvm_set_one_reg(CPU(cpu), KVM_REG_PPC_ARCH_COMPAT, &compat_pvr);
2086 }
2087
2088 void kvmppc_set_mpic_proxy(PowerPCCPU *cpu, int mpic_proxy)
2089 {
2090 CPUState *cs = CPU(cpu);
2091 int ret;
2092
2093 ret = kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_EPR, 0, mpic_proxy);
2094 if (ret && mpic_proxy) {
2095 error_report("This KVM version does not support EPR");
2096 exit(1);
2097 }
2098 }
2099
2100 bool kvmppc_get_fwnmi(void)
2101 {
2102 return cap_fwnmi;
2103 }
2104
2105 int kvmppc_set_fwnmi(PowerPCCPU *cpu)
2106 {
2107 CPUState *cs = CPU(cpu);
2108
2109 return kvm_vcpu_enable_cap(cs, KVM_CAP_PPC_FWNMI, 0);
2110 }
2111
2112 int kvmppc_smt_threads(void)
2113 {
2114 return cap_ppc_smt ? cap_ppc_smt : 1;
2115 }
2116
2117 int kvmppc_set_smt_threads(int smt)
2118 {
2119 int ret;
2120
2121 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_SMT, 0, smt, 0);
2122 if (!ret) {
2123 cap_ppc_smt = smt;
2124 }
2125 return ret;
2126 }
2127
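/*
 * Append a hint listing the VSMT modes the host supports; e.g. with
 * cap_ppc_smt_possible == 0x0f the hint reads
 * "Available VSMT modes: 8 4 2 1.".
 */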
2128 void kvmppc_error_append_smt_possible_hint(Error *const *errp)
2129 {
2130 int i;
2131 GString *g;
2132 char *s;
2133
2134 assert(kvm_enabled());
2135 if (cap_ppc_smt_possible) {
2136 g = g_string_new("Available VSMT modes:");
2137 for (i = 63; i >= 0; i--) {
2138 if ((1UL << i) & cap_ppc_smt_possible) {
2139 g_string_append_printf(g, " %lu", (1UL << i));
2140 }
2141 }
2142 s = g_string_free(g, false);
2143 error_append_hint(errp, "%s.\n", s);
2144 g_free(s);
2145 } else {
2146 error_append_hint(errp,
2147 "This KVM seems to be too old to support VSMT.\n");
2148 }
2149 }
2150
2151
2152 #ifdef TARGET_PPC64
2153 uint64_t kvmppc_vrma_limit(unsigned int hash_shift)
2154 {
2155 struct kvm_ppc_smmu_info info;
2156 long rampagesize, best_page_shift;
2157 int i;
2158
2159 /*
2160 * Find the largest hardware supported page size that's less than
2161 * or equal to the (logical) backing page size of guest RAM
2162 */
2163 kvm_get_smmu_info(&info, &error_fatal);
2164 rampagesize = qemu_minrampagesize();
2165 best_page_shift = 0;
2166
2167 for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
2168 struct kvm_ppc_one_seg_page_size *sps = &info.sps[i];
2169
2170 if (!sps->page_shift) {
2171 continue;
2172 }
2173
2174 if ((sps->page_shift > best_page_shift)
2175 && ((1UL << sps->page_shift) <= rampagesize)) {
2176 best_page_shift = sps->page_shift;
2177 }
2178 }
2179
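 /*
  * (1 << hash_shift) bytes of HPT hold 2^(hash_shift - 7) 128-byte
  * HPTE groups (8 HPTEs of 16 bytes each), hence the -7 below.
  */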
2180 return 1ULL << (best_page_shift + hash_shift - 7);
2181 }
2182 #endif
2183
2184 bool kvmppc_spapr_use_multitce(void)
2185 {
2186 return cap_spapr_multitce;
2187 }
2188
2189 int kvmppc_spapr_enable_inkernel_multitce(void)
2190 {
2191 int ret;
2192
2193 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
2194 H_PUT_TCE_INDIRECT, 1);
2195 if (!ret) {
2196 ret = kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_ENABLE_HCALL, 0,
2197 H_STUFF_TCE, 1);
2198 }
2199
2200 return ret;
2201 }
2202
2203 void *kvmppc_create_spapr_tce(uint32_t liobn, uint32_t page_shift,
2204 uint64_t bus_offset, uint32_t nb_table,
2205 int *pfd, bool need_vfio)
2206 {
2207 long len;
2208 int fd;
2209 void *table;
2210
2211 /*
2212 * Must set fd to -1 so we don't try to munmap when called for
2213 * destroying the table, which the upper layers -will- do
2214 */
2215 *pfd = -1;
2216 if (!cap_spapr_tce || (need_vfio && !cap_spapr_vfio)) {
2217 return NULL;
2218 }
2219
2220 if (cap_spapr_tce_64) {
2221 struct kvm_create_spapr_tce_64 args = {
2222 .liobn = liobn,
2223 .page_shift = page_shift,
2224 .offset = bus_offset >> page_shift,
2225 .size = nb_table,
2226 .flags = 0
2227 };
2228 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE_64, &args);
2229 if (fd < 0) {
2230 fprintf(stderr,
2231 "KVM: Failed to create TCE64 table for liobn 0x%x\n",
2232 liobn);
2233 return NULL;
2234 }
2235 } else if (cap_spapr_tce) {
2236 uint64_t window_size = (uint64_t) nb_table << page_shift;
2237 struct kvm_create_spapr_tce args = {
2238 .liobn = liobn,
2239 .window_size = window_size,
2240 };
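 /* The legacy ioctl has only a 32-bit window size and no bus offset */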
2241 if ((window_size != args.window_size) || bus_offset) {
2242 return NULL;
2243 }
2244 fd = kvm_vm_ioctl(kvm_state, KVM_CREATE_SPAPR_TCE, &args);
2245 if (fd < 0) {
2246 fprintf(stderr, "KVM: Failed to create TCE table for liobn 0x%x\n",
2247 liobn);
2248 return NULL;
2249 }
2250 } else {
2251 return NULL;
2252 }
2253
2254 len = nb_table * sizeof(uint64_t);
2255 /* FIXME: round this up to page size */
2256
2257 table = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
2258 if (table == MAP_FAILED) {
2259 fprintf(stderr, "KVM: Failed to map TCE table for liobn 0x%x\n",
2260 liobn);
2261 close(fd);
2262 return NULL;
2263 }
2264
2265 *pfd = fd;
2266 return table;
2267 }
2268
2269 int kvmppc_remove_spapr_tce(void *table, int fd, uint32_t nb_table)
2270 {
2271 long len;
2272
2273 if (fd < 0) {
2274 return -1;
2275 }
2276
2277 len = nb_table * sizeof(uint64_t);
2278 if ((munmap(table, len) < 0) ||
2279 (close(fd) < 0)) {
2280 fprintf(stderr, "KVM: Unexpected error removing TCE table: %s\n",
2281 strerror(errno));
2282 /* Leak the table */
2283 }
2284
2285 return 0;
2286 }
2287
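/*
 * Ask KVM to allocate or reset the guest hash page table. Returns
 * the htab shift the kernel actually allocated, 0 if the caller
 * (QEMU) should allocate the htab itself, or a negative error.
 */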
2288 int kvmppc_reset_htab(int shift_hint)
2289 {
2290 uint32_t shift = shift_hint;
2291
2292 if (!kvm_enabled()) {
2293 /* Full emulation, tell caller to allocate htab itself */
2294 return 0;
2295 }
2296 if (kvm_vm_check_extension(kvm_state, KVM_CAP_PPC_ALLOC_HTAB)) {
2297 int ret;
2298 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_ALLOCATE_HTAB, &shift);
2299 if (ret == -ENOTTY) {
2300 /*
2301 * At least some versions of PR KVM advertise the
2302 * capability, but don't implement the ioctl(). Oops.
2303 * Return 0 so that we allocate the htab in qemu, as is
2304 * correct for PR.
2305 */
2306 return 0;
2307 } else if (ret < 0) {
2308 return ret;
2309 }
2310 return shift;
2311 }
2312
2313 /*
2314  * We have a kernel that predates the htab reset calls. For PR
2315  * KVM, we need to allocate the htab ourselves; an HV KVM of
2316  * this era will have already allocated a fixed 16MB hash
2317  * table.
2318  */
2319 if (kvmppc_is_pr(kvm_state)) {
2320 /* PR - tell caller to allocate htab */
2321 return 0;
2322 } else {
2323 /* HV - assume 16MB kernel allocated htab */
2324 return 24;
2325 }
2326 }
2327
2328 static inline uint32_t mfpvr(void)
2329 {
2330 uint32_t pvr;
2331
2332 asm ("mfpvr %0"
2333 : "=r"(pvr));
2334 return pvr;
2335 }
2336
2337 static void alter_insns(uint64_t *word, uint64_t flags, bool on)
2338 {
2339 if (on) {
2340 *word |= flags;
2341 } else {
2342 *word &= ~flags;
2343 }
2344 }
2345
2346 static void kvmppc_host_cpu_class_init(ObjectClass *oc, void *data)
2347 {
2348 PowerPCCPUClass *pcc = POWERPC_CPU_CLASS(oc);
2349 uint32_t dcache_size = kvmppc_read_int_cpu_dt("d-cache-size");
2350 uint32_t icache_size = kvmppc_read_int_cpu_dt("i-cache-size");
2351
2352 /* Now fix up the class with information we can query from the host */
2353 pcc->pvr = mfpvr();
2354
2355 alter_insns(&pcc->insns_flags, PPC_ALTIVEC,
2356 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_ALTIVEC);
2357 alter_insns(&pcc->insns_flags2, PPC2_VSX,
2358 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_VSX);
2359 alter_insns(&pcc->insns_flags2, PPC2_DFP,
2360 qemu_getauxval(AT_HWCAP) & PPC_FEATURE_HAS_DFP);
2361
2362 if (dcache_size != -1) {
2363 pcc->l1_dcache_size = dcache_size;
2364 }
2365
2366 if (icache_size != -1) {
2367 pcc->l1_icache_size = icache_size;
2368 }
2369
2370 #if defined(TARGET_PPC64)
2371 pcc->radix_page_info = kvm_get_radix_page_info();
2372
2373 if ((pcc->pvr & 0xffffff00) == CPU_POWERPC_POWER9_DD1) {
2374 /*
2375 * POWER9 DD1 has some bugs which make it not really ISA 3.00
2376 * compliant. More importantly, advertising ISA 3.00
2377 * architected mode may prevent guests from activating
2378 * necessary DD1 workarounds.
2379 */
2380 pcc->pcr_supported &= ~(PCR_COMPAT_3_00 | PCR_COMPAT_2_07
2381 | PCR_COMPAT_2_06 | PCR_COMPAT_2_05);
2382 }
2383 #endif /* defined(TARGET_PPC64) */
2384 }
2385
2386 bool kvmppc_has_cap_epr(void)
2387 {
2388 return cap_epr;
2389 }
2390
2391 bool kvmppc_has_cap_fixup_hcalls(void)
2392 {
2393 return cap_fixup_hcalls;
2394 }
2395
2396 bool kvmppc_has_cap_htm(void)
2397 {
2398 return cap_htm;
2399 }
2400
2401 bool kvmppc_has_cap_mmu_radix(void)
2402 {
2403 return cap_mmu_radix;
2404 }
2405
2406 bool kvmppc_has_cap_mmu_hash_v3(void)
2407 {
2408 return cap_mmu_hash_v3;
2409 }
2410
2411 static bool kvmppc_power8_host(void)
2412 {
2413 bool ret = false;
2414 #ifdef TARGET_PPC64
2415 {
2416 uint32_t base_pvr = CPU_POWERPC_POWER_SERVER_MASK & mfpvr();
2417 ret = (base_pvr == CPU_POWERPC_POWER8E_BASE) ||
2418 (base_pvr == CPU_POWERPC_POWER8NVL_BASE) ||
2419 (base_pvr == CPU_POWERPC_POWER8_BASE);
2420 }
2421 #endif /* TARGET_PPC64 */
2422 return ret;
2423 }
2424
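/*
 * The parse_cap_ppc_safe_*() helpers below translate the character
 * and behaviour bits reported by KVM_PPC_GET_CPU_CHAR into sPAPR
 * capability levels: 0 means broken/vulnerable, 1 that a workaround
 * is available, and higher values that the issue is fixed in
 * hardware.
 */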
2425 static int parse_cap_ppc_safe_cache(struct kvm_ppc_cpu_char c)
2426 {
2427 bool l1d_thread_priv_req = !kvmppc_power8_host();
2428
2429 if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_L1D_FLUSH_PR) {
2430 return 2;
2431 } else if ((!l1d_thread_priv_req ||
2432 c.character & c.character_mask & H_CPU_CHAR_L1D_THREAD_PRIV) &&
2433 (c.character & c.character_mask
2434 & (H_CPU_CHAR_L1D_FLUSH_ORI30 | H_CPU_CHAR_L1D_FLUSH_TRIG2))) {
2435 return 1;
2436 }
2437
2438 return 0;
2439 }
2440
2441 static int parse_cap_ppc_safe_bounds_check(struct kvm_ppc_cpu_char c)
2442 {
2443 if (~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_BNDS_CHK_SPEC_BAR) {
2444 return 2;
2445 } else if (c.character & c.character_mask & H_CPU_CHAR_SPEC_BAR_ORI31) {
2446 return 1;
2447 }
2448
2449 return 0;
2450 }
2451
2452 static int parse_cap_ppc_safe_indirect_branch(struct kvm_ppc_cpu_char c)
2453 {
2454 if ((~c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) &&
2455 (~c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) &&
2456 (~c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED)) {
2457 return SPAPR_CAP_FIXED_NA;
2458 } else if (c.behaviour & c.behaviour_mask & H_CPU_BEHAV_FLUSH_COUNT_CACHE) {
2459 return SPAPR_CAP_WORKAROUND;
2460 } else if (c.character & c.character_mask & H_CPU_CHAR_CACHE_COUNT_DIS) {
2461 return SPAPR_CAP_FIXED_CCD;
2462 } else if (c.character & c.character_mask & H_CPU_CHAR_BCCTRL_SERIALISED) {
2463 return SPAPR_CAP_FIXED_IBS;
2464 }
2465
2466 return 0;
2467 }
2468
2469 static int parse_cap_ppc_count_cache_flush_assist(struct kvm_ppc_cpu_char c)
2470 {
2471 if (c.character & c.character_mask & H_CPU_CHAR_BCCTR_FLUSH_ASSIST) {
2472 return 1;
2473 }
2474 return 0;
2475 }
2476
2477 bool kvmppc_has_cap_xive(void)
2478 {
2479 return cap_xive;
2480 }
2481
2482 static void kvmppc_get_cpu_characteristics(KVMState *s)
2483 {
2484 struct kvm_ppc_cpu_char c;
2485 int ret;
2486
2487 /* Assume broken */
2488 cap_ppc_safe_cache = 0;
2489 cap_ppc_safe_bounds_check = 0;
2490 cap_ppc_safe_indirect_branch = 0;
2491
2492 ret = kvm_vm_check_extension(s, KVM_CAP_PPC_GET_CPU_CHAR);
2493 if (!ret) {
2494 return;
2495 }
2496 ret = kvm_vm_ioctl(s, KVM_PPC_GET_CPU_CHAR, &c);
2497 if (ret < 0) {
2498 return;
2499 }
2500
2501 cap_ppc_safe_cache = parse_cap_ppc_safe_cache(c);
2502 cap_ppc_safe_bounds_check = parse_cap_ppc_safe_bounds_check(c);
2503 cap_ppc_safe_indirect_branch = parse_cap_ppc_safe_indirect_branch(c);
2504 cap_ppc_count_cache_flush_assist =
2505 parse_cap_ppc_count_cache_flush_assist(c);
2506 }
2507
2508 int kvmppc_get_cap_safe_cache(void)
2509 {
2510 return cap_ppc_safe_cache;
2511 }
2512
2513 int kvmppc_get_cap_safe_bounds_check(void)
2514 {
2515 return cap_ppc_safe_bounds_check;
2516 }
2517
2518 int kvmppc_get_cap_safe_indirect_branch(void)
2519 {
2520 return cap_ppc_safe_indirect_branch;
2521 }
2522
2523 int kvmppc_get_cap_count_cache_flush_assist(void)
2524 {
2525 return cap_ppc_count_cache_flush_assist;
2526 }
2527
2528 bool kvmppc_has_cap_nested_kvm_hv(void)
2529 {
2530 return !!cap_ppc_nested_kvm_hv;
2531 }
2532
2533 int kvmppc_set_cap_nested_kvm_hv(int enable)
2534 {
2535 return kvm_vm_enable_cap(kvm_state, KVM_CAP_PPC_NESTED_HV, 0, enable);
2536 }
2537
2538 bool kvmppc_has_cap_spapr_vfio(void)
2539 {
2540 return cap_spapr_vfio;
2541 }
2542
2543 int kvmppc_get_cap_large_decr(void)
2544 {
2545 return cap_large_decr;
2546 }
2547
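/*
 * Toggle the large decrementer (LPCR_LD) bit and read it back to
 * verify the host accepted the change. Returns -1 if the bit could
 * not be set as requested.
 */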
2548 int kvmppc_enable_cap_large_decr(PowerPCCPU *cpu, int enable)
2549 {
2550 CPUState *cs = CPU(cpu);
2551 uint64_t lpcr = 0;
2552
2553 kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2554 /* Do we need to modify the LPCR? */
2555 if (!!(lpcr & LPCR_LD) != !!enable) {
2556 if (enable) {
2557 lpcr |= LPCR_LD;
2558 } else {
2559 lpcr &= ~LPCR_LD;
2560 }
2561 kvm_set_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2562 kvm_get_one_reg(cs, KVM_REG_PPC_LPCR_64, &lpcr);
2563
2564 if (!!(lpcr & LPCR_LD) != !!enable) {
2565 return -1;
2566 }
2567 }
2568
2569 return 0;
2570 }
2571
2572 int kvmppc_has_cap_rpt_invalidate(void)
2573 {
2574 return cap_rpt_invalidate;
2575 }
2576
2577 bool kvmppc_supports_ail_3(void)
2578 {
2579 return cap_ail_mode_3;
2580 }
2581
2582 PowerPCCPUClass *kvm_ppc_get_host_cpu_class(void)
2583 {
2584 uint32_t host_pvr = mfpvr();
2585 PowerPCCPUClass *pvr_pcc;
2586
2587 pvr_pcc = ppc_cpu_class_by_pvr(host_pvr);
2588 if (pvr_pcc == NULL) {
2589 pvr_pcc = ppc_cpu_class_by_pvr_mask(host_pvr);
2590 }
2591
2592 return pvr_pcc;
2593 }
2594
2595 static void pseries_machine_class_fixup(ObjectClass *oc, void *opaque)
2596 {
2597 MachineClass *mc = MACHINE_CLASS(oc);
2598
2599 mc->default_cpu_type = TYPE_HOST_POWERPC_CPU;
2600 }
2601
2602 static int kvm_ppc_register_host_cpu_type(void)
2603 {
2604 TypeInfo type_info = {
2605 .name = TYPE_HOST_POWERPC_CPU,
2606 .class_init = kvmppc_host_cpu_class_init,
2607 };
2608 PowerPCCPUClass *pvr_pcc;
2609 ObjectClass *oc;
2610 DeviceClass *dc;
2611 int i;
2612
2613 pvr_pcc = kvm_ppc_get_host_cpu_class();
2614 if (pvr_pcc == NULL) {
2615 return -1;
2616 }
2617 type_info.parent = object_class_get_name(OBJECT_CLASS(pvr_pcc));
2618 type_register(&type_info);
2619 /* override TCG default cpu type with 'host' cpu model */
2620 object_class_foreach(pseries_machine_class_fixup, TYPE_SPAPR_MACHINE,
2621 false, NULL);
2622
2623 oc = object_class_by_name(type_info.name);
2624 g_assert(oc);
2625
2626 /*
2627 * Update generic CPU family class alias (e.g. on a POWER8NVL host,
2628 * we want "POWER8" to be a "family" alias that points to the current
2629 * host CPU type, too)
2630 */
2631 dc = DEVICE_CLASS(ppc_cpu_get_family_class(pvr_pcc));
2632 for (i = 0; ppc_cpu_aliases[i].alias != NULL; i++) {
2633 if (strcasecmp(ppc_cpu_aliases[i].alias, dc->desc) == 0) {
2634 char *suffix;
2635
2636 ppc_cpu_aliases[i].model = g_strdup(object_class_get_name(oc));
2637 suffix = strstr(ppc_cpu_aliases[i].model, POWERPC_CPU_TYPE_SUFFIX);
2638 if (suffix) {
2639 *suffix = 0;
2640 }
2641 break;
2642 }
2643 }
2644
2645 return 0;
2646 }
2647
2648 int kvmppc_define_rtas_kernel_token(uint32_t token, const char *function)
2649 {
2650 struct kvm_rtas_token_args args = {
2651 .token = token,
2652 };
2653
2654 if (!kvm_check_extension(kvm_state, KVM_CAP_PPC_RTAS)) {
2655 return -ENOENT;
2656 }
2657
2658 strncpy(args.name, function, sizeof(args.name) - 1);
2659
2660 return kvm_vm_ioctl(kvm_state, KVM_PPC_RTAS_DEFINE_TOKEN, &args);
2661 }
2662
2663 int kvmppc_get_htab_fd(bool write, uint64_t index, Error **errp)
2664 {
2665 struct kvm_get_htab_fd s = {
2666 .flags = write ? KVM_GET_HTAB_WRITE : 0,
2667 .start_index = index,
2668 };
2669 int ret;
2670
2671 if (!cap_htab_fd) {
2672 error_setg(errp, "KVM version doesn't support %s the HPT",
2673 write ? "writing" : "reading");
2674 return -ENOTSUP;
2675 }
2676
2677 ret = kvm_vm_ioctl(kvm_state, KVM_PPC_GET_HTAB_FD, &s);
2678 if (ret < 0) {
2679 error_setg(errp, "Unable to open fd for %s HPT %s KVM: %s",
2680 write ? "writing" : "reading", write ? "to" : "from",
2681 strerror(errno));
2682 return -errno;
2683 }
2684
2685 return ret;
2686 }
2687
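/*
 * Drain HPT entries from the KVM htab fd into the migration stream,
 * stopping when the fd has no more data or (if max_ns >= 0) when
 * max_ns nanoseconds have elapsed. Returns 1 once the whole table
 * has been read, 0 if there is more to send, or a negative value on
 * read failure.
 */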
2688 int kvmppc_save_htab(QEMUFile *f, int fd, size_t bufsize, int64_t max_ns)
2689 {
2690 int64_t starttime = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2691 uint8_t buf[bufsize];
2692 ssize_t rc;
2693
2694 do {
2695 rc = read(fd, buf, bufsize);
2696 if (rc < 0) {
2697 fprintf(stderr, "Error reading data from KVM HTAB fd: %s\n",
2698 strerror(errno));
2699 return rc;
2700 } else if (rc) {
2701 uint8_t *buffer = buf;
2702 ssize_t n = rc;
2703 while (n) {
2704 struct kvm_get_htab_header *head =
2705 (struct kvm_get_htab_header *) buffer;
2706 size_t chunksize = sizeof(*head) +
2707 HASH_PTE_SIZE_64 * head->n_valid;
2708
2709 qemu_put_be32(f, head->index);
2710 qemu_put_be16(f, head->n_valid);
2711 qemu_put_be16(f, head->n_invalid);
2712 qemu_put_buffer(f, (void *)(head + 1),
2713 HASH_PTE_SIZE_64 * head->n_valid);
2714
2715 buffer += chunksize;
2716 n -= chunksize;
2717 }
2718 }
2719 } while ((rc != 0)
2720 && ((max_ns < 0) ||
2721 ((qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - starttime) < max_ns)));
2722
2723 return (rc == 0) ? 1 : 0;
2724 }
2725
2726 int kvmppc_load_htab_chunk(QEMUFile *f, int fd, uint32_t index,
2727 uint16_t n_valid, uint16_t n_invalid, Error **errp)
2728 {
2729 struct kvm_get_htab_header *buf;
2730 size_t chunksize = sizeof(*buf) + n_valid * HASH_PTE_SIZE_64;
2731 ssize_t rc;
2732
2733 buf = alloca(chunksize);
2734 buf->index = index;
2735 buf->n_valid = n_valid;
2736 buf->n_invalid = n_invalid;
2737
2738 qemu_get_buffer(f, (void *)(buf + 1), HASH_PTE_SIZE_64 * n_valid);
2739
2740 rc = write(fd, buf, chunksize);
2741 if (rc < 0) {
2742 error_setg_errno(errp, errno, "Error writing the KVM hash table");
2743 return -errno;
2744 }
2745 if (rc != chunksize) {
2746 /* We should never get a short write on a single chunk */
2747 error_setg(errp, "Short write while restoring the KVM hash table");
2748 return -ENOSPC;
2749 }
2750 return 0;
2751 }
2752
2753 bool kvm_arch_stop_on_emulation_error(CPUState *cpu)
2754 {
2755 return true;
2756 }
2757
2758 void kvm_arch_init_irq_routing(KVMState *s)
2759 {
2760 }
2761
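/*
 * Read @n HPTEs starting at index @ptex from the KVM htab fd into
 * @hptes, zero-filling any slots the kernel reports as invalid.
 */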
2762 void kvmppc_read_hptes(ppc_hash_pte64_t *hptes, hwaddr ptex, int n)
2763 {
2764 int fd, rc;
2765 int i;
2766
2767 fd = kvmppc_get_htab_fd(false, ptex, &error_abort);
2768
2769 i = 0;
2770 while (i < n) {
2771 struct kvm_get_htab_header *hdr;
2772 int m = n < HPTES_PER_GROUP ? n : HPTES_PER_GROUP;
2773 char buf[sizeof(*hdr) + m * HASH_PTE_SIZE_64];
2774
2775 rc = read(fd, buf, sizeof(buf));
2776 if (rc < 0) {
2777 hw_error("kvmppc_read_hptes: Unable to read HPTEs");
2778 }
2779
2780 hdr = (struct kvm_get_htab_header *)buf;
2781 while ((i < n) && ((char *)hdr < (buf + rc))) {
2782 int invalid = hdr->n_invalid, valid = hdr->n_valid;
2783
2784 if (hdr->index != (ptex + i)) {
2785 hw_error("kvmppc_read_hptes: Unexpected HPTE index %"PRIu32
2786 " != (%"HWADDR_PRIu" + %d)", hdr->index, ptex, i);
2787 }
2788
2789 if (n - i < valid) {
2790 valid = n - i;
2791 }
2792 memcpy(hptes + i, hdr + 1, HASH_PTE_SIZE_64 * valid);
2793 i += valid;
2794
2795 if ((n - i) < invalid) {
2796 invalid = n - i;
2797 }
2798 memset(hptes + i, 0, invalid * HASH_PTE_SIZE_64);
2799 i += invalid;
2800
2801 hdr = (struct kvm_get_htab_header *)
2802 ((char *)(hdr + 1) + HASH_PTE_SIZE_64 * hdr->n_valid);
2803 }
2804 }
2805
2806 close(fd);
2807 }
2808
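/* Push a single updated HPTE at index @ptex back into the KVM HPT. */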
2809 void kvmppc_write_hpte(hwaddr ptex, uint64_t pte0, uint64_t pte1)
2810 {
2811 int fd, rc;
2812 struct {
2813 struct kvm_get_htab_header hdr;
2814 uint64_t pte0;
2815 uint64_t pte1;
2816 } buf;
2817
2818 fd = kvmppc_get_htab_fd(true, 0 /* Ignored */, &error_abort);
2819
2820 buf.hdr.n_valid = 1;
2821 buf.hdr.n_invalid = 0;
2822 buf.hdr.index = ptex;
2823 buf.pte0 = cpu_to_be64(pte0);
2824 buf.pte1 = cpu_to_be64(pte1);
2825
2826 rc = write(fd, &buf, sizeof(buf));
2827 if (rc != sizeof(buf)) {
2828 hw_error("kvmppc_write_hpte: Unable to update KVM HPT");
2829 }
2830 close(fd);
2831 }
2832
2833 int kvm_arch_fixup_msi_route(struct kvm_irq_routing_entry *route,
2834 uint64_t address, uint32_t data, PCIDevice *dev)
2835 {
2836 return 0;
2837 }
2838
2839 int kvm_arch_add_msi_route_post(struct kvm_irq_routing_entry *route,
2840 int vector, PCIDevice *dev)
2841 {
2842 return 0;
2843 }
2844
2845 int kvm_arch_release_virq_post(int virq)
2846 {
2847 return 0;
2848 }
2849
2850 int kvm_arch_msi_data_to_gsi(uint32_t data)
2851 {
2852 return data & 0xffff;
2853 }
2854
2855 #if defined(TARGET_PPC64)
2856 int kvm_handle_nmi(PowerPCCPU *cpu, struct kvm_run *run)
2857 {
2858 uint16_t flags = run->flags & KVM_RUN_PPC_NMI_DISP_MASK;
2859
2860 cpu_synchronize_state(CPU(cpu));
2861
2862 spapr_mce_req_event(cpu, flags == KVM_RUN_PPC_NMI_DISP_FULLY_RECOV);
2863
2864 return 0;
2865 }
2866 #endif
2867
2868 int kvmppc_enable_hwrng(void)
2869 {
2870 if (!kvm_enabled() || !kvm_check_extension(kvm_state, KVM_CAP_PPC_HWRNG)) {
2871 return -1;
2872 }
2873
2874 return kvmppc_enable_hcall(kvm_state, H_RANDOM);
2875 }
2876
2877 void kvmppc_check_papr_resize_hpt(Error **errp)
2878 {
2879 if (!kvm_enabled()) {
2880 return; /* No KVM, we're good */
2881 }
2882
2883 if (cap_resize_hpt) {
2884 return; /* Kernel has explicit support, we're good */
2885 }
2886
2887 /* Otherwise fallback on looking for PR KVM */
2888 if (kvmppc_is_pr(kvm_state)) {
2889 return;
2890 }
2891
2892 error_setg(errp,
2893 "Hash page table resizing not available with this KVM version");
2894 }
2895
2896 int kvmppc_resize_hpt_prepare(PowerPCCPU *cpu, target_ulong flags, int shift)
2897 {
2898 CPUState *cs = CPU(cpu);
2899 struct kvm_ppc_resize_hpt rhpt = {
2900 .flags = flags,
2901 .shift = shift,
2902 };
2903
2904 if (!cap_resize_hpt) {
2905 return -ENOSYS;
2906 }
2907
2908 return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_PREPARE, &rhpt);
2909 }
2910
2911 int kvmppc_resize_hpt_commit(PowerPCCPU *cpu, target_ulong flags, int shift)
2912 {
2913 CPUState *cs = CPU(cpu);
2914 struct kvm_ppc_resize_hpt rhpt = {
2915 .flags = flags,
2916 .shift = shift,
2917 };
2918
2919 if (!cap_resize_hpt) {
2920 return -ENOSYS;
2921 }
2922
2923 return kvm_vm_ioctl(cs->kvm_state, KVM_PPC_RESIZE_HPT_COMMIT, &rhpt);
2924 }
2925
2926 /*
2927 * This is a helper function to detect a post migration scenario
2928 * in which a guest, running as KVM-HV, freezes in cpu_post_load because
2929 * the guest kernel can't handle a PVR value other than the actual host
2930 * PVR in KVM_SET_SREGS, even if pvr_match() returns true.
2931 *
2932 * If we don't have cap_ppc_pvr_compat and we're not running in PR
2933 * (so, we're HV), return true. The workaround itself is done in
2934 * cpu_post_load.
2935 *
2936 * The order here is important: we'll only check for KVM PR as a
2937 * fallback if the guest kernel can't handle the situation itself.
2938  * We want to avoid querying the running KVM type at the QEMU level
2939  * as much as possible.
2940 */
2941 bool kvmppc_pvr_workaround_required(PowerPCCPU *cpu)
2942 {
2943 CPUState *cs = CPU(cpu);
2944
2945 if (!kvm_enabled()) {
2946 return false;
2947 }
2948
2949 if (cap_ppc_pvr_compat) {
2950 return false;
2951 }
2952
2953 return !kvmppc_is_pr(cs->kvm_state);
2954 }
2955
2956 void kvmppc_set_reg_ppc_online(PowerPCCPU *cpu, unsigned int online)
2957 {
2958 CPUState *cs = CPU(cpu);
2959
2960 if (kvm_enabled()) {
2961 kvm_set_one_reg(cs, KVM_REG_PPC_ONLINE, &online);
2962 }
2963 }
2964
2965 void kvmppc_set_reg_tb_offset(PowerPCCPU *cpu, int64_t tb_offset)
2966 {
2967 CPUState *cs = CPU(cpu);
2968
2969 if (kvm_enabled()) {
2970 kvm_set_one_reg(cs, KVM_REG_PPC_TB_OFFSET, &tb_offset);
2971 }
2972 }
2973
2974 bool kvm_arch_cpu_check_are_resettable(void)
2975 {
2976 return true;
2977 }
2978
2979 void kvm_arch_accel_class_init(ObjectClass *oc)
2980 {
2981 }