2 * QEMU Hypervisor.framework support for Apple Silicon
4 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
5 * Copyright 2020 Google LLC
7 * This work is licensed under the terms of the GNU GPL, version 2 or later.
8 * See the COPYING file in the top-level directory.
12 #include "qemu/osdep.h"
13 #include "qemu-common.h"
14 #include "qemu/error-report.h"
16 #include "sysemu/runstate.h"
17 #include "sysemu/hvf.h"
18 #include "sysemu/hvf_int.h"
19 #include "sysemu/hw_accel.h"
21 #include <mach/mach_time.h>
23 #include "exec/address-spaces.h"
25 #include "qemu/main-loop.h"
26 #include "sysemu/cpus.h"
27 #include "target/arm/cpu.h"
28 #include "target/arm/internals.h"
29 #include "trace/trace-target_arm_hvf.h"
30 #include "migration/vmstate.h"
/*
 * Build a QEMU cpreg key for an AArch64 system register from its
 * coordinates, for matching against the cp_regs hashtable.
 */
#define HVF_SYSREG(crn, crm, op0, op1, op2) \
    ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
#define PL1_WRITE_MASK 0x4

/*
 * Pack system register coordinates the same way the ISS field of an
 * ESR_ELx system register trap does, so that trap syndromes can be
 * compared directly against these SYSREG_* constants.
 */
#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
#define SYSREG_MASK        SYSREG(0x3, 0x7, 0xf, 0xf, 0x7)
#define SYSREG_OSLAR_EL1   SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1   SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1   SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0  SYSREG(3, 3, 14, 0, 1)

/* Bit 0 of a WFx trap syndrome distinguishes WFE from WFI */
#define WFX_IS_WFE (1 << 0)

/* CNTV_CTL_EL0 bits */
#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)
typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;
57 struct hvf_reg_match
{
62 static const struct hvf_reg_match hvf_reg_match
[] = {
63 { HV_REG_X0
, offsetof(CPUARMState
, xregs
[0]) },
64 { HV_REG_X1
, offsetof(CPUARMState
, xregs
[1]) },
65 { HV_REG_X2
, offsetof(CPUARMState
, xregs
[2]) },
66 { HV_REG_X3
, offsetof(CPUARMState
, xregs
[3]) },
67 { HV_REG_X4
, offsetof(CPUARMState
, xregs
[4]) },
68 { HV_REG_X5
, offsetof(CPUARMState
, xregs
[5]) },
69 { HV_REG_X6
, offsetof(CPUARMState
, xregs
[6]) },
70 { HV_REG_X7
, offsetof(CPUARMState
, xregs
[7]) },
71 { HV_REG_X8
, offsetof(CPUARMState
, xregs
[8]) },
72 { HV_REG_X9
, offsetof(CPUARMState
, xregs
[9]) },
73 { HV_REG_X10
, offsetof(CPUARMState
, xregs
[10]) },
74 { HV_REG_X11
, offsetof(CPUARMState
, xregs
[11]) },
75 { HV_REG_X12
, offsetof(CPUARMState
, xregs
[12]) },
76 { HV_REG_X13
, offsetof(CPUARMState
, xregs
[13]) },
77 { HV_REG_X14
, offsetof(CPUARMState
, xregs
[14]) },
78 { HV_REG_X15
, offsetof(CPUARMState
, xregs
[15]) },
79 { HV_REG_X16
, offsetof(CPUARMState
, xregs
[16]) },
80 { HV_REG_X17
, offsetof(CPUARMState
, xregs
[17]) },
81 { HV_REG_X18
, offsetof(CPUARMState
, xregs
[18]) },
82 { HV_REG_X19
, offsetof(CPUARMState
, xregs
[19]) },
83 { HV_REG_X20
, offsetof(CPUARMState
, xregs
[20]) },
84 { HV_REG_X21
, offsetof(CPUARMState
, xregs
[21]) },
85 { HV_REG_X22
, offsetof(CPUARMState
, xregs
[22]) },
86 { HV_REG_X23
, offsetof(CPUARMState
, xregs
[23]) },
87 { HV_REG_X24
, offsetof(CPUARMState
, xregs
[24]) },
88 { HV_REG_X25
, offsetof(CPUARMState
, xregs
[25]) },
89 { HV_REG_X26
, offsetof(CPUARMState
, xregs
[26]) },
90 { HV_REG_X27
, offsetof(CPUARMState
, xregs
[27]) },
91 { HV_REG_X28
, offsetof(CPUARMState
, xregs
[28]) },
92 { HV_REG_X29
, offsetof(CPUARMState
, xregs
[29]) },
93 { HV_REG_X30
, offsetof(CPUARMState
, xregs
[30]) },
94 { HV_REG_PC
, offsetof(CPUARMState
, pc
) },
97 static const struct hvf_reg_match hvf_fpreg_match
[] = {
98 { HV_SIMD_FP_REG_Q0
, offsetof(CPUARMState
, vfp
.zregs
[0]) },
99 { HV_SIMD_FP_REG_Q1
, offsetof(CPUARMState
, vfp
.zregs
[1]) },
100 { HV_SIMD_FP_REG_Q2
, offsetof(CPUARMState
, vfp
.zregs
[2]) },
101 { HV_SIMD_FP_REG_Q3
, offsetof(CPUARMState
, vfp
.zregs
[3]) },
102 { HV_SIMD_FP_REG_Q4
, offsetof(CPUARMState
, vfp
.zregs
[4]) },
103 { HV_SIMD_FP_REG_Q5
, offsetof(CPUARMState
, vfp
.zregs
[5]) },
104 { HV_SIMD_FP_REG_Q6
, offsetof(CPUARMState
, vfp
.zregs
[6]) },
105 { HV_SIMD_FP_REG_Q7
, offsetof(CPUARMState
, vfp
.zregs
[7]) },
106 { HV_SIMD_FP_REG_Q8
, offsetof(CPUARMState
, vfp
.zregs
[8]) },
107 { HV_SIMD_FP_REG_Q9
, offsetof(CPUARMState
, vfp
.zregs
[9]) },
108 { HV_SIMD_FP_REG_Q10
, offsetof(CPUARMState
, vfp
.zregs
[10]) },
109 { HV_SIMD_FP_REG_Q11
, offsetof(CPUARMState
, vfp
.zregs
[11]) },
110 { HV_SIMD_FP_REG_Q12
, offsetof(CPUARMState
, vfp
.zregs
[12]) },
111 { HV_SIMD_FP_REG_Q13
, offsetof(CPUARMState
, vfp
.zregs
[13]) },
112 { HV_SIMD_FP_REG_Q14
, offsetof(CPUARMState
, vfp
.zregs
[14]) },
113 { HV_SIMD_FP_REG_Q15
, offsetof(CPUARMState
, vfp
.zregs
[15]) },
114 { HV_SIMD_FP_REG_Q16
, offsetof(CPUARMState
, vfp
.zregs
[16]) },
115 { HV_SIMD_FP_REG_Q17
, offsetof(CPUARMState
, vfp
.zregs
[17]) },
116 { HV_SIMD_FP_REG_Q18
, offsetof(CPUARMState
, vfp
.zregs
[18]) },
117 { HV_SIMD_FP_REG_Q19
, offsetof(CPUARMState
, vfp
.zregs
[19]) },
118 { HV_SIMD_FP_REG_Q20
, offsetof(CPUARMState
, vfp
.zregs
[20]) },
119 { HV_SIMD_FP_REG_Q21
, offsetof(CPUARMState
, vfp
.zregs
[21]) },
120 { HV_SIMD_FP_REG_Q22
, offsetof(CPUARMState
, vfp
.zregs
[22]) },
121 { HV_SIMD_FP_REG_Q23
, offsetof(CPUARMState
, vfp
.zregs
[23]) },
122 { HV_SIMD_FP_REG_Q24
, offsetof(CPUARMState
, vfp
.zregs
[24]) },
123 { HV_SIMD_FP_REG_Q25
, offsetof(CPUARMState
, vfp
.zregs
[25]) },
124 { HV_SIMD_FP_REG_Q26
, offsetof(CPUARMState
, vfp
.zregs
[26]) },
125 { HV_SIMD_FP_REG_Q27
, offsetof(CPUARMState
, vfp
.zregs
[27]) },
126 { HV_SIMD_FP_REG_Q28
, offsetof(CPUARMState
, vfp
.zregs
[28]) },
127 { HV_SIMD_FP_REG_Q29
, offsetof(CPUARMState
, vfp
.zregs
[29]) },
128 { HV_SIMD_FP_REG_Q30
, offsetof(CPUARMState
, vfp
.zregs
[30]) },
129 { HV_SIMD_FP_REG_Q31
, offsetof(CPUARMState
, vfp
.zregs
[31]) },
132 struct hvf_sreg_match
{
138 static struct hvf_sreg_match hvf_sreg_match
[] = {
139 { HV_SYS_REG_DBGBVR0_EL1
, HVF_SYSREG(0, 0, 14, 0, 4) },
140 { HV_SYS_REG_DBGBCR0_EL1
, HVF_SYSREG(0, 0, 14, 0, 5) },
141 { HV_SYS_REG_DBGWVR0_EL1
, HVF_SYSREG(0, 0, 14, 0, 6) },
142 { HV_SYS_REG_DBGWCR0_EL1
, HVF_SYSREG(0, 0, 14, 0, 7) },
144 { HV_SYS_REG_DBGBVR1_EL1
, HVF_SYSREG(0, 1, 14, 0, 4) },
145 { HV_SYS_REG_DBGBCR1_EL1
, HVF_SYSREG(0, 1, 14, 0, 5) },
146 { HV_SYS_REG_DBGWVR1_EL1
, HVF_SYSREG(0, 1, 14, 0, 6) },
147 { HV_SYS_REG_DBGWCR1_EL1
, HVF_SYSREG(0, 1, 14, 0, 7) },
149 { HV_SYS_REG_DBGBVR2_EL1
, HVF_SYSREG(0, 2, 14, 0, 4) },
150 { HV_SYS_REG_DBGBCR2_EL1
, HVF_SYSREG(0, 2, 14, 0, 5) },
151 { HV_SYS_REG_DBGWVR2_EL1
, HVF_SYSREG(0, 2, 14, 0, 6) },
152 { HV_SYS_REG_DBGWCR2_EL1
, HVF_SYSREG(0, 2, 14, 0, 7) },
154 { HV_SYS_REG_DBGBVR3_EL1
, HVF_SYSREG(0, 3, 14, 0, 4) },
155 { HV_SYS_REG_DBGBCR3_EL1
, HVF_SYSREG(0, 3, 14, 0, 5) },
156 { HV_SYS_REG_DBGWVR3_EL1
, HVF_SYSREG(0, 3, 14, 0, 6) },
157 { HV_SYS_REG_DBGWCR3_EL1
, HVF_SYSREG(0, 3, 14, 0, 7) },
159 { HV_SYS_REG_DBGBVR4_EL1
, HVF_SYSREG(0, 4, 14, 0, 4) },
160 { HV_SYS_REG_DBGBCR4_EL1
, HVF_SYSREG(0, 4, 14, 0, 5) },
161 { HV_SYS_REG_DBGWVR4_EL1
, HVF_SYSREG(0, 4, 14, 0, 6) },
162 { HV_SYS_REG_DBGWCR4_EL1
, HVF_SYSREG(0, 4, 14, 0, 7) },
164 { HV_SYS_REG_DBGBVR5_EL1
, HVF_SYSREG(0, 5, 14, 0, 4) },
165 { HV_SYS_REG_DBGBCR5_EL1
, HVF_SYSREG(0, 5, 14, 0, 5) },
166 { HV_SYS_REG_DBGWVR5_EL1
, HVF_SYSREG(0, 5, 14, 0, 6) },
167 { HV_SYS_REG_DBGWCR5_EL1
, HVF_SYSREG(0, 5, 14, 0, 7) },
169 { HV_SYS_REG_DBGBVR6_EL1
, HVF_SYSREG(0, 6, 14, 0, 4) },
170 { HV_SYS_REG_DBGBCR6_EL1
, HVF_SYSREG(0, 6, 14, 0, 5) },
171 { HV_SYS_REG_DBGWVR6_EL1
, HVF_SYSREG(0, 6, 14, 0, 6) },
172 { HV_SYS_REG_DBGWCR6_EL1
, HVF_SYSREG(0, 6, 14, 0, 7) },
174 { HV_SYS_REG_DBGBVR7_EL1
, HVF_SYSREG(0, 7, 14, 0, 4) },
175 { HV_SYS_REG_DBGBCR7_EL1
, HVF_SYSREG(0, 7, 14, 0, 5) },
176 { HV_SYS_REG_DBGWVR7_EL1
, HVF_SYSREG(0, 7, 14, 0, 6) },
177 { HV_SYS_REG_DBGWCR7_EL1
, HVF_SYSREG(0, 7, 14, 0, 7) },
179 { HV_SYS_REG_DBGBVR8_EL1
, HVF_SYSREG(0, 8, 14, 0, 4) },
180 { HV_SYS_REG_DBGBCR8_EL1
, HVF_SYSREG(0, 8, 14, 0, 5) },
181 { HV_SYS_REG_DBGWVR8_EL1
, HVF_SYSREG(0, 8, 14, 0, 6) },
182 { HV_SYS_REG_DBGWCR8_EL1
, HVF_SYSREG(0, 8, 14, 0, 7) },
184 { HV_SYS_REG_DBGBVR9_EL1
, HVF_SYSREG(0, 9, 14, 0, 4) },
185 { HV_SYS_REG_DBGBCR9_EL1
, HVF_SYSREG(0, 9, 14, 0, 5) },
186 { HV_SYS_REG_DBGWVR9_EL1
, HVF_SYSREG(0, 9, 14, 0, 6) },
187 { HV_SYS_REG_DBGWCR9_EL1
, HVF_SYSREG(0, 9, 14, 0, 7) },
189 { HV_SYS_REG_DBGBVR10_EL1
, HVF_SYSREG(0, 10, 14, 0, 4) },
190 { HV_SYS_REG_DBGBCR10_EL1
, HVF_SYSREG(0, 10, 14, 0, 5) },
191 { HV_SYS_REG_DBGWVR10_EL1
, HVF_SYSREG(0, 10, 14, 0, 6) },
192 { HV_SYS_REG_DBGWCR10_EL1
, HVF_SYSREG(0, 10, 14, 0, 7) },
194 { HV_SYS_REG_DBGBVR11_EL1
, HVF_SYSREG(0, 11, 14, 0, 4) },
195 { HV_SYS_REG_DBGBCR11_EL1
, HVF_SYSREG(0, 11, 14, 0, 5) },
196 { HV_SYS_REG_DBGWVR11_EL1
, HVF_SYSREG(0, 11, 14, 0, 6) },
197 { HV_SYS_REG_DBGWCR11_EL1
, HVF_SYSREG(0, 11, 14, 0, 7) },
199 { HV_SYS_REG_DBGBVR12_EL1
, HVF_SYSREG(0, 12, 14, 0, 4) },
200 { HV_SYS_REG_DBGBCR12_EL1
, HVF_SYSREG(0, 12, 14, 0, 5) },
201 { HV_SYS_REG_DBGWVR12_EL1
, HVF_SYSREG(0, 12, 14, 0, 6) },
202 { HV_SYS_REG_DBGWCR12_EL1
, HVF_SYSREG(0, 12, 14, 0, 7) },
204 { HV_SYS_REG_DBGBVR13_EL1
, HVF_SYSREG(0, 13, 14, 0, 4) },
205 { HV_SYS_REG_DBGBCR13_EL1
, HVF_SYSREG(0, 13, 14, 0, 5) },
206 { HV_SYS_REG_DBGWVR13_EL1
, HVF_SYSREG(0, 13, 14, 0, 6) },
207 { HV_SYS_REG_DBGWCR13_EL1
, HVF_SYSREG(0, 13, 14, 0, 7) },
209 { HV_SYS_REG_DBGBVR14_EL1
, HVF_SYSREG(0, 14, 14, 0, 4) },
210 { HV_SYS_REG_DBGBCR14_EL1
, HVF_SYSREG(0, 14, 14, 0, 5) },
211 { HV_SYS_REG_DBGWVR14_EL1
, HVF_SYSREG(0, 14, 14, 0, 6) },
212 { HV_SYS_REG_DBGWCR14_EL1
, HVF_SYSREG(0, 14, 14, 0, 7) },
214 { HV_SYS_REG_DBGBVR15_EL1
, HVF_SYSREG(0, 15, 14, 0, 4) },
215 { HV_SYS_REG_DBGBCR15_EL1
, HVF_SYSREG(0, 15, 14, 0, 5) },
216 { HV_SYS_REG_DBGWVR15_EL1
, HVF_SYSREG(0, 15, 14, 0, 6) },
217 { HV_SYS_REG_DBGWCR15_EL1
, HVF_SYSREG(0, 15, 14, 0, 7) },
219 #ifdef SYNC_NO_RAW_REGS
221 * The registers below are manually synced on init because they are
222 * marked as NO_RAW. We still list them to make number space sync easier.
224 { HV_SYS_REG_MDCCINT_EL1
, HVF_SYSREG(0, 2, 2, 0, 0) },
225 { HV_SYS_REG_MIDR_EL1
, HVF_SYSREG(0, 0, 3, 0, 0) },
226 { HV_SYS_REG_MPIDR_EL1
, HVF_SYSREG(0, 0, 3, 0, 5) },
227 { HV_SYS_REG_ID_AA64PFR0_EL1
, HVF_SYSREG(0, 4, 3, 0, 0) },
229 { HV_SYS_REG_ID_AA64PFR1_EL1
, HVF_SYSREG(0, 4, 3, 0, 2) },
230 { HV_SYS_REG_ID_AA64DFR0_EL1
, HVF_SYSREG(0, 5, 3, 0, 0) },
231 { HV_SYS_REG_ID_AA64DFR1_EL1
, HVF_SYSREG(0, 5, 3, 0, 1) },
232 { HV_SYS_REG_ID_AA64ISAR0_EL1
, HVF_SYSREG(0, 6, 3, 0, 0) },
233 { HV_SYS_REG_ID_AA64ISAR1_EL1
, HVF_SYSREG(0, 6, 3, 0, 1) },
235 /* We keep the hardware MMFR0 around. HW limits are there anyway */
236 { HV_SYS_REG_ID_AA64MMFR0_EL1
, HVF_SYSREG(0, 7, 3, 0, 0) },
238 { HV_SYS_REG_ID_AA64MMFR1_EL1
, HVF_SYSREG(0, 7, 3, 0, 1) },
239 { HV_SYS_REG_ID_AA64MMFR2_EL1
, HVF_SYSREG(0, 7, 3, 0, 2) },
241 { HV_SYS_REG_MDSCR_EL1
, HVF_SYSREG(0, 2, 2, 0, 2) },
242 { HV_SYS_REG_SCTLR_EL1
, HVF_SYSREG(1, 0, 3, 0, 0) },
243 { HV_SYS_REG_CPACR_EL1
, HVF_SYSREG(1, 0, 3, 0, 2) },
244 { HV_SYS_REG_TTBR0_EL1
, HVF_SYSREG(2, 0, 3, 0, 0) },
245 { HV_SYS_REG_TTBR1_EL1
, HVF_SYSREG(2, 0, 3, 0, 1) },
246 { HV_SYS_REG_TCR_EL1
, HVF_SYSREG(2, 0, 3, 0, 2) },
248 { HV_SYS_REG_APIAKEYLO_EL1
, HVF_SYSREG(2, 1, 3, 0, 0) },
249 { HV_SYS_REG_APIAKEYHI_EL1
, HVF_SYSREG(2, 1, 3, 0, 1) },
250 { HV_SYS_REG_APIBKEYLO_EL1
, HVF_SYSREG(2, 1, 3, 0, 2) },
251 { HV_SYS_REG_APIBKEYHI_EL1
, HVF_SYSREG(2, 1, 3, 0, 3) },
252 { HV_SYS_REG_APDAKEYLO_EL1
, HVF_SYSREG(2, 2, 3, 0, 0) },
253 { HV_SYS_REG_APDAKEYHI_EL1
, HVF_SYSREG(2, 2, 3, 0, 1) },
254 { HV_SYS_REG_APDBKEYLO_EL1
, HVF_SYSREG(2, 2, 3, 0, 2) },
255 { HV_SYS_REG_APDBKEYHI_EL1
, HVF_SYSREG(2, 2, 3, 0, 3) },
256 { HV_SYS_REG_APGAKEYLO_EL1
, HVF_SYSREG(2, 3, 3, 0, 0) },
257 { HV_SYS_REG_APGAKEYHI_EL1
, HVF_SYSREG(2, 3, 3, 0, 1) },
259 { HV_SYS_REG_SPSR_EL1
, HVF_SYSREG(4, 0, 3, 0, 0) },
260 { HV_SYS_REG_ELR_EL1
, HVF_SYSREG(4, 0, 3, 0, 1) },
261 { HV_SYS_REG_SP_EL0
, HVF_SYSREG(4, 1, 3, 0, 0) },
262 { HV_SYS_REG_AFSR0_EL1
, HVF_SYSREG(5, 1, 3, 0, 0) },
263 { HV_SYS_REG_AFSR1_EL1
, HVF_SYSREG(5, 1, 3, 0, 1) },
264 { HV_SYS_REG_ESR_EL1
, HVF_SYSREG(5, 2, 3, 0, 0) },
265 { HV_SYS_REG_FAR_EL1
, HVF_SYSREG(6, 0, 3, 0, 0) },
266 { HV_SYS_REG_PAR_EL1
, HVF_SYSREG(7, 4, 3, 0, 0) },
267 { HV_SYS_REG_MAIR_EL1
, HVF_SYSREG(10, 2, 3, 0, 0) },
268 { HV_SYS_REG_AMAIR_EL1
, HVF_SYSREG(10, 3, 3, 0, 0) },
269 { HV_SYS_REG_VBAR_EL1
, HVF_SYSREG(12, 0, 3, 0, 0) },
270 { HV_SYS_REG_CONTEXTIDR_EL1
, HVF_SYSREG(13, 0, 3, 0, 1) },
271 { HV_SYS_REG_TPIDR_EL1
, HVF_SYSREG(13, 0, 3, 0, 4) },
272 { HV_SYS_REG_CNTKCTL_EL1
, HVF_SYSREG(14, 1, 3, 0, 0) },
273 { HV_SYS_REG_CSSELR_EL1
, HVF_SYSREG(0, 0, 3, 2, 0) },
274 { HV_SYS_REG_TPIDR_EL0
, HVF_SYSREG(13, 0, 3, 3, 2) },
275 { HV_SYS_REG_TPIDRRO_EL0
, HVF_SYSREG(13, 0, 3, 3, 3) },
276 { HV_SYS_REG_CNTV_CTL_EL0
, HVF_SYSREG(14, 3, 3, 3, 1) },
277 { HV_SYS_REG_CNTV_CVAL_EL0
, HVF_SYSREG(14, 3, 3, 3, 2) },
278 { HV_SYS_REG_SP_EL1
, HVF_SYSREG(4, 1, 3, 4, 0) },
281 int hvf_get_registers(CPUState
*cpu
)
283 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
284 CPUARMState
*env
= &arm_cpu
->env
;
287 hv_simd_fp_uchar16_t fpval
;
290 for (i
= 0; i
< ARRAY_SIZE(hvf_reg_match
); i
++) {
291 ret
= hv_vcpu_get_reg(cpu
->hvf
->fd
, hvf_reg_match
[i
].reg
, &val
);
292 *(uint64_t *)((void *)env
+ hvf_reg_match
[i
].offset
) = val
;
296 for (i
= 0; i
< ARRAY_SIZE(hvf_fpreg_match
); i
++) {
297 ret
= hv_vcpu_get_simd_fp_reg(cpu
->hvf
->fd
, hvf_fpreg_match
[i
].reg
,
299 memcpy((void *)env
+ hvf_fpreg_match
[i
].offset
, &fpval
, sizeof(fpval
));
304 ret
= hv_vcpu_get_reg(cpu
->hvf
->fd
, HV_REG_FPCR
, &val
);
306 vfp_set_fpcr(env
, val
);
309 ret
= hv_vcpu_get_reg(cpu
->hvf
->fd
, HV_REG_FPSR
, &val
);
311 vfp_set_fpsr(env
, val
);
313 ret
= hv_vcpu_get_reg(cpu
->hvf
->fd
, HV_REG_CPSR
, &val
);
315 pstate_write(env
, val
);
317 for (i
= 0; i
< ARRAY_SIZE(hvf_sreg_match
); i
++) {
318 if (hvf_sreg_match
[i
].cp_idx
== -1) {
322 ret
= hv_vcpu_get_sys_reg(cpu
->hvf
->fd
, hvf_sreg_match
[i
].reg
, &val
);
325 arm_cpu
->cpreg_values
[hvf_sreg_match
[i
].cp_idx
] = val
;
327 assert(write_list_to_cpustate(arm_cpu
));
329 aarch64_restore_sp(env
, arm_current_el(env
));
334 int hvf_put_registers(CPUState
*cpu
)
336 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
337 CPUARMState
*env
= &arm_cpu
->env
;
340 hv_simd_fp_uchar16_t fpval
;
343 for (i
= 0; i
< ARRAY_SIZE(hvf_reg_match
); i
++) {
344 val
= *(uint64_t *)((void *)env
+ hvf_reg_match
[i
].offset
);
345 ret
= hv_vcpu_set_reg(cpu
->hvf
->fd
, hvf_reg_match
[i
].reg
, val
);
349 for (i
= 0; i
< ARRAY_SIZE(hvf_fpreg_match
); i
++) {
350 memcpy(&fpval
, (void *)env
+ hvf_fpreg_match
[i
].offset
, sizeof(fpval
));
351 ret
= hv_vcpu_set_simd_fp_reg(cpu
->hvf
->fd
, hvf_fpreg_match
[i
].reg
,
356 ret
= hv_vcpu_set_reg(cpu
->hvf
->fd
, HV_REG_FPCR
, vfp_get_fpcr(env
));
359 ret
= hv_vcpu_set_reg(cpu
->hvf
->fd
, HV_REG_FPSR
, vfp_get_fpsr(env
));
362 ret
= hv_vcpu_set_reg(cpu
->hvf
->fd
, HV_REG_CPSR
, pstate_read(env
));
365 aarch64_save_sp(env
, arm_current_el(env
));
367 assert(write_cpustate_to_list(arm_cpu
, false));
368 for (i
= 0; i
< ARRAY_SIZE(hvf_sreg_match
); i
++) {
369 if (hvf_sreg_match
[i
].cp_idx
== -1) {
373 val
= arm_cpu
->cpreg_values
[hvf_sreg_match
[i
].cp_idx
];
374 ret
= hv_vcpu_set_sys_reg(cpu
->hvf
->fd
, hvf_sreg_match
[i
].reg
, val
);
378 ret
= hv_vcpu_set_vtimer_offset(cpu
->hvf
->fd
, hvf_state
->vtimer_offset
);
384 static void flush_cpu_state(CPUState
*cpu
)
386 if (cpu
->vcpu_dirty
) {
387 hvf_put_registers(cpu
);
388 cpu
->vcpu_dirty
= false;
392 static void hvf_set_reg(CPUState
*cpu
, int rt
, uint64_t val
)
396 flush_cpu_state(cpu
);
399 r
= hv_vcpu_set_reg(cpu
->hvf
->fd
, HV_REG_X0
+ rt
, val
);
404 static uint64_t hvf_get_reg(CPUState
*cpu
, int rt
)
409 flush_cpu_state(cpu
);
412 r
= hv_vcpu_get_reg(cpu
->hvf
->fd
, HV_REG_X0
+ rt
, &val
);
419 void hvf_arch_vcpu_destroy(CPUState
*cpu
)
423 int hvf_arch_init_vcpu(CPUState
*cpu
)
425 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
426 CPUARMState
*env
= &arm_cpu
->env
;
427 uint32_t sregs_match_len
= ARRAY_SIZE(hvf_sreg_match
);
428 uint32_t sregs_cnt
= 0;
434 asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu
->gt_cntfrq_hz
));
436 /* Allocate enough space for our sysreg sync */
437 arm_cpu
->cpreg_indexes
= g_renew(uint64_t, arm_cpu
->cpreg_indexes
,
439 arm_cpu
->cpreg_values
= g_renew(uint64_t, arm_cpu
->cpreg_values
,
441 arm_cpu
->cpreg_vmstate_indexes
= g_renew(uint64_t,
442 arm_cpu
->cpreg_vmstate_indexes
,
444 arm_cpu
->cpreg_vmstate_values
= g_renew(uint64_t,
445 arm_cpu
->cpreg_vmstate_values
,
448 memset(arm_cpu
->cpreg_values
, 0, sregs_match_len
* sizeof(uint64_t));
450 /* Populate cp list for all known sysregs */
451 for (i
= 0; i
< sregs_match_len
; i
++) {
452 const ARMCPRegInfo
*ri
;
453 uint32_t key
= hvf_sreg_match
[i
].key
;
455 ri
= get_arm_cp_reginfo(arm_cpu
->cp_regs
, key
);
457 assert(!(ri
->type
& ARM_CP_NO_RAW
));
458 hvf_sreg_match
[i
].cp_idx
= sregs_cnt
;
459 arm_cpu
->cpreg_indexes
[sregs_cnt
++] = cpreg_to_kvm_id(key
);
461 hvf_sreg_match
[i
].cp_idx
= -1;
464 arm_cpu
->cpreg_array_len
= sregs_cnt
;
465 arm_cpu
->cpreg_vmstate_array_len
= sregs_cnt
;
467 assert(write_cpustate_to_list(arm_cpu
, false));
469 /* Set CP_NO_RAW system registers on init */
470 ret
= hv_vcpu_set_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_MIDR_EL1
,
474 ret
= hv_vcpu_set_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_MPIDR_EL1
,
475 arm_cpu
->mp_affinity
);
478 ret
= hv_vcpu_get_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_ID_AA64PFR0_EL1
, &pfr
);
480 pfr
|= env
->gicv3state
? (1 << 24) : 0;
481 ret
= hv_vcpu_set_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_ID_AA64PFR0_EL1
, pfr
);
484 /* We're limited to underlying hardware caps, override internal versions */
485 ret
= hv_vcpu_get_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_ID_AA64MMFR0_EL1
,
486 &arm_cpu
->isar
.id_aa64mmfr0
);
492 void hvf_kick_vcpu_thread(CPUState
*cpu
)
494 cpus_kick_thread(cpu
);
495 hv_vcpus_exit(&cpu
->hvf
->fd
, 1);
498 static void hvf_raise_exception(CPUState
*cpu
, uint32_t excp
,
501 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
502 CPUARMState
*env
= &arm_cpu
->env
;
504 cpu
->exception_index
= excp
;
505 env
->exception
.target_el
= 1;
506 env
->exception
.syndrome
= syndrome
;
508 arm_cpu_do_interrupt(cpu
);
511 static int hvf_sysreg_read(CPUState
*cpu
, uint32_t reg
, uint32_t rt
)
513 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
514 CPUARMState
*env
= &arm_cpu
->env
;
518 case SYSREG_CNTPCT_EL0
:
519 val
= qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL
) /
520 gt_cntfrq_period_ns(arm_cpu
);
522 case SYSREG_OSLSR_EL1
:
523 val
= env
->cp15
.oslsr_el1
;
525 case SYSREG_OSDLR_EL1
:
529 cpu_synchronize_state(cpu
);
530 trace_hvf_unhandled_sysreg_read(env
->pc
, reg
,
536 hvf_raise_exception(cpu
, EXCP_UDEF
, syn_uncategorized());
540 trace_hvf_sysreg_read(reg
,
547 hvf_set_reg(cpu
, rt
, val
);
552 static int hvf_sysreg_write(CPUState
*cpu
, uint32_t reg
, uint64_t val
)
554 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
555 CPUARMState
*env
= &arm_cpu
->env
;
557 trace_hvf_sysreg_write(reg
,
566 case SYSREG_OSLAR_EL1
:
567 env
->cp15
.oslsr_el1
= val
& 1;
569 case SYSREG_OSDLR_EL1
:
573 cpu_synchronize_state(cpu
);
574 trace_hvf_unhandled_sysreg_write(env
->pc
, reg
,
580 hvf_raise_exception(cpu
, EXCP_UDEF
, syn_uncategorized());
587 static int hvf_inject_interrupts(CPUState
*cpu
)
589 if (cpu
->interrupt_request
& CPU_INTERRUPT_FIQ
) {
590 trace_hvf_inject_fiq();
591 hv_vcpu_set_pending_interrupt(cpu
->hvf
->fd
, HV_INTERRUPT_TYPE_FIQ
,
595 if (cpu
->interrupt_request
& CPU_INTERRUPT_HARD
) {
596 trace_hvf_inject_irq();
597 hv_vcpu_set_pending_interrupt(cpu
->hvf
->fd
, HV_INTERRUPT_TYPE_IRQ
,
604 static uint64_t hvf_vtimer_val_raw(void)
607 * mach_absolute_time() returns the vtimer value without the VM
608 * offset that we define. Add our own offset on top.
610 return mach_absolute_time() - hvf_state
->vtimer_offset
;
613 static uint64_t hvf_vtimer_val(void)
615 if (!runstate_is_running()) {
616 /* VM is paused, the vtimer value is in vtimer.vtimer_val */
617 return vtimer
.vtimer_val
;
620 return hvf_vtimer_val_raw();
623 static void hvf_wait_for_ipi(CPUState
*cpu
, struct timespec
*ts
)
626 * Use pselect to sleep so that other threads can IPI us while we're
629 qatomic_mb_set(&cpu
->thread_kicked
, false);
630 qemu_mutex_unlock_iothread();
631 pselect(0, 0, 0, 0, ts
, &cpu
->hvf
->unblock_ipi_mask
);
632 qemu_mutex_lock_iothread();
635 static void hvf_wfi(CPUState
*cpu
)
637 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
642 int64_t ticks_to_sleep
;
647 if (cpu
->interrupt_request
& (CPU_INTERRUPT_HARD
| CPU_INTERRUPT_FIQ
)) {
648 /* Interrupt pending, no need to wait */
652 r
= hv_vcpu_get_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_CNTV_CTL_EL0
, &ctl
);
655 if (!(ctl
& 1) || (ctl
& 2)) {
656 /* Timer disabled or masked, just wait for an IPI. */
657 hvf_wait_for_ipi(cpu
, NULL
);
661 r
= hv_vcpu_get_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_CNTV_CVAL_EL0
, &cval
);
664 ticks_to_sleep
= cval
- hvf_vtimer_val();
665 if (ticks_to_sleep
< 0) {
669 cntfrq
= gt_cntfrq_period_ns(arm_cpu
);
670 seconds
= muldiv64(ticks_to_sleep
, cntfrq
, NANOSECONDS_PER_SECOND
);
671 ticks_to_sleep
-= muldiv64(seconds
, NANOSECONDS_PER_SECOND
, cntfrq
);
672 nanos
= ticks_to_sleep
* cntfrq
;
675 * Don't sleep for less than the time a context switch would take,
676 * so that we can satisfy fast timer requests on the same CPU.
677 * Measurements on M1 show the sweet spot to be ~2ms.
679 if (!seconds
&& nanos
< (2 * SCALE_MS
)) {
683 ts
= (struct timespec
) { seconds
, nanos
};
684 hvf_wait_for_ipi(cpu
, &ts
);
687 static void hvf_sync_vtimer(CPUState
*cpu
)
689 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
694 if (!cpu
->hvf
->vtimer_masked
) {
695 /* We will get notified on vtimer changes by hvf, nothing to do */
699 r
= hv_vcpu_get_sys_reg(cpu
->hvf
->fd
, HV_SYS_REG_CNTV_CTL_EL0
, &ctl
);
702 irq_state
= (ctl
& (TMR_CTL_ENABLE
| TMR_CTL_IMASK
| TMR_CTL_ISTATUS
)) ==
703 (TMR_CTL_ENABLE
| TMR_CTL_ISTATUS
);
704 qemu_set_irq(arm_cpu
->gt_timer_outputs
[GTIMER_VIRT
], irq_state
);
707 /* Timer no longer asserting, we can unmask it */
708 hv_vcpu_set_vtimer_mask(cpu
->hvf
->fd
, false);
709 cpu
->hvf
->vtimer_masked
= false;
713 int hvf_vcpu_exec(CPUState
*cpu
)
715 ARMCPU
*arm_cpu
= ARM_CPU(cpu
);
716 CPUARMState
*env
= &arm_cpu
->env
;
717 hv_vcpu_exit_t
*hvf_exit
= cpu
->hvf
->exit
;
719 bool advance_pc
= false;
721 if (hvf_inject_interrupts(cpu
)) {
722 return EXCP_INTERRUPT
;
729 flush_cpu_state(cpu
);
731 qemu_mutex_unlock_iothread();
732 assert_hvf_ok(hv_vcpu_run(cpu
->hvf
->fd
));
735 uint64_t exit_reason
= hvf_exit
->reason
;
736 uint64_t syndrome
= hvf_exit
->exception
.syndrome
;
737 uint32_t ec
= syn_get_ec(syndrome
);
739 qemu_mutex_lock_iothread();
740 switch (exit_reason
) {
741 case HV_EXIT_REASON_EXCEPTION
:
742 /* This is the main one, handle below. */
744 case HV_EXIT_REASON_VTIMER_ACTIVATED
:
745 qemu_set_irq(arm_cpu
->gt_timer_outputs
[GTIMER_VIRT
], 1);
746 cpu
->hvf
->vtimer_masked
= true;
748 case HV_EXIT_REASON_CANCELED
:
749 /* we got kicked, no exit to process */
755 hvf_sync_vtimer(cpu
);
759 bool isv
= syndrome
& ARM_EL_ISV
;
760 bool iswrite
= (syndrome
>> 6) & 1;
761 bool s1ptw
= (syndrome
>> 7) & 1;
762 uint32_t sas
= (syndrome
>> 22) & 3;
763 uint32_t len
= 1 << sas
;
764 uint32_t srt
= (syndrome
>> 16) & 0x1f;
767 trace_hvf_data_abort(env
->pc
, hvf_exit
->exception
.virtual_address
,
768 hvf_exit
->exception
.physical_address
, isv
,
769 iswrite
, s1ptw
, len
, srt
);
774 val
= hvf_get_reg(cpu
, srt
);
775 address_space_write(&address_space_memory
,
776 hvf_exit
->exception
.physical_address
,
777 MEMTXATTRS_UNSPECIFIED
, &val
, len
);
779 address_space_read(&address_space_memory
,
780 hvf_exit
->exception
.physical_address
,
781 MEMTXATTRS_UNSPECIFIED
, &val
, len
);
782 hvf_set_reg(cpu
, srt
, val
);
788 case EC_SYSTEMREGISTERTRAP
: {
789 bool isread
= (syndrome
>> 0) & 1;
790 uint32_t rt
= (syndrome
>> 5) & 0x1f;
791 uint32_t reg
= syndrome
& SYSREG_MASK
;
796 ret
= hvf_sysreg_read(cpu
, reg
, rt
);
798 val
= hvf_get_reg(cpu
, rt
);
799 ret
= hvf_sysreg_write(cpu
, reg
, val
);
807 if (!(syndrome
& WFX_IS_WFE
)) {
812 cpu_synchronize_state(cpu
);
813 trace_hvf_unknown_hvc(env
->xregs
[0]);
814 /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
818 cpu_synchronize_state(cpu
);
819 trace_hvf_unknown_smc(env
->xregs
[0]);
820 hvf_raise_exception(cpu
, EXCP_UDEF
, syn_uncategorized());
823 cpu_synchronize_state(cpu
);
824 trace_hvf_exit(syndrome
, ec
, env
->pc
);
825 error_report("0x%llx: unhandled exception ec=0x%x", env
->pc
, ec
);
831 flush_cpu_state(cpu
);
833 r
= hv_vcpu_get_reg(cpu
->hvf
->fd
, HV_REG_PC
, &pc
);
836 r
= hv_vcpu_set_reg(cpu
->hvf
->fd
, HV_REG_PC
, pc
);
843 static const VMStateDescription vmstate_hvf_vtimer
= {
844 .name
= "hvf-vtimer",
846 .minimum_version_id
= 1,
847 .fields
= (VMStateField
[]) {
848 VMSTATE_UINT64(vtimer_val
, HVFVTimer
),
849 VMSTATE_END_OF_LIST()
853 static void hvf_vm_state_change(void *opaque
, bool running
, RunState state
)
855 HVFVTimer
*s
= opaque
;
858 /* Update vtimer offset on all CPUs */
859 hvf_state
->vtimer_offset
= mach_absolute_time() - s
->vtimer_val
;
860 cpu_synchronize_all_states();
862 /* Remember vtimer value on every pause */
863 s
->vtimer_val
= hvf_vtimer_val_raw();
867 int hvf_arch_init(void)
869 hvf_state
->vtimer_offset
= mach_absolute_time();
870 vmstate_register(NULL
, 0, &vmstate_hvf_vtimer
, &vtimer
);
871 qemu_add_vm_change_state_handler(hvf_vm_state_change
, &vtimer
);