/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/error-report.h"

#include "sysemu/runstate.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"
#include "sysemu/hw_accel.h"

#include <mach/mach_time.h>

#include "exec/address-spaces.h"
#include "hw/irq.h"
#include "qemu/main-loop.h"
#include "sysemu/cpus.h"
#include "target/arm/cpu.h"
#include "target/arm/internals.h"
#include "trace/trace-target_arm_hvf.h"
#include "migration/vmstate.h"

#define HVF_SYSREG(crn, crm, op0, op1, op2) \
        ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2)
#define PL1_WRITE_MASK 0x4

#define SYSREG(op0, op1, crn, crm, op2) \
    ((op0 << 20) | (op2 << 17) | (op1 << 14) | (crn << 10) | (crm << 1))
#define SYSREG_MASK       SYSREG(0x3, 0x7, 0xf, 0xf, 0x7)
#define SYSREG_OSLAR_EL1  SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1  SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1  SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0 SYSREG(3, 3, 14, 0, 1)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0, offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1, offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2, offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3, offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4, offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5, offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6, offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7, offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8, offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9, offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC, offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 14, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 14, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 14, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 2) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

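/*
 * Read the vcpu state out of Hypervisor.framework into QEMU's
 * CPUARMState: GPRs and PC, SIMD/FP registers, FPCR/FPSR, PSTATE and
 * the system registers listed in hvf_sreg_match above.
 */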
int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

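/*
 * Mirror image of hvf_get_registers: push QEMU's CPUARMState back into
 * the Hypervisor.framework vcpu, including the vtimer offset.
 */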
int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->vcpu_dirty) {
        hvf_put_registers(cpu);
        cpu->vcpu_dirty = false;
    }
}

static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

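/*
 * Per-vcpu init: mark the CPU as AArch64, size the cpreg list to the
 * sysregs we can sync, and seed the NO_RAW registers (MIDR, MPIDR,
 * ID_AA64PFR0, ID_AA64MMFR0) that are handled outside the normal sync.
 */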
int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = 1;
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

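/* Kick a vcpu out of hv_vcpu_run() so the outer loop can run again. */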
void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->hvf->fd, 1);
}

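/* Inject a synchronous exception with the given syndrome into the guest at EL1. */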
static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

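/*
 * Handle a trapped MRS. Returns 0 on success; for an unhandled register
 * it injects an UNDEF into the guest and returns 1 so the caller knows
 * not to advance the PC.
 */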
static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t val = 0;

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
              gt_cntfrq_period_ns(arm_cpu);
        break;
    case SYSREG_OSLSR_EL1:
        val = env->cp15.oslsr_el1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                        (reg >> 20) & 0x3,
                                        (reg >> 14) & 0x7,
                                        (reg >> 10) & 0xf,
                                        (reg >> 1) & 0xf,
                                        (reg >> 17) & 0x7);
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    trace_hvf_sysreg_read(reg,
                          (reg >> 20) & 0x3,
                          (reg >> 14) & 0x7,
                          (reg >> 10) & 0xf,
                          (reg >> 1) & 0xf,
                          (reg >> 17) & 0x7,
                          val);
    hvf_set_reg(cpu, rt, val);

    return 0;
}

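/* Handle a trapped MSR; same return convention as hvf_sysreg_read(). */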
static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    trace_hvf_sysreg_write(reg,
                           (reg >> 20) & 0x3,
                           (reg >> 14) & 0x7,
                           (reg >> 10) & 0xf,
                           (reg >> 1) & 0xf,
                           (reg >> 17) & 0x7,
                           val);

    switch (reg) {
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                         (reg >> 20) & 0x3,
                                         (reg >> 14) & 0x7,
                                         (reg >> 10) & 0xf,
                                         (reg >> 1) & 0xf,
                                         (reg >> 17) & 0x7);
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    return 0;
}

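/* Forward pending QEMU IRQ/FIQ state to Hypervisor.framework. */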
static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}

static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the vtimer value without the VM
     * offset that we define. Add our own offset on top.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_mb_set(&cpu->thread_kicked, false);
    qemu_mutex_unlock_iothread();
    pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
    qemu_mutex_lock_iothread();
}

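/*
 * WFI handler: if no interrupt is pending, sleep the host thread until
 * either an IPI arrives or the guest's vtimer deadline passes.
 */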
static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    if (!(ctl & 1) || (ctl & 2)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;

    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}

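/*
 * Resync the vtimer IRQ line after a vtimer-activated exit. While the
 * timer is masked we re-check its state on every exit and unmask it
 * once it stops asserting.
 */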
static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->hvf->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
        cpu->hvf->vtimer_masked = false;
    }
}

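/*
 * Main run loop body: enter the guest via hv_vcpu_run() and dispatch
 * on the exit reason (exception, vtimer activation, or kick).
 */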
int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }

    flush_cpu_state(cpu);

    qemu_mutex_unlock_iothread();
    assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));

    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    qemu_mutex_lock_iothread();
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->hvf->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        assert(0);
    }

    hvf_sync_vtimer(cpu);

    switch (ec) {
    case EC_DATAABORT: {
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;
        bool s1ptw = (syndrome >> 7) & 1;
        uint32_t sas = (syndrome >> 22) & 3;
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f;
        uint64_t val = 0;

        trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
    case EC_SYSTEMREGISTERTRAP: {
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int ret = 0;

        if (isread) {
            ret = hvf_sysreg_read(cpu, reg, rt);
        } else {
            val = hvf_get_reg(cpu, rt);
            ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        trace_hvf_unknown_hvc(env->xregs[0]);
        /* SMCCC 1.3 section 5.2 says every unknown SMCCC call returns -1 */
        env->xregs[0] = -1;
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        trace_hvf_unknown_smc(env->xregs[0]);
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

    if (advance_pc) {
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);
    }

    return 0;
}

static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

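/*
 * Keep the vtimer from ticking while the VM is paused: save the raw
 * counter on stop and recompute the global vtimer offset on resume.
 */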
static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
    return 0;
}