/*
 * QEMU Hypervisor.framework support for Apple Silicon
 *
 * Copyright 2020 Alexander Graf <agraf@csgraf.de>
 * Copyright 2020 Google LLC
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */
11 | ||
12 | #include "qemu/osdep.h" | |
a1477da3 AG |
13 | #include "qemu/error-report.h" |
14 | ||
15 | #include "sysemu/runstate.h" | |
16 | #include "sysemu/hvf.h" | |
17 | #include "sysemu/hvf_int.h" | |
18 | #include "sysemu/hw_accel.h" | |
585df85e | 19 | #include "hvf_arm.h" |
b5fb359c | 20 | #include "cpregs.h" |
a1477da3 AG |
21 | |
22 | #include <mach/mach_time.h> | |
23 | ||
24 | #include "exec/address-spaces.h" | |
25 | #include "hw/irq.h" | |
26 | #include "qemu/main-loop.h" | |
27 | #include "sysemu/cpus.h" | |
2c9c0bf9 | 28 | #include "arm-powerctl.h" |
a1477da3 AG |
29 | #include "target/arm/cpu.h" |
30 | #include "target/arm/internals.h" | |
31 | #include "trace/trace-target_arm_hvf.h" | |
32 | #include "migration/vmstate.h" | |
33 | ||
34 | #define HVF_SYSREG(crn, crm, op0, op1, op2) \ | |
35 | ENCODE_AA64_CP_REG(CP_REG_ARM64_SYSREG_CP, crn, crm, op0, op1, op2) | |
36 | #define PL1_WRITE_MASK 0x4 | |
37 | ||
#define SYSREG_OP0_SHIFT 20
#define SYSREG_OP0_MASK 0x3
#define SYSREG_OP0(sysreg) (((sysreg) >> SYSREG_OP0_SHIFT) & SYSREG_OP0_MASK)
#define SYSREG_OP1_SHIFT 14
#define SYSREG_OP1_MASK 0x7
#define SYSREG_OP1(sysreg) (((sysreg) >> SYSREG_OP1_SHIFT) & SYSREG_OP1_MASK)
#define SYSREG_CRN_SHIFT 10
#define SYSREG_CRN_MASK 0xf
#define SYSREG_CRN(sysreg) (((sysreg) >> SYSREG_CRN_SHIFT) & SYSREG_CRN_MASK)
#define SYSREG_CRM_SHIFT 1
#define SYSREG_CRM_MASK 0xf
#define SYSREG_CRM(sysreg) (((sysreg) >> SYSREG_CRM_SHIFT) & SYSREG_CRM_MASK)
#define SYSREG_OP2_SHIFT 17
#define SYSREG_OP2_MASK 0x7
#define SYSREG_OP2(sysreg) (((sysreg) >> SYSREG_OP2_SHIFT) & SYSREG_OP2_MASK)

#define SYSREG(op0, op1, crn, crm, op2) \
    (((op0) << SYSREG_OP0_SHIFT) | \
     ((op1) << SYSREG_OP1_SHIFT) | \
     ((crn) << SYSREG_CRN_SHIFT) | \
     ((crm) << SYSREG_CRM_SHIFT) | \
     ((op2) << SYSREG_OP2_SHIFT))
#define SYSREG_MASK \
    SYSREG(SYSREG_OP0_MASK, \
           SYSREG_OP1_MASK, \
           SYSREG_CRN_MASK, \
           SYSREG_CRM_MASK, \
           SYSREG_OP2_MASK)
#define SYSREG_OSLAR_EL1      SYSREG(2, 0, 1, 0, 4)
#define SYSREG_OSLSR_EL1      SYSREG(2, 0, 1, 1, 4)
#define SYSREG_OSDLR_EL1      SYSREG(2, 0, 1, 3, 4)
#define SYSREG_CNTPCT_EL0     SYSREG(3, 3, 14, 0, 1)
#define SYSREG_PMCR_EL0       SYSREG(3, 3, 9, 12, 0)
#define SYSREG_PMUSERENR_EL0  SYSREG(3, 3, 9, 14, 0)
#define SYSREG_PMCNTENSET_EL0 SYSREG(3, 3, 9, 12, 1)
#define SYSREG_PMCNTENCLR_EL0 SYSREG(3, 3, 9, 12, 2)
#define SYSREG_PMINTENCLR_EL1 SYSREG(3, 0, 9, 14, 2)
#define SYSREG_PMOVSCLR_EL0   SYSREG(3, 3, 9, 12, 3)
#define SYSREG_PMSWINC_EL0    SYSREG(3, 3, 9, 12, 4)
#define SYSREG_PMSELR_EL0     SYSREG(3, 3, 9, 12, 5)
#define SYSREG_PMCEID0_EL0    SYSREG(3, 3, 9, 12, 6)
#define SYSREG_PMCEID1_EL0    SYSREG(3, 3, 9, 12, 7)
#define SYSREG_PMCCNTR_EL0    SYSREG(3, 3, 9, 13, 0)
#define SYSREG_PMCCFILTR_EL0  SYSREG(3, 3, 14, 15, 7)

#define WFX_IS_WFE (1 << 0)

#define TMR_CTL_ENABLE  (1 << 0)
#define TMR_CTL_IMASK   (1 << 1)
#define TMR_CTL_ISTATUS (1 << 2)

static void hvf_wfi(CPUState *cpu);

typedef struct HVFVTimer {
    /* Vtimer value during migration and paused state */
    uint64_t vtimer_val;
} HVFVTimer;

static HVFVTimer vtimer;

typedef struct ARMHostCPUFeatures {
    ARMISARegisters isar;
    uint64_t features;
    uint64_t midr;
    uint32_t reset_sctlr;
    const char *dtb_compatible;
} ARMHostCPUFeatures;

static ARMHostCPUFeatures arm_host_cpu_features;

struct hvf_reg_match {
    int reg;
    uint64_t offset;
};

static const struct hvf_reg_match hvf_reg_match[] = {
    { HV_REG_X0, offsetof(CPUARMState, xregs[0]) },
    { HV_REG_X1, offsetof(CPUARMState, xregs[1]) },
    { HV_REG_X2, offsetof(CPUARMState, xregs[2]) },
    { HV_REG_X3, offsetof(CPUARMState, xregs[3]) },
    { HV_REG_X4, offsetof(CPUARMState, xregs[4]) },
    { HV_REG_X5, offsetof(CPUARMState, xregs[5]) },
    { HV_REG_X6, offsetof(CPUARMState, xregs[6]) },
    { HV_REG_X7, offsetof(CPUARMState, xregs[7]) },
    { HV_REG_X8, offsetof(CPUARMState, xregs[8]) },
    { HV_REG_X9, offsetof(CPUARMState, xregs[9]) },
    { HV_REG_X10, offsetof(CPUARMState, xregs[10]) },
    { HV_REG_X11, offsetof(CPUARMState, xregs[11]) },
    { HV_REG_X12, offsetof(CPUARMState, xregs[12]) },
    { HV_REG_X13, offsetof(CPUARMState, xregs[13]) },
    { HV_REG_X14, offsetof(CPUARMState, xregs[14]) },
    { HV_REG_X15, offsetof(CPUARMState, xregs[15]) },
    { HV_REG_X16, offsetof(CPUARMState, xregs[16]) },
    { HV_REG_X17, offsetof(CPUARMState, xregs[17]) },
    { HV_REG_X18, offsetof(CPUARMState, xregs[18]) },
    { HV_REG_X19, offsetof(CPUARMState, xregs[19]) },
    { HV_REG_X20, offsetof(CPUARMState, xregs[20]) },
    { HV_REG_X21, offsetof(CPUARMState, xregs[21]) },
    { HV_REG_X22, offsetof(CPUARMState, xregs[22]) },
    { HV_REG_X23, offsetof(CPUARMState, xregs[23]) },
    { HV_REG_X24, offsetof(CPUARMState, xregs[24]) },
    { HV_REG_X25, offsetof(CPUARMState, xregs[25]) },
    { HV_REG_X26, offsetof(CPUARMState, xregs[26]) },
    { HV_REG_X27, offsetof(CPUARMState, xregs[27]) },
    { HV_REG_X28, offsetof(CPUARMState, xregs[28]) },
    { HV_REG_X29, offsetof(CPUARMState, xregs[29]) },
    { HV_REG_X30, offsetof(CPUARMState, xregs[30]) },
    { HV_REG_PC, offsetof(CPUARMState, pc) },
};

static const struct hvf_reg_match hvf_fpreg_match[] = {
    { HV_SIMD_FP_REG_Q0, offsetof(CPUARMState, vfp.zregs[0]) },
    { HV_SIMD_FP_REG_Q1, offsetof(CPUARMState, vfp.zregs[1]) },
    { HV_SIMD_FP_REG_Q2, offsetof(CPUARMState, vfp.zregs[2]) },
    { HV_SIMD_FP_REG_Q3, offsetof(CPUARMState, vfp.zregs[3]) },
    { HV_SIMD_FP_REG_Q4, offsetof(CPUARMState, vfp.zregs[4]) },
    { HV_SIMD_FP_REG_Q5, offsetof(CPUARMState, vfp.zregs[5]) },
    { HV_SIMD_FP_REG_Q6, offsetof(CPUARMState, vfp.zregs[6]) },
    { HV_SIMD_FP_REG_Q7, offsetof(CPUARMState, vfp.zregs[7]) },
    { HV_SIMD_FP_REG_Q8, offsetof(CPUARMState, vfp.zregs[8]) },
    { HV_SIMD_FP_REG_Q9, offsetof(CPUARMState, vfp.zregs[9]) },
    { HV_SIMD_FP_REG_Q10, offsetof(CPUARMState, vfp.zregs[10]) },
    { HV_SIMD_FP_REG_Q11, offsetof(CPUARMState, vfp.zregs[11]) },
    { HV_SIMD_FP_REG_Q12, offsetof(CPUARMState, vfp.zregs[12]) },
    { HV_SIMD_FP_REG_Q13, offsetof(CPUARMState, vfp.zregs[13]) },
    { HV_SIMD_FP_REG_Q14, offsetof(CPUARMState, vfp.zregs[14]) },
    { HV_SIMD_FP_REG_Q15, offsetof(CPUARMState, vfp.zregs[15]) },
    { HV_SIMD_FP_REG_Q16, offsetof(CPUARMState, vfp.zregs[16]) },
    { HV_SIMD_FP_REG_Q17, offsetof(CPUARMState, vfp.zregs[17]) },
    { HV_SIMD_FP_REG_Q18, offsetof(CPUARMState, vfp.zregs[18]) },
    { HV_SIMD_FP_REG_Q19, offsetof(CPUARMState, vfp.zregs[19]) },
    { HV_SIMD_FP_REG_Q20, offsetof(CPUARMState, vfp.zregs[20]) },
    { HV_SIMD_FP_REG_Q21, offsetof(CPUARMState, vfp.zregs[21]) },
    { HV_SIMD_FP_REG_Q22, offsetof(CPUARMState, vfp.zregs[22]) },
    { HV_SIMD_FP_REG_Q23, offsetof(CPUARMState, vfp.zregs[23]) },
    { HV_SIMD_FP_REG_Q24, offsetof(CPUARMState, vfp.zregs[24]) },
    { HV_SIMD_FP_REG_Q25, offsetof(CPUARMState, vfp.zregs[25]) },
    { HV_SIMD_FP_REG_Q26, offsetof(CPUARMState, vfp.zregs[26]) },
    { HV_SIMD_FP_REG_Q27, offsetof(CPUARMState, vfp.zregs[27]) },
    { HV_SIMD_FP_REG_Q28, offsetof(CPUARMState, vfp.zregs[28]) },
    { HV_SIMD_FP_REG_Q29, offsetof(CPUARMState, vfp.zregs[29]) },
    { HV_SIMD_FP_REG_Q30, offsetof(CPUARMState, vfp.zregs[30]) },
    { HV_SIMD_FP_REG_Q31, offsetof(CPUARMState, vfp.zregs[31]) },
};

struct hvf_sreg_match {
    int reg;
    uint32_t key;
    uint32_t cp_idx;
};

static struct hvf_sreg_match hvf_sreg_match[] = {
    { HV_SYS_REG_DBGBVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR0_EL1, HVF_SYSREG(0, 0, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR0_EL1, HVF_SYSREG(0, 0, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR1_EL1, HVF_SYSREG(0, 1, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR1_EL1, HVF_SYSREG(0, 1, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR2_EL1, HVF_SYSREG(0, 2, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR2_EL1, HVF_SYSREG(0, 2, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR3_EL1, HVF_SYSREG(0, 3, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR3_EL1, HVF_SYSREG(0, 3, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR4_EL1, HVF_SYSREG(0, 4, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR4_EL1, HVF_SYSREG(0, 4, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR5_EL1, HVF_SYSREG(0, 5, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR5_EL1, HVF_SYSREG(0, 5, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR6_EL1, HVF_SYSREG(0, 6, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR6_EL1, HVF_SYSREG(0, 6, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR7_EL1, HVF_SYSREG(0, 7, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR7_EL1, HVF_SYSREG(0, 7, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR8_EL1, HVF_SYSREG(0, 8, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR8_EL1, HVF_SYSREG(0, 8, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR9_EL1, HVF_SYSREG(0, 9, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR9_EL1, HVF_SYSREG(0, 9, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR10_EL1, HVF_SYSREG(0, 10, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR10_EL1, HVF_SYSREG(0, 10, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR11_EL1, HVF_SYSREG(0, 11, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR11_EL1, HVF_SYSREG(0, 11, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR12_EL1, HVF_SYSREG(0, 12, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR12_EL1, HVF_SYSREG(0, 12, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR13_EL1, HVF_SYSREG(0, 13, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR13_EL1, HVF_SYSREG(0, 13, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR14_EL1, HVF_SYSREG(0, 14, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR14_EL1, HVF_SYSREG(0, 14, 2, 0, 7) },

    { HV_SYS_REG_DBGBVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 4) },
    { HV_SYS_REG_DBGBCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 5) },
    { HV_SYS_REG_DBGWVR15_EL1, HVF_SYSREG(0, 15, 2, 0, 6) },
    { HV_SYS_REG_DBGWCR15_EL1, HVF_SYSREG(0, 15, 2, 0, 7) },

#ifdef SYNC_NO_RAW_REGS
    /*
     * The registers below are manually synced on init because they are
     * marked as NO_RAW. We still list them to make number space sync easier.
     */
    { HV_SYS_REG_MDCCINT_EL1, HVF_SYSREG(0, 2, 2, 0, 0) },
    { HV_SYS_REG_MIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 0) },
    { HV_SYS_REG_MPIDR_EL1, HVF_SYSREG(0, 0, 3, 0, 5) },
    { HV_SYS_REG_ID_AA64PFR0_EL1, HVF_SYSREG(0, 4, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64PFR1_EL1, HVF_SYSREG(0, 4, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64DFR0_EL1, HVF_SYSREG(0, 5, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64DFR1_EL1, HVF_SYSREG(0, 5, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64ISAR0_EL1, HVF_SYSREG(0, 6, 3, 0, 0) },
    { HV_SYS_REG_ID_AA64ISAR1_EL1, HVF_SYSREG(0, 6, 3, 0, 1) },
#ifdef SYNC_NO_MMFR0
    /* We keep the hardware MMFR0 around. HW limits are there anyway */
    { HV_SYS_REG_ID_AA64MMFR0_EL1, HVF_SYSREG(0, 7, 3, 0, 0) },
#endif
    { HV_SYS_REG_ID_AA64MMFR1_EL1, HVF_SYSREG(0, 7, 3, 0, 1) },
    { HV_SYS_REG_ID_AA64MMFR2_EL1, HVF_SYSREG(0, 7, 3, 0, 2) },

    { HV_SYS_REG_MDSCR_EL1, HVF_SYSREG(0, 2, 2, 0, 2) },
    { HV_SYS_REG_SCTLR_EL1, HVF_SYSREG(1, 0, 3, 0, 0) },
    { HV_SYS_REG_CPACR_EL1, HVF_SYSREG(1, 0, 3, 0, 2) },
    { HV_SYS_REG_TTBR0_EL1, HVF_SYSREG(2, 0, 3, 0, 0) },
    { HV_SYS_REG_TTBR1_EL1, HVF_SYSREG(2, 0, 3, 0, 1) },
    { HV_SYS_REG_TCR_EL1, HVF_SYSREG(2, 0, 3, 0, 2) },

    { HV_SYS_REG_APIAKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 0) },
    { HV_SYS_REG_APIAKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 1) },
    { HV_SYS_REG_APIBKEYLO_EL1, HVF_SYSREG(2, 1, 3, 0, 2) },
    { HV_SYS_REG_APIBKEYHI_EL1, HVF_SYSREG(2, 1, 3, 0, 3) },
    { HV_SYS_REG_APDAKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 0) },
    { HV_SYS_REG_APDAKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 1) },
    { HV_SYS_REG_APDBKEYLO_EL1, HVF_SYSREG(2, 2, 3, 0, 2) },
    { HV_SYS_REG_APDBKEYHI_EL1, HVF_SYSREG(2, 2, 3, 0, 3) },
    { HV_SYS_REG_APGAKEYLO_EL1, HVF_SYSREG(2, 3, 3, 0, 0) },
    { HV_SYS_REG_APGAKEYHI_EL1, HVF_SYSREG(2, 3, 3, 0, 1) },

    { HV_SYS_REG_SPSR_EL1, HVF_SYSREG(4, 0, 3, 0, 0) },
    { HV_SYS_REG_ELR_EL1, HVF_SYSREG(4, 0, 3, 0, 1) },
    { HV_SYS_REG_SP_EL0, HVF_SYSREG(4, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR0_EL1, HVF_SYSREG(5, 1, 3, 0, 0) },
    { HV_SYS_REG_AFSR1_EL1, HVF_SYSREG(5, 1, 3, 0, 1) },
    { HV_SYS_REG_ESR_EL1, HVF_SYSREG(5, 2, 3, 0, 0) },
    { HV_SYS_REG_FAR_EL1, HVF_SYSREG(6, 0, 3, 0, 0) },
    { HV_SYS_REG_PAR_EL1, HVF_SYSREG(7, 4, 3, 0, 0) },
    { HV_SYS_REG_MAIR_EL1, HVF_SYSREG(10, 2, 3, 0, 0) },
    { HV_SYS_REG_AMAIR_EL1, HVF_SYSREG(10, 3, 3, 0, 0) },
    { HV_SYS_REG_VBAR_EL1, HVF_SYSREG(12, 0, 3, 0, 0) },
    { HV_SYS_REG_CONTEXTIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 1) },
    { HV_SYS_REG_TPIDR_EL1, HVF_SYSREG(13, 0, 3, 0, 4) },
    { HV_SYS_REG_CNTKCTL_EL1, HVF_SYSREG(14, 1, 3, 0, 0) },
    { HV_SYS_REG_CSSELR_EL1, HVF_SYSREG(0, 0, 3, 2, 0) },
    { HV_SYS_REG_TPIDR_EL0, HVF_SYSREG(13, 0, 3, 3, 2) },
    { HV_SYS_REG_TPIDRRO_EL0, HVF_SYSREG(13, 0, 3, 3, 3) },
    { HV_SYS_REG_CNTV_CTL_EL0, HVF_SYSREG(14, 3, 3, 3, 1) },
    { HV_SYS_REG_CNTV_CVAL_EL0, HVF_SYSREG(14, 3, 3, 3, 2) },
    { HV_SYS_REG_SP_EL1, HVF_SYSREG(4, 1, 3, 4, 0) },
};

int hvf_get_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        ret = hv_vcpu_get_reg(cpu->hvf->fd, hvf_reg_match[i].reg, &val);
        *(uint64_t *)((void *)env + hvf_reg_match[i].offset) = val;
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        ret = hv_vcpu_get_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
                                      &fpval);
        memcpy((void *)env + hvf_fpreg_match[i].offset, &fpval, sizeof(fpval));
        assert_hvf_ok(ret);
    }

    val = 0;
    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPCR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpcr(env, val);

    val = 0;
    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_FPSR, &val);
    assert_hvf_ok(ret);
    vfp_set_fpsr(env, val);

    ret = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_CPSR, &val);
    assert_hvf_ok(ret);
    pstate_write(env, val);

    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, &val);
        assert_hvf_ok(ret);

        arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx] = val;
    }
    assert(write_list_to_cpustate(arm_cpu));

    aarch64_restore_sp(env, arm_current_el(env));

    return 0;
}

int hvf_put_registers(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_return_t ret;
    uint64_t val;
    hv_simd_fp_uchar16_t fpval;
    int i;

    for (i = 0; i < ARRAY_SIZE(hvf_reg_match); i++) {
        val = *(uint64_t *)((void *)env + hvf_reg_match[i].offset);
        ret = hv_vcpu_set_reg(cpu->hvf->fd, hvf_reg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    for (i = 0; i < ARRAY_SIZE(hvf_fpreg_match); i++) {
        memcpy(&fpval, (void *)env + hvf_fpreg_match[i].offset, sizeof(fpval));
        ret = hv_vcpu_set_simd_fp_reg(cpu->hvf->fd, hvf_fpreg_match[i].reg,
                                      fpval);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPCR, vfp_get_fpcr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_FPSR, vfp_get_fpsr(env));
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_CPSR, pstate_read(env));
    assert_hvf_ok(ret);

    aarch64_save_sp(env, arm_current_el(env));

    assert(write_cpustate_to_list(arm_cpu, false));
    for (i = 0; i < ARRAY_SIZE(hvf_sreg_match); i++) {
        if (hvf_sreg_match[i].cp_idx == -1) {
            continue;
        }

        val = arm_cpu->cpreg_values[hvf_sreg_match[i].cp_idx];
        ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, hvf_sreg_match[i].reg, val);
        assert_hvf_ok(ret);
    }

    ret = hv_vcpu_set_vtimer_offset(cpu->hvf->fd, hvf_state->vtimer_offset);
    assert_hvf_ok(ret);

    return 0;
}

static void flush_cpu_state(CPUState *cpu)
{
    if (cpu->vcpu_dirty) {
        hvf_put_registers(cpu);
        cpu->vcpu_dirty = false;
    }
}

static void hvf_set_reg(CPUState *cpu, int rt, uint64_t val)
{
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_X0 + rt, val);
        assert_hvf_ok(r);
    }
}

static uint64_t hvf_get_reg(CPUState *cpu, int rt)
{
    uint64_t val = 0;
    hv_return_t r;

    flush_cpu_state(cpu);

    if (rt < 31) {
        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_X0 + rt, &val);
        assert_hvf_ok(r);
    }

    return val;
}

static bool hvf_arm_get_host_cpu_features(ARMHostCPUFeatures *ahcf)
{
    ARMISARegisters host_isar = {};
    const struct isar_regs {
        int reg;
        uint64_t *val;
    } regs[] = {
        { HV_SYS_REG_ID_AA64PFR0_EL1, &host_isar.id_aa64pfr0 },
        { HV_SYS_REG_ID_AA64PFR1_EL1, &host_isar.id_aa64pfr1 },
        { HV_SYS_REG_ID_AA64DFR0_EL1, &host_isar.id_aa64dfr0 },
        { HV_SYS_REG_ID_AA64DFR1_EL1, &host_isar.id_aa64dfr1 },
        { HV_SYS_REG_ID_AA64ISAR0_EL1, &host_isar.id_aa64isar0 },
        { HV_SYS_REG_ID_AA64ISAR1_EL1, &host_isar.id_aa64isar1 },
        { HV_SYS_REG_ID_AA64MMFR0_EL1, &host_isar.id_aa64mmfr0 },
        { HV_SYS_REG_ID_AA64MMFR1_EL1, &host_isar.id_aa64mmfr1 },
        { HV_SYS_REG_ID_AA64MMFR2_EL1, &host_isar.id_aa64mmfr2 },
    };
    hv_vcpu_t fd;
    hv_return_t r = HV_SUCCESS;
    hv_vcpu_exit_t *exit;
    int i;

    ahcf->dtb_compatible = "arm,arm-v8";
    ahcf->features = (1ULL << ARM_FEATURE_V8) |
                     (1ULL << ARM_FEATURE_NEON) |
                     (1ULL << ARM_FEATURE_AARCH64) |
                     (1ULL << ARM_FEATURE_PMU) |
                     (1ULL << ARM_FEATURE_GENERIC_TIMER);

    /* We set up a scratch vCPU to extract the host ID registers */

    if (hv_vcpu_create(&fd, &exit, NULL) != HV_SUCCESS) {
        return false;
    }

    for (i = 0; i < ARRAY_SIZE(regs); i++) {
        r |= hv_vcpu_get_sys_reg(fd, regs[i].reg, regs[i].val);
    }
    r |= hv_vcpu_get_sys_reg(fd, HV_SYS_REG_MIDR_EL1, &ahcf->midr);
    r |= hv_vcpu_destroy(fd);

    ahcf->isar = host_isar;

    /*
     * A scratch vCPU returns SCTLR 0, so let's fill our default with the M1
     * boot SCTLR from https://github.com/AsahiLinux/m1n1/issues/97
     */
    ahcf->reset_sctlr = 0x30100180;
    /*
     * SCTLR.SPAN=1 disables the automatic setting of PSTATE.PAN on
     * exception entry. Start with that behaviour disabled for
     * compatibility; guest software can enable it by clearing SPAN.
     */
    ahcf->reset_sctlr |= 0x00800000;

    /* Make sure we don't advertise AArch32 support for EL0/EL1 */
    if ((host_isar.id_aa64pfr0 & 0xff) != 0x11) {
        return false;
    }

    return r == HV_SUCCESS;
}

void hvf_arm_set_cpu_features_from_host(ARMCPU *cpu)
{
    if (!arm_host_cpu_features.dtb_compatible) {
        if (!hvf_enabled() ||
            !hvf_arm_get_host_cpu_features(&arm_host_cpu_features)) {
            /*
             * We can't report this error yet, so flag that we need to
             * in arm_cpu_realizefn().
             */
            cpu->host_cpu_probe_failed = true;
            return;
        }
    }

    cpu->dtb_compatible = arm_host_cpu_features.dtb_compatible;
    cpu->isar = arm_host_cpu_features.isar;
    cpu->env.features = arm_host_cpu_features.features;
    cpu->midr = arm_host_cpu_features.midr;
    cpu->reset_sctlr = arm_host_cpu_features.reset_sctlr;
}

void hvf_arch_vcpu_destroy(CPUState *cpu)
{
}

int hvf_arch_init_vcpu(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint32_t sregs_match_len = ARRAY_SIZE(hvf_sreg_match);
    uint32_t sregs_cnt = 0;
    uint64_t pfr;
    hv_return_t ret;
    int i;

    env->aarch64 = true;
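    /*
     * The guest runs against the host's generic timer, so mirror the
     * host counter frequency (CNTFRQ_EL0) into QEMU's CPU model.
     */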
    asm volatile("mrs %0, cntfrq_el0" : "=r"(arm_cpu->gt_cntfrq_hz));

    /* Allocate enough space for our sysreg sync */
    arm_cpu->cpreg_indexes = g_renew(uint64_t, arm_cpu->cpreg_indexes,
                                     sregs_match_len);
    arm_cpu->cpreg_values = g_renew(uint64_t, arm_cpu->cpreg_values,
                                    sregs_match_len);
    arm_cpu->cpreg_vmstate_indexes = g_renew(uint64_t,
                                             arm_cpu->cpreg_vmstate_indexes,
                                             sregs_match_len);
    arm_cpu->cpreg_vmstate_values = g_renew(uint64_t,
                                            arm_cpu->cpreg_vmstate_values,
                                            sregs_match_len);

    memset(arm_cpu->cpreg_values, 0, sregs_match_len * sizeof(uint64_t));

    /* Populate cp list for all known sysregs */
    for (i = 0; i < sregs_match_len; i++) {
        const ARMCPRegInfo *ri;
        uint32_t key = hvf_sreg_match[i].key;

        ri = get_arm_cp_reginfo(arm_cpu->cp_regs, key);
        if (ri) {
            assert(!(ri->type & ARM_CP_NO_RAW));
            hvf_sreg_match[i].cp_idx = sregs_cnt;
            arm_cpu->cpreg_indexes[sregs_cnt++] = cpreg_to_kvm_id(key);
        } else {
            hvf_sreg_match[i].cp_idx = -1;
        }
    }
    arm_cpu->cpreg_array_len = sregs_cnt;
    arm_cpu->cpreg_vmstate_array_len = sregs_cnt;

    assert(write_cpustate_to_list(arm_cpu, false));

    /* Set CP_NO_RAW system registers on init */
    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MIDR_EL1,
                              arm_cpu->midr);
    assert_hvf_ok(ret);

    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_MPIDR_EL1,
                              arm_cpu->mp_affinity);
    assert_hvf_ok(ret);

    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, &pfr);
    assert_hvf_ok(ret);
    pfr |= env->gicv3state ? (1 << 24) : 0;
    ret = hv_vcpu_set_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64PFR0_EL1, pfr);
    assert_hvf_ok(ret);

    /* We're limited to underlying hardware caps, override internal versions */
    ret = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_ID_AA64MMFR0_EL1,
                              &arm_cpu->isar.id_aa64mmfr0);
    assert_hvf_ok(ret);

    return 0;
}

void hvf_kick_vcpu_thread(CPUState *cpu)
{
    cpus_kick_thread(cpu);
    hv_vcpus_exit(&cpu->hvf->fd, 1);
}

static void hvf_raise_exception(CPUState *cpu, uint32_t excp,
                                uint32_t syndrome)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    cpu->exception_index = excp;
    env->exception.target_el = 1;
    env->exception.syndrome = syndrome;

    arm_cpu_do_interrupt(cpu);
}

static void hvf_psci_cpu_off(ARMCPU *arm_cpu)
{
    int32_t ret = arm_set_cpu_off(arm_cpu->mp_affinity);
    assert(ret == QEMU_ARM_POWERCTL_RET_SUCCESS);
}

/*
 * Handle a PSCI call.
 *
 * Returns true on success,
 *         false when the PSCI call is unknown.
 */
static bool hvf_handle_psci_call(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t param[4] = {
        env->xregs[0],
        env->xregs[1],
        env->xregs[2],
        env->xregs[3]
    };
    uint64_t context_id, mpidr;
    bool target_aarch64 = true;
    CPUState *target_cpu_state;
    ARMCPU *target_cpu;
    target_ulong entry;
    int target_el = 1;
    int32_t ret = 0;

    trace_hvf_psci_call(param[0], param[1], param[2], param[3],
                        arm_cpu->mp_affinity);

    switch (param[0]) {
    case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        ret = QEMU_PSCI_VERSION_1_1;
        break;
    case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        ret = QEMU_PSCI_0_2_RET_TOS_MIGRATION_NOT_REQUIRED; /* No trusted OS */
        break;
    case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
    case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        mpidr = param[1];

        switch (param[2]) {
        case 0:
            target_cpu_state = arm_get_cpu_by_id(mpidr);
            if (!target_cpu_state) {
                ret = QEMU_PSCI_RET_INVALID_PARAMS;
                break;
            }
            target_cpu = ARM_CPU(target_cpu_state);

            ret = target_cpu->power_state;
            break;
        default:
            /* Everything above affinity level 0 is always on. */
            ret = 0;
        }
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET);
        /*
         * QEMU reset and shutdown are async requests, but PSCI
         * mandates that we never return from the reset/shutdown
         * call, so power the CPU off now so it doesn't execute
         * anything further.
         */
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN);
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN_CPU_ON:
    case QEMU_PSCI_0_2_FN64_CPU_ON:
        mpidr = param[1];
        entry = param[2];
        context_id = param[3];
        ret = arm_set_cpu_on(mpidr, entry, context_id,
                             target_el, target_aarch64);
        break;
    case QEMU_PSCI_0_1_FN_CPU_OFF:
    case QEMU_PSCI_0_2_FN_CPU_OFF:
        hvf_psci_cpu_off(arm_cpu);
        break;
    case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
    case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        /* Affinity levels are not supported in QEMU */
        if (param[1] & 0xfffe0000) {
            ret = QEMU_PSCI_RET_INVALID_PARAMS;
            break;
        }
        /* Powerdown is not supported, we always go into WFI */
        env->xregs[0] = 0;
        hvf_wfi(cpu);
        break;
    case QEMU_PSCI_0_1_FN_MIGRATE:
    case QEMU_PSCI_0_2_FN_MIGRATE:
        ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        break;
    case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
        switch (param[1]) {
        case QEMU_PSCI_0_2_FN_PSCI_VERSION:
        case QEMU_PSCI_0_2_FN_MIGRATE_INFO_TYPE:
        case QEMU_PSCI_0_2_FN_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN64_AFFINITY_INFO:
        case QEMU_PSCI_0_2_FN_SYSTEM_RESET:
        case QEMU_PSCI_0_2_FN_SYSTEM_OFF:
        case QEMU_PSCI_0_1_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN_CPU_ON:
        case QEMU_PSCI_0_2_FN64_CPU_ON:
        case QEMU_PSCI_0_1_FN_CPU_OFF:
        case QEMU_PSCI_0_2_FN_CPU_OFF:
        case QEMU_PSCI_0_1_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN_CPU_SUSPEND:
        case QEMU_PSCI_0_2_FN64_CPU_SUSPEND:
        case QEMU_PSCI_1_0_FN_PSCI_FEATURES:
            ret = 0;
            break;
        case QEMU_PSCI_0_1_FN_MIGRATE:
        case QEMU_PSCI_0_2_FN_MIGRATE:
        default:
            ret = QEMU_PSCI_RET_NOT_SUPPORTED;
        }
        break;
    default:
        return false;
    }

    env->xregs[0] = ret;
    return true;
}

static bool is_id_sysreg(uint32_t reg)
{
    return SYSREG_OP0(reg) == 3 &&
           SYSREG_OP1(reg) == 0 &&
           SYSREG_CRN(reg) == 0 &&
           SYSREG_CRM(reg) >= 1 &&
           SYSREG_CRM(reg) < 8;
}

static int hvf_sysreg_read(CPUState *cpu, uint32_t reg, uint32_t rt)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    uint64_t val = 0;

    switch (reg) {
    case SYSREG_CNTPCT_EL0:
        val = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) /
              gt_cntfrq_period_ns(arm_cpu);
        break;
    case SYSREG_PMCR_EL0:
        val = env->cp15.c9_pmcr;
        break;
    case SYSREG_PMCCNTR_EL0:
        pmu_op_start(env);
        val = env->cp15.c15_ccnt;
        pmu_op_finish(env);
        break;
    case SYSREG_PMCNTENCLR_EL0:
        val = env->cp15.c9_pmcnten;
        break;
    case SYSREG_PMOVSCLR_EL0:
        val = env->cp15.c9_pmovsr;
        break;
    case SYSREG_PMSELR_EL0:
        val = env->cp15.c9_pmselr;
        break;
    case SYSREG_PMINTENCLR_EL1:
        val = env->cp15.c9_pminten;
        break;
    case SYSREG_PMCCFILTR_EL0:
        val = env->cp15.pmccfiltr_el0;
        break;
    case SYSREG_PMCNTENSET_EL0:
        val = env->cp15.c9_pmcnten;
        break;
    case SYSREG_PMUSERENR_EL0:
        val = env->cp15.c9_pmuserenr;
        break;
    case SYSREG_PMCEID0_EL0:
    case SYSREG_PMCEID1_EL0:
        /* We can't really count anything yet, declare all events invalid */
        val = 0;
        break;
    case SYSREG_OSLSR_EL1:
        val = env->cp15.oslsr_el1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    default:
        if (is_id_sysreg(reg)) {
            /* ID system registers read as RES0 */
            val = 0;
            break;
        }
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_read(env->pc, reg,
                                        SYSREG_OP0(reg),
                                        SYSREG_OP1(reg),
                                        SYSREG_CRN(reg),
                                        SYSREG_CRM(reg),
                                        SYSREG_OP2(reg));
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    trace_hvf_sysreg_read(reg,
                          SYSREG_OP0(reg),
                          SYSREG_OP1(reg),
                          SYSREG_CRN(reg),
                          SYSREG_CRM(reg),
                          SYSREG_OP2(reg),
                          val);
    hvf_set_reg(cpu, rt, val);

    return 0;
}

static void pmu_update_irq(CPUARMState *env)
{
    ARMCPU *cpu = env_archcpu(env);
    qemu_set_irq(cpu->pmu_interrupt, (env->cp15.c9_pmcr & PMCRE) &&
            (env->cp15.c9_pminten & env->cp15.c9_pmovsr));
}

static bool pmu_event_supported(uint16_t number)
{
    return false;
}

/*
 * Returns true if the counter (pass 31 for PMCCNTR) should count events using
 * the current EL, security state, and register configuration.
 */
static bool pmu_counter_enabled(CPUARMState *env, uint8_t counter)
{
    uint64_t filter;
    bool enabled, filtered = true;
    int el = arm_current_el(env);

    enabled = (env->cp15.c9_pmcr & PMCRE) &&
              (env->cp15.c9_pmcnten & (1 << counter));

    if (counter == 31) {
        filter = env->cp15.pmccfiltr_el0;
    } else {
        filter = env->cp15.c14_pmevtyper[counter];
    }

    if (el == 0) {
        filtered = filter & PMXEVTYPER_U;
    } else if (el == 1) {
        filtered = filter & PMXEVTYPER_P;
    }

    if (counter != 31) {
        /*
         * If not checking PMCCNTR, ensure the counter is setup to an event we
         * support
         */
        uint16_t event = filter & PMXEVTYPER_EVTCOUNT;
        if (!pmu_event_supported(event)) {
            return false;
        }
    }

    return enabled && !filtered;
}

static void pmswinc_write(CPUARMState *env, uint64_t value)
{
    unsigned int i;
    for (i = 0; i < pmu_num_counters(env); i++) {
        /* Increment a counter's count iff: */
        if ((value & (1 << i)) && /* counter's bit is set */
                /* counter is enabled and not filtered */
                pmu_counter_enabled(env, i) &&
                /* counter is SW_INCR */
                (env->cp15.c14_pmevtyper[i] & PMXEVTYPER_EVTCOUNT) == 0x0) {
            /*
             * Detect if this write causes an overflow since we can't predict
             * PMSWINC overflows like we can for other events
             */
            uint32_t new_pmswinc = env->cp15.c14_pmevcntr[i] + 1;

            if (env->cp15.c14_pmevcntr[i] & ~new_pmswinc & INT32_MIN) {
                env->cp15.c9_pmovsr |= (1 << i);
                pmu_update_irq(env);
            }

            env->cp15.c14_pmevcntr[i] = new_pmswinc;
        }
    }
}

static int hvf_sysreg_write(CPUState *cpu, uint32_t reg, uint64_t val)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;

    trace_hvf_sysreg_write(reg,
                           SYSREG_OP0(reg),
                           SYSREG_OP1(reg),
                           SYSREG_CRN(reg),
                           SYSREG_CRM(reg),
                           SYSREG_OP2(reg),
                           val);

    switch (reg) {
    case SYSREG_PMCCNTR_EL0:
        pmu_op_start(env);
        env->cp15.c15_ccnt = val;
        pmu_op_finish(env);
        break;
    case SYSREG_PMCR_EL0:
        pmu_op_start(env);

        if (val & PMCRC) {
            /* The counter has been reset */
            env->cp15.c15_ccnt = 0;
        }

        if (val & PMCRP) {
            unsigned int i;
            for (i = 0; i < pmu_num_counters(env); i++) {
                env->cp15.c14_pmevcntr[i] = 0;
            }
        }

        env->cp15.c9_pmcr &= ~PMCR_WRITABLE_MASK;
        env->cp15.c9_pmcr |= (val & PMCR_WRITABLE_MASK);

        pmu_op_finish(env);
        break;
    case SYSREG_PMUSERENR_EL0:
        env->cp15.c9_pmuserenr = val & 0xf;
        break;
    case SYSREG_PMCNTENSET_EL0:
        env->cp15.c9_pmcnten |= (val & pmu_counter_mask(env));
        break;
    case SYSREG_PMCNTENCLR_EL0:
        env->cp15.c9_pmcnten &= ~(val & pmu_counter_mask(env));
        break;
    case SYSREG_PMINTENCLR_EL1:
        pmu_op_start(env);
        /* PMINTENCLR is write-one-to-clear */
        env->cp15.c9_pminten &= ~val;
        pmu_op_finish(env);
        break;
    case SYSREG_PMOVSCLR_EL0:
        pmu_op_start(env);
        env->cp15.c9_pmovsr &= ~val;
        pmu_op_finish(env);
        break;
    case SYSREG_PMSWINC_EL0:
        pmu_op_start(env);
        pmswinc_write(env, val);
        pmu_op_finish(env);
        break;
    case SYSREG_PMSELR_EL0:
        env->cp15.c9_pmselr = val & 0x1f;
        break;
    case SYSREG_PMCCFILTR_EL0:
        pmu_op_start(env);
        env->cp15.pmccfiltr_el0 = val & PMCCFILTR_EL0;
        pmu_op_finish(env);
        break;
    case SYSREG_OSLAR_EL1:
        env->cp15.oslsr_el1 = val & 1;
        break;
    case SYSREG_OSDLR_EL1:
        /* Dummy register */
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_unhandled_sysreg_write(env->pc, reg,
                                         SYSREG_OP0(reg),
                                         SYSREG_OP1(reg),
                                         SYSREG_CRN(reg),
                                         SYSREG_CRM(reg),
                                         SYSREG_OP2(reg));
        hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        return 1;
    }

    return 0;
}

static int hvf_inject_interrupts(CPUState *cpu)
{
    if (cpu->interrupt_request & CPU_INTERRUPT_FIQ) {
        trace_hvf_inject_fiq();
        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_FIQ,
                                      true);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_HARD) {
        trace_hvf_inject_irq();
        hv_vcpu_set_pending_interrupt(cpu->hvf->fd, HV_INTERRUPT_TYPE_IRQ,
                                      true);
    }

    return 0;
}

static uint64_t hvf_vtimer_val_raw(void)
{
    /*
     * mach_absolute_time() returns the host counter value; the guest's
     * vtimer is defined relative to it, so subtract the VM offset we
     * maintain.
     */
    return mach_absolute_time() - hvf_state->vtimer_offset;
}

static uint64_t hvf_vtimer_val(void)
{
    if (!runstate_is_running()) {
        /* VM is paused, the vtimer value is in vtimer.vtimer_val */
        return vtimer.vtimer_val;
    }

    return hvf_vtimer_val_raw();
}

static void hvf_wait_for_ipi(CPUState *cpu, struct timespec *ts)
{
    /*
     * Use pselect to sleep so that other threads can IPI us while we're
     * sleeping.
     */
    qatomic_mb_set(&cpu->thread_kicked, false);
    qemu_mutex_unlock_iothread();
    pselect(0, 0, 0, 0, ts, &cpu->hvf->unblock_ipi_mask);
    qemu_mutex_lock_iothread();
}

static void hvf_wfi(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    struct timespec ts;
    hv_return_t r;
    uint64_t ctl;
    uint64_t cval;
    int64_t ticks_to_sleep;
    uint64_t seconds;
    uint64_t nanos;
    uint32_t cntfrq;

    if (cpu->interrupt_request & (CPU_INTERRUPT_HARD | CPU_INTERRUPT_FIQ)) {
        /* Interrupt pending, no need to wait */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    if (!(ctl & 1) || (ctl & 2)) {
        /* Timer disabled or masked, just wait for an IPI. */
        hvf_wait_for_ipi(cpu, NULL);
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CVAL_EL0, &cval);
    assert_hvf_ok(r);

    ticks_to_sleep = cval - hvf_vtimer_val();
    if (ticks_to_sleep < 0) {
        return;
    }

    cntfrq = gt_cntfrq_period_ns(arm_cpu);
    seconds = muldiv64(ticks_to_sleep, cntfrq, NANOSECONDS_PER_SECOND);
    ticks_to_sleep -= muldiv64(seconds, NANOSECONDS_PER_SECOND, cntfrq);
    nanos = ticks_to_sleep * cntfrq;

    /*
     * Don't sleep for less than the time a context switch would take,
     * so that we can satisfy fast timer requests on the same CPU.
     * Measurements on M1 show the sweet spot to be ~2ms.
     */
    if (!seconds && nanos < (2 * SCALE_MS)) {
        return;
    }

    ts = (struct timespec) { seconds, nanos };
    hvf_wait_for_ipi(cpu, &ts);
}

static void hvf_sync_vtimer(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    hv_return_t r;
    uint64_t ctl;
    bool irq_state;

    if (!cpu->hvf->vtimer_masked) {
        /* We will get notified on vtimer changes by hvf, nothing to do */
        return;
    }

    r = hv_vcpu_get_sys_reg(cpu->hvf->fd, HV_SYS_REG_CNTV_CTL_EL0, &ctl);
    assert_hvf_ok(r);

    irq_state = (ctl & (TMR_CTL_ENABLE | TMR_CTL_IMASK | TMR_CTL_ISTATUS)) ==
                (TMR_CTL_ENABLE | TMR_CTL_ISTATUS);
    qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], irq_state);

    if (!irq_state) {
        /* Timer no longer asserting, we can unmask it */
        hv_vcpu_set_vtimer_mask(cpu->hvf->fd, false);
        cpu->hvf->vtimer_masked = false;
    }
}

int hvf_vcpu_exec(CPUState *cpu)
{
    ARMCPU *arm_cpu = ARM_CPU(cpu);
    CPUARMState *env = &arm_cpu->env;
    hv_vcpu_exit_t *hvf_exit = cpu->hvf->exit;
    hv_return_t r;
    bool advance_pc = false;

    if (hvf_inject_interrupts(cpu)) {
        return EXCP_INTERRUPT;
    }

    if (cpu->halted) {
        return EXCP_HLT;
    }

    flush_cpu_state(cpu);

    qemu_mutex_unlock_iothread();
    assert_hvf_ok(hv_vcpu_run(cpu->hvf->fd));

    /* handle VMEXIT */
    uint64_t exit_reason = hvf_exit->reason;
    uint64_t syndrome = hvf_exit->exception.syndrome;
    uint32_t ec = syn_get_ec(syndrome);

    qemu_mutex_lock_iothread();
    switch (exit_reason) {
    case HV_EXIT_REASON_EXCEPTION:
        /* This is the main one, handle below. */
        break;
    case HV_EXIT_REASON_VTIMER_ACTIVATED:
        qemu_set_irq(arm_cpu->gt_timer_outputs[GTIMER_VIRT], 1);
        cpu->hvf->vtimer_masked = true;
        return 0;
    case HV_EXIT_REASON_CANCELED:
        /* we got kicked, no exit to process */
        return 0;
    default:
        g_assert_not_reached();
    }

    hvf_sync_vtimer(cpu);

    switch (ec) {
    case EC_DATAABORT: {
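        /*
         * Decode the ESR_EL2 ISS fields for a data abort: ISV (bit 24),
         * WnR (bit 6), S1PTW (bit 7), SAS (bits [23:22]), SRT (bits
         * [20:16]) and CM (bit 8).
         */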
        bool isv = syndrome & ARM_EL_ISV;
        bool iswrite = (syndrome >> 6) & 1;
        bool s1ptw = (syndrome >> 7) & 1;
        uint32_t sas = (syndrome >> 22) & 3;
        uint32_t len = 1 << sas;
        uint32_t srt = (syndrome >> 16) & 0x1f;
        uint32_t cm = (syndrome >> 8) & 0x1;
        uint64_t val = 0;

        trace_hvf_data_abort(env->pc, hvf_exit->exception.virtual_address,
                             hvf_exit->exception.physical_address, isv,
                             iswrite, s1ptw, len, srt);

        if (cm) {
            /* We don't cache MMIO regions */
            advance_pc = true;
            break;
        }

        assert(isv);

        if (iswrite) {
            val = hvf_get_reg(cpu, srt);
            address_space_write(&address_space_memory,
                                hvf_exit->exception.physical_address,
                                MEMTXATTRS_UNSPECIFIED, &val, len);
        } else {
            address_space_read(&address_space_memory,
                               hvf_exit->exception.physical_address,
                               MEMTXATTRS_UNSPECIFIED, &val, len);
            hvf_set_reg(cpu, srt, val);
        }

        advance_pc = true;
        break;
    }
    case EC_SYSTEMREGISTERTRAP: {
        bool isread = (syndrome >> 0) & 1;
        uint32_t rt = (syndrome >> 5) & 0x1f;
        uint32_t reg = syndrome & SYSREG_MASK;
        uint64_t val;
        int ret = 0;

        if (isread) {
            ret = hvf_sysreg_read(cpu, reg, rt);
        } else {
            val = hvf_get_reg(cpu, rt);
            ret = hvf_sysreg_write(cpu, reg, val);
        }

        advance_pc = !ret;
        break;
    }
    case EC_WFX_TRAP:
        advance_pc = true;
        if (!(syndrome & WFX_IS_WFE)) {
            hvf_wfi(cpu);
        }
        break;
    case EC_AA64_HVC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_HVC) {
            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_hvc(env->xregs[0]);
                /*
                 * SMCCC 1.3 section 5.2 says every unknown SMCCC call
                 * returns -1
                 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_hvc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    case EC_AA64_SMC:
        cpu_synchronize_state(cpu);
        if (arm_cpu->psci_conduit == QEMU_PSCI_CONDUIT_SMC) {
            advance_pc = true;

            if (!hvf_handle_psci_call(cpu)) {
                trace_hvf_unknown_smc(env->xregs[0]);
                /*
                 * SMCCC 1.3 section 5.2 says every unknown SMCCC call
                 * returns -1
                 */
                env->xregs[0] = -1;
            }
        } else {
            trace_hvf_unknown_smc(env->xregs[0]);
            hvf_raise_exception(cpu, EXCP_UDEF, syn_uncategorized());
        }
        break;
    default:
        cpu_synchronize_state(cpu);
        trace_hvf_exit(syndrome, ec, env->pc);
        error_report("0x%llx: unhandled exception ec=0x%x", env->pc, ec);
    }

    if (advance_pc) {
        uint64_t pc;

        flush_cpu_state(cpu);

        r = hv_vcpu_get_reg(cpu->hvf->fd, HV_REG_PC, &pc);
        assert_hvf_ok(r);
        pc += 4;
        r = hv_vcpu_set_reg(cpu->hvf->fd, HV_REG_PC, pc);
        assert_hvf_ok(r);
    }

    return 0;
}

static const VMStateDescription vmstate_hvf_vtimer = {
    .name = "hvf-vtimer",
    .version_id = 1,
    .minimum_version_id = 1,
    .fields = (VMStateField[]) {
        VMSTATE_UINT64(vtimer_val, HVFVTimer),
        VMSTATE_END_OF_LIST()
    },
};

static void hvf_vm_state_change(void *opaque, bool running, RunState state)
{
    HVFVTimer *s = opaque;

    if (running) {
        /* Update vtimer offset on all CPUs */
        hvf_state->vtimer_offset = mach_absolute_time() - s->vtimer_val;
        cpu_synchronize_all_states();
    } else {
        /* Remember vtimer value on every pause */
        s->vtimer_val = hvf_vtimer_val_raw();
    }
}

int hvf_arch_init(void)
{
    hvf_state->vtimer_offset = mach_absolute_time();
    vmstate_register(NULL, 0, &vmstate_hvf_vtimer, &vtimer);
    qemu_add_vm_change_state_handler(hvf_vm_state_change, &vtimer);
    return 0;
}