/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/kvm_mmu.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb(ish);
}

static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb(ish);
}

/* See note at ARM ARM B1.14.4 */
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

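	/*
	 * Mark every other physical CPU as needing a dcache flush before
	 * this vcpu runs on it again; the current CPU is handled below.
	 */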
	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:		/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:	/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:	/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();

	return true;
}

/*
 * Generic accessor for VM registers. Only called as long as HCR_TVM
 * is set.
 */
static bool access_vm_reg(struct kvm_vcpu *vcpu,
			  const struct sys_reg_params *p,
			  const struct sys_reg_desc *r)
{
	unsigned long val;

	BUG_ON(!p->is_write);

	val = *vcpu_reg(vcpu, p->Rt);
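	/*
	 * 64bit accesses (and AArch64 guests) update the whole shadow
	 * register; a 32bit AArch32 access only updates the low word.
	 */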
	if (!p->is_aarch32 || !p->is_32bit)
		vcpu_sys_reg(vcpu, r->reg) = val;
	else
		vcpu_cp15_64_low(vcpu, r->reg) = val & 0xffffffffUL;

	return true;
}

/*
 * SCTLR_EL1 accessor. Only called as long as HCR_TVM is set.  If the
 * guest enables the MMU, we stop trapping the VM sys_regs and leave
 * it in complete control of the caches.
 */
static bool access_sctlr(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *p,
			 const struct sys_reg_desc *r)
{
	access_vm_reg(vcpu, p, r);

	if (vcpu_has_cache_enabled(vcpu)) {	/* MMU+Caches enabled? */
		vcpu->arch.hcr_el2 &= ~HCR_TVM;
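		/*
		 * The guest is now in charge of its caches: flush the
		 * stage-2 mapped memory so it doesn't find stale cache
		 * lines for data written while its caches were off.
		 */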
		stage2_flush_vm(vcpu->kvm);
	}

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct sys_reg_params *p,
		    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
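	/* Bit 31 of MPIDR_EL1 is RES1, hence the 1UL << 31 below. */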
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },
	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  access_sctlr, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  access_vm_reg, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  access_vm_reg, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  access_vm_reg, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  access_vm_reg, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  pm_fake },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  pm_fake },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  access_vm_reg, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  access_vm_reg, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  access_vm_reg, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  pm_fake },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  pm_fake },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  pm_fake },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  pm_fake },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  pm_fake },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  pm_fake },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  pm_fake },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  pm_fake },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  pm_fake },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  pm_fake },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  pm_fake },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  pm_fake },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  pm_fake },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

/*
 * Trapped cp15 registers. TTBR0/TTBR1 get a double encoding,
 * depending on the way they are accessed (as a 32bit or a 64bit
 * register).
 */
static const struct sys_reg_desc cp15_regs[] = {
	{ Op1( 0), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 1), CRm( 0), Op2( 0), access_sctlr, NULL, c1_SCTLR },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 0), access_vm_reg, NULL, c2_TTBR0 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 1), access_vm_reg, NULL, c2_TTBR1 },
	{ Op1( 0), CRn( 2), CRm( 0), Op2( 2), access_vm_reg, NULL, c2_TTBCR },
	{ Op1( 0), CRn( 3), CRm( 0), Op2( 0), access_vm_reg, NULL, c3_DACR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 0), access_vm_reg, NULL, c5_DFSR },
	{ Op1( 0), CRn( 5), CRm( 0), Op2( 1), access_vm_reg, NULL, c5_IFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 0), access_vm_reg, NULL, c5_ADFSR },
	{ Op1( 0), CRn( 5), CRm( 1), Op2( 1), access_vm_reg, NULL, c5_AIFSR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 0), access_vm_reg, NULL, c6_DFAR },
	{ Op1( 0), CRn( 6), CRm( 0), Op2( 2), access_vm_reg, NULL, c6_IFAR },

	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },

	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },

	{ Op1( 0), CRn(10), CRm( 2), Op2( 0), access_vm_reg, NULL, c10_PRRR },
	{ Op1( 0), CRn(10), CRm( 2), Op2( 1), access_vm_reg, NULL, c10_NMRR },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 0), access_vm_reg, NULL, c10_AMAIR0 },
	{ Op1( 0), CRn(10), CRm( 3), Op2( 1), access_vm_reg, NULL, c10_AMAIR1 },
	{ Op1( 0), CRn(13), CRm( 0), Op2( 1), access_vm_reg, NULL, c13_CID },

	{ Op1( 1), CRn( 0), CRm( 2), Op2( 0), access_vm_reg, NULL, c2_TTBR1 },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void emulate_cp15(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, false, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return;
		}
		/* If access function fails, it should complain. */
	}

	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

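	/* Decode the 64bit (mcrr/mrrc) access from the HSR ISS field. */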
	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	emulate_cp15(vcpu, &params);

	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	emulate_cp15(vcpu, &params);
	return 1;
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

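	/*
	 * Decode the trapped MSR/MRS instruction from the ISS encoding
	 * in ESR_EL2.
	 */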
	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			   | KVM_REG_ARM_COPROC_MASK
			   | KVM_REG_ARM64_SYSREG_OP0_MASK
			   | KVM_REG_ARM64_SYSREG_OP1_MASK
			   | KVM_REG_ARM64_SYSREG_CRN_MASK
			   | KVM_REG_ARM64_SYSREG_CRM_MASK
			   | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

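/*
 * Each get_*() hook below simply snapshots the host's value of the
 * register into the descriptor itself (hence the const cast).
 */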
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(u64 *val, const void __user *uaddr, u64 id)
{
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const u64 *val, u64 id)
{
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM64 | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
		BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in a reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}