/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/kvm/coproc.c:
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Authors: Rusty Russell <rusty@rustcorp.com.au>
 *          Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_coproc.h>
#include <asm/cacheflush.h>
#include <asm/cputype.h>
#include <trace/events/kvm.h>

#include "sys_regs.h"

/*
 * All of this file is extremely similar to the ARM coproc.c, but the
 * types are different. My gut feeling is that it should be pretty
 * easy to merge, but that would be an ABI breakage -- again. VFP
 * would also need to be abstracted.
 *
 * For AArch32, we only take care of what is being trapped. Anything
 * that has to do with init and userspace access has to go via the
 * 64bit interface.
 */

/* 3 bits per cache level, as per CLIDR, but non-existent caches always 0 */
static u32 cache_levels;

/* CSSELR values; used to index KVM_REG_ARM_DEMUX_ID_CCSIDR */
#define CSSELR_MAX 12

/* Which cache CCSIDR represents depends on CSSELR value. */
static u32 get_ccsidr(u32 csselr)
{
	u32 ccsidr;

	/* Make sure no one else changes CSSELR during this! */
	local_irq_disable();
	/* Put value into CSSELR */
	asm volatile("msr csselr_el1, %x0" : : "r" (csselr));
	isb();
	/* Read result out of CCSIDR */
	asm volatile("mrs %0, ccsidr_el1" : "=r" (ccsidr));
	local_irq_enable();

	return ccsidr;
}

static void do_dc_cisw(u32 val)
{
	asm volatile("dc cisw, %x0" : : "r" (val));
	dsb();
}

static void do_dc_csw(u32 val)
{
	asm volatile("dc csw, %x0" : : "r" (val));
	dsb();
}

/* See note at ARM ARM B1.14.4 */
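/*
 * Set/way cache maintenance is trapped and emulated here: every other
 * physical CPU is flagged as needing a dcache flush before this vcpu
 * runs there again, and if the vcpu has already been preempted since
 * it last ran we cannot replay the operation faithfully, so we fall
 * back to a full cache flush.
 */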
static bool access_dcsw(struct kvm_vcpu *vcpu,
			const struct sys_reg_params *p,
			const struct sys_reg_desc *r)
{
	unsigned long val;
	int cpu;

	if (!p->is_write)
		return read_from_write_only(vcpu, p);

	cpu = get_cpu();

	cpumask_setall(&vcpu->arch.require_dcache_flush);
	cpumask_clear_cpu(cpu, &vcpu->arch.require_dcache_flush);

	/* If we were already preempted, take the long way around */
	if (cpu != vcpu->arch.last_pcpu) {
		flush_cache_all();
		goto done;
	}

	val = *vcpu_reg(vcpu, p->Rt);

	switch (p->CRm) {
	case 6:			/* Upgrade DCISW to DCCISW, as per HCR.SWIO */
	case 14:		/* DCCISW */
		do_dc_cisw(val);
		break;

	case 10:		/* DCCSW */
		do_dc_csw(val);
		break;
	}

done:
	put_cpu();

	return true;
}

/*
 * We could trap ID_DFR0 and tell the guest we don't support performance
 * monitoring. Unfortunately the patch to make the kernel check ID_DFR0 was
 * NAKed, so it will read the PMCR anyway.
 *
 * Therefore we tell the guest we have 0 counters. Unfortunately, we
 * must always support PMCCNTR (the cycle counter): we just RAZ/WI for
 * all PM registers, which doesn't crash the guest kernel at least.
 */
static bool pm_fake(struct kvm_vcpu *vcpu,
		    const struct sys_reg_params *p,
		    const struct sys_reg_desc *r)
{
	if (p->is_write)
		return ignore_write(vcpu, p);
	else
		return read_zero(vcpu, p);
}

static void reset_amair_el1(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	u64 amair;

	asm volatile("mrs %0, amair_el1\n" : "=r" (amair));
	vcpu_sys_reg(vcpu, AMAIR_EL1) = amair;
}

static void reset_mpidr(struct kvm_vcpu *vcpu, const struct sys_reg_desc *r)
{
	/*
	 * Simply map the vcpu_id into the Aff0 field of the MPIDR.
	 */
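	/* Bit 31 of MPIDR_EL1 is RES1. */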
	vcpu_sys_reg(vcpu, MPIDR_EL1) = (1UL << 31) | (vcpu->vcpu_id & 0xff);
}

/*
 * Architected system registers.
 * Important: Must be sorted ascending by Op0, Op1, CRn, CRm, Op2
 */
static const struct sys_reg_desc sys_reg_descs[] = {
	/* DC ISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b0110), Op2(0b010),
	  access_dcsw },
	/* DC CSW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1010), Op2(0b010),
	  access_dcsw },
	/* DC CISW */
	{ Op0(0b01), Op1(0b000), CRn(0b0111), CRm(0b1110), Op2(0b010),
	  access_dcsw },

	/* TEECR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEECR32_EL1, 0 },
	/* TEEHBR32_EL1 */
	{ Op0(0b10), Op1(0b010), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, TEEHBR32_EL1, 0 },
	/* DBGVCR32_EL2 */
	{ Op0(0b10), Op1(0b100), CRn(0b0000), CRm(0b0111), Op2(0b000),
	  NULL, reset_val, DBGVCR32_EL2, 0 },

	/* MPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b101),
	  NULL, reset_mpidr, MPIDR_EL1 },
	/* SCTLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, SCTLR_EL1, 0x00C50078 },
	/* CPACR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0001), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, CPACR_EL1, 0 },
	/* TTBR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, TTBR0_EL1 },
	/* TTBR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, TTBR1_EL1 },
	/* TCR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0010), CRm(0b0000), Op2(0b010),
	  NULL, reset_val, TCR_EL1, 0 },

	/* AFSR0_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b000),
	  NULL, reset_unknown, AFSR0_EL1 },
	/* AFSR1_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0001), Op2(0b001),
	  NULL, reset_unknown, AFSR1_EL1 },
	/* ESR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0101), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, ESR_EL1 },
	/* FAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0110), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, FAR_EL1 },
	/* PAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b0111), CRm(0b0100), Op2(0b000),
	  NULL, reset_unknown, PAR_EL1 },

	/* PMINTENSET_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b001),
	  pm_fake },
	/* PMINTENCLR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1001), CRm(0b1110), Op2(0b010),
	  pm_fake },

	/* MAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0010), Op2(0b000),
	  NULL, reset_unknown, MAIR_EL1 },
	/* AMAIR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1010), CRm(0b0011), Op2(0b000),
	  NULL, reset_amair_el1, AMAIR_EL1 },

	/* VBAR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1100), CRm(0b0000), Op2(0b000),
	  NULL, reset_val, VBAR_EL1, 0 },
	/* CONTEXTIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b001),
	  NULL, reset_val, CONTEXTIDR_EL1, 0 },
	/* TPIDR_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1101), CRm(0b0000), Op2(0b100),
	  NULL, reset_unknown, TPIDR_EL1 },

	/* CNTKCTL_EL1 */
	{ Op0(0b11), Op1(0b000), CRn(0b1110), CRm(0b0001), Op2(0b000),
	  NULL, reset_val, CNTKCTL_EL1, 0},

	/* CSSELR_EL1 */
	{ Op0(0b11), Op1(0b010), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, CSSELR_EL1 },

	/* PMCR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b000),
	  pm_fake },
	/* PMCNTENSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b001),
	  pm_fake },
	/* PMCNTENCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b010),
	  pm_fake },
	/* PMOVSCLR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b011),
	  pm_fake },
	/* PMSWINC_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b100),
	  pm_fake },
	/* PMSELR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b101),
	  pm_fake },
	/* PMCEID0_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b110),
	  pm_fake },
	/* PMCEID1_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1100), Op2(0b111),
	  pm_fake },
	/* PMCCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b000),
	  pm_fake },
	/* PMXEVTYPER_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b001),
	  pm_fake },
	/* PMXEVCNTR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1101), Op2(0b010),
	  pm_fake },
	/* PMUSERENR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b000),
	  pm_fake },
	/* PMOVSSET_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1001), CRm(0b1110), Op2(0b011),
	  pm_fake },

	/* TPIDR_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b010),
	  NULL, reset_unknown, TPIDR_EL0 },
	/* TPIDRRO_EL0 */
	{ Op0(0b11), Op1(0b011), CRn(0b1101), CRm(0b0000), Op2(0b011),
	  NULL, reset_unknown, TPIDRRO_EL0 },

	/* DACR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0011), CRm(0b0000), Op2(0b000),
	  NULL, reset_unknown, DACR32_EL2 },
	/* IFSR32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0000), Op2(0b001),
	  NULL, reset_unknown, IFSR32_EL2 },
	/* FPEXC32_EL2 */
	{ Op0(0b11), Op1(0b100), CRn(0b0101), CRm(0b0011), Op2(0b000),
	  NULL, reset_val, FPEXC32_EL2, 0x70 },
};

/* Trapped cp15 registers */
static const struct sys_reg_desc cp15_regs[] = {
	/*
	 * DC{C,I,CI}SW operations:
	 */
	{ Op1( 0), CRn( 7), CRm( 6), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(10), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 7), CRm(14), Op2( 2), access_dcsw },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 3), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 5), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 6), pm_fake },
	{ Op1( 0), CRn( 9), CRm(12), Op2( 7), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(13), Op2( 2), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 0), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 1), pm_fake },
	{ Op1( 0), CRn( 9), CRm(14), Op2( 2), pm_fake },
};

/* Target specific emulation tables */
static struct kvm_sys_reg_target_table *target_tables[KVM_ARM_NUM_TARGETS];

void kvm_register_target_sys_reg_table(unsigned int target,
				       struct kvm_sys_reg_target_table *table)
{
	target_tables[target] = table;
}

/* Get specific register table for this target. */
static const struct sys_reg_desc *get_target_table(unsigned target,
						   bool mode_is_64,
						   size_t *num)
{
	struct kvm_sys_reg_target_table *table;

	table = target_tables[target];
	if (mode_is_64) {
		*num = table->table64.num;
		return table->table64.table;
	} else {
		*num = table->table32.num;
		return table->table32.table;
	}
}

static const struct sys_reg_desc *find_reg(const struct sys_reg_params *params,
					   const struct sys_reg_desc table[],
					   unsigned int num)
{
	unsigned int i;

	for (i = 0; i < num; i++) {
		const struct sys_reg_desc *r = &table[i];

		if (params->Op0 != r->Op0)
			continue;
		if (params->Op1 != r->Op1)
			continue;
		if (params->CRn != r->CRn)
			continue;
		if (params->CRm != r->CRm)
			continue;
		if (params->Op2 != r->Op2)
			continue;

		return r;
	}
	return NULL;
}

int kvm_handle_cp14_load_store(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

int kvm_handle_cp14_access(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	kvm_inject_undefined(vcpu);
	return 1;
}

static void emulate_cp15(struct kvm_vcpu *vcpu,
			 const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, false, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, cp15_regs, ARRAY_SIZE(cp15_regs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return;
		}
		/* If access function fails, it should complain. */
	}

	kvm_err("Unsupported guest CP15 access at: %08lx\n", *vcpu_pc(vcpu));
	print_sys_reg_instr(params);
	kvm_inject_undefined(vcpu);
}

/**
 * kvm_handle_cp15_64 -- handles a mrrc/mcrr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_64(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);
	int Rt2 = (hsr >> 10) & 0xf;

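	/* Decode the MCRR/MRRC trap information from the HSR ISS field. */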
	params.is_aarch32 = true;
	params.is_32bit = false;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);

	params.Op0 = 0;
	params.Op1 = (hsr >> 16) & 0xf;
	params.Op2 = 0;
	params.CRn = 0;

	/*
	 * Massive hack here. Store Rt2 in the top 32bits so we only
	 * have one register to deal with. As we use the same trap
	 * backends between AArch32 and AArch64, we get away with it.
	 */
	if (params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val &= 0xffffffff;
		val |= *vcpu_reg(vcpu, Rt2) << 32;
		*vcpu_reg(vcpu, params.Rt) = val;
	}

	emulate_cp15(vcpu, &params);

	/* Do the opposite hack for the read side */
	if (!params.is_write) {
		u64 val = *vcpu_reg(vcpu, params.Rt);
		val >>= 32;
		*vcpu_reg(vcpu, Rt2) = val;
	}

	return 1;
}

/**
 * kvm_handle_cp15_32 -- handles a mrc/mcr trap on a guest CP15 access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_cp15_32(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	u32 hsr = kvm_vcpu_get_hsr(vcpu);

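	/* Decode the MCR/MRC trap information from the HSR ISS field. */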
	params.is_aarch32 = true;
	params.is_32bit = true;
	params.CRm = (hsr >> 1) & 0xf;
	params.Rt = (hsr >> 5) & 0xf;
	params.is_write = ((hsr & 1) == 0);
	params.CRn = (hsr >> 10) & 0xf;
	params.Op0 = 0;
	params.Op1 = (hsr >> 14) & 0x7;
	params.Op2 = (hsr >> 17) & 0x7;

	emulate_cp15(vcpu, &params);
	return 1;
}

static int emulate_sys_reg(struct kvm_vcpu *vcpu,
			   const struct sys_reg_params *params)
{
	size_t num;
	const struct sys_reg_desc *table, *r;

	table = get_target_table(vcpu->arch.target, true, &num);

	/* Search target-specific then generic table. */
	r = find_reg(params, table, num);
	if (!r)
		r = find_reg(params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	if (likely(r)) {
		/*
		 * Not having an accessor means that we have
		 * configured a trap that we don't know how to
		 * handle. This certainly qualifies as a gross bug
		 * that should be fixed right away.
		 */
		BUG_ON(!r->access);

		if (likely(r->access(vcpu, params, r))) {
			/* Skip instruction, since it was emulated */
			kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
			return 1;
		}
		/* If access function fails, it should complain. */
	} else {
		kvm_err("Unsupported guest sys_reg access at: %lx\n",
			*vcpu_pc(vcpu));
		print_sys_reg_instr(params);
	}
	kvm_inject_undefined(vcpu);
	return 1;
}

static void reset_sys_reg_descs(struct kvm_vcpu *vcpu,
				const struct sys_reg_desc *table, size_t num)
{
	unsigned long i;

	for (i = 0; i < num; i++)
		if (table[i].reset)
			table[i].reset(vcpu, &table[i]);
}

/**
 * kvm_handle_sys_reg -- handles a mrs/msr trap on a guest sys_reg access
 * @vcpu: The VCPU pointer
 * @run:  The kvm_run struct
 */
int kvm_handle_sys_reg(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	struct sys_reg_params params;
	unsigned long esr = kvm_vcpu_get_hsr(vcpu);

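	/* Decode the MSR/MRS trap information from the ESR ISS field. */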
	params.is_aarch32 = false;
	params.is_32bit = false;
	params.Op0 = (esr >> 20) & 3;
	params.Op1 = (esr >> 14) & 0x7;
	params.CRn = (esr >> 10) & 0xf;
	params.CRm = (esr >> 1) & 0xf;
	params.Op2 = (esr >> 17) & 0x7;
	params.Rt = (esr >> 5) & 0x1f;
	params.is_write = !(esr & 1);

	return emulate_sys_reg(vcpu, &params);
}

/******************************************************************************
 * Userspace API
 *****************************************************************************/

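/*
 * Unpack a userspace register ID into its Op0/Op1/CRn/CRm/Op2 encoding.
 * Only 64-bit sized sys_reg IDs with no stray bits set are accepted.
 */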
static bool index_to_params(u64 id, struct sys_reg_params *params)
{
	switch (id & KVM_REG_SIZE_MASK) {
	case KVM_REG_SIZE_U64:
		/* Any unused index bits means it's not valid. */
		if (id & ~(KVM_REG_ARCH_MASK | KVM_REG_SIZE_MASK
			      | KVM_REG_ARM_COPROC_MASK
			      | KVM_REG_ARM64_SYSREG_OP0_MASK
			      | KVM_REG_ARM64_SYSREG_OP1_MASK
			      | KVM_REG_ARM64_SYSREG_CRN_MASK
			      | KVM_REG_ARM64_SYSREG_CRM_MASK
			      | KVM_REG_ARM64_SYSREG_OP2_MASK))
			return false;
		params->Op0 = ((id & KVM_REG_ARM64_SYSREG_OP0_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP0_SHIFT);
		params->Op1 = ((id & KVM_REG_ARM64_SYSREG_OP1_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP1_SHIFT);
		params->CRn = ((id & KVM_REG_ARM64_SYSREG_CRN_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRN_SHIFT);
		params->CRm = ((id & KVM_REG_ARM64_SYSREG_CRM_MASK)
			       >> KVM_REG_ARM64_SYSREG_CRM_SHIFT);
		params->Op2 = ((id & KVM_REG_ARM64_SYSREG_OP2_MASK)
			       >> KVM_REG_ARM64_SYSREG_OP2_SHIFT);
		return true;
	default:
		return false;
	}
}

/* Decode an index value, and find the sys_reg_desc entry. */
static const struct sys_reg_desc *index_to_sys_reg_desc(struct kvm_vcpu *vcpu,
							u64 id)
{
	size_t num;
	const struct sys_reg_desc *table, *r;
	struct sys_reg_params params;

	/* We only do sys_reg for now. */
	if ((id & KVM_REG_ARM_COPROC_MASK) != KVM_REG_ARM64_SYSREG)
		return NULL;

	if (!index_to_params(id, &params))
		return NULL;

	table = get_target_table(vcpu->arch.target, true, &num);
	r = find_reg(&params, table, num);
	if (!r)
		r = find_reg(&params, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	/* Not saved in the sys_reg array? */
	if (r && !r->reg)
		r = NULL;

	return r;
}

/*
 * These are the invariant sys_reg registers: we let the guest see the
 * host versions of these, so they're part of the guest state.
 *
 * A future CPU may provide a mechanism to present different values to
 * the guest, or a future kvm may trap them.
 */

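/*
 * Each FUNCTION_INVARIANT() invocation generates a get_xxx() helper
 * that reads the host's copy of the register and stashes it in the
 * descriptor's ->val field, which is what userspace then sees.
 */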
#define FUNCTION_INVARIANT(reg)						\
	static void get_##reg(struct kvm_vcpu *v,			\
			      const struct sys_reg_desc *r)		\
	{								\
		u64 val;						\
									\
		asm volatile("mrs %0, " __stringify(reg) "\n"		\
			     : "=r" (val));				\
		((struct sys_reg_desc *)r)->val = val;			\
	}

FUNCTION_INVARIANT(midr_el1)
FUNCTION_INVARIANT(ctr_el0)
FUNCTION_INVARIANT(revidr_el1)
FUNCTION_INVARIANT(id_pfr0_el1)
FUNCTION_INVARIANT(id_pfr1_el1)
FUNCTION_INVARIANT(id_dfr0_el1)
FUNCTION_INVARIANT(id_afr0_el1)
FUNCTION_INVARIANT(id_mmfr0_el1)
FUNCTION_INVARIANT(id_mmfr1_el1)
FUNCTION_INVARIANT(id_mmfr2_el1)
FUNCTION_INVARIANT(id_mmfr3_el1)
FUNCTION_INVARIANT(id_isar0_el1)
FUNCTION_INVARIANT(id_isar1_el1)
FUNCTION_INVARIANT(id_isar2_el1)
FUNCTION_INVARIANT(id_isar3_el1)
FUNCTION_INVARIANT(id_isar4_el1)
FUNCTION_INVARIANT(id_isar5_el1)
FUNCTION_INVARIANT(clidr_el1)
FUNCTION_INVARIANT(aidr_el1)

/* ->val is filled in by kvm_sys_reg_table_init() */
static struct sys_reg_desc invariant_sys_regs[] = {
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b000),
	  NULL, get_midr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0000), Op2(0b110),
	  NULL, get_revidr_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b000),
	  NULL, get_id_pfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b001),
	  NULL, get_id_pfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b010),
	  NULL, get_id_dfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b011),
	  NULL, get_id_afr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b100),
	  NULL, get_id_mmfr0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b101),
	  NULL, get_id_mmfr1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b110),
	  NULL, get_id_mmfr2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0001), Op2(0b111),
	  NULL, get_id_mmfr3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b000),
	  NULL, get_id_isar0_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b001),
	  NULL, get_id_isar1_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b010),
	  NULL, get_id_isar2_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b011),
	  NULL, get_id_isar3_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b100),
	  NULL, get_id_isar4_el1 },
	{ Op0(0b11), Op1(0b000), CRn(0b0000), CRm(0b0010), Op2(0b101),
	  NULL, get_id_isar5_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_clidr_el1 },
	{ Op0(0b11), Op1(0b001), CRn(0b0000), CRm(0b0000), Op2(0b111),
	  NULL, get_aidr_el1 },
	{ Op0(0b11), Op1(0b011), CRn(0b0000), CRm(0b0000), Op2(0b001),
	  NULL, get_ctr_el0 },
};

static int reg_from_user(void *val, const void __user *uaddr, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_from_user(val, uaddr, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int reg_to_user(void __user *uaddr, const void *val, u64 id)
{
	/* This Just Works because we are little endian. */
	if (copy_to_user(uaddr, val, KVM_REG_SIZE(id)) != 0)
		return -EFAULT;
	return 0;
}

static int get_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;

	if (!index_to_params(id, &params))
		return -ENOENT;

	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	return reg_to_user(uaddr, &r->val, id);
}

static int set_invariant_sys_reg(u64 id, void __user *uaddr)
{
	struct sys_reg_params params;
	const struct sys_reg_desc *r;
	int err;
	u64 val = 0; /* Make sure high bits are 0 for 32-bit regs */

	if (!index_to_params(id, &params))
		return -ENOENT;
	r = find_reg(&params, invariant_sys_regs, ARRAY_SIZE(invariant_sys_regs));
	if (!r)
		return -ENOENT;

	err = reg_from_user(&val, uaddr, id);
	if (err)
		return err;

	/* This is what we mean by invariant: you can't change it. */
	if (r->val != val)
		return -EINVAL;

	return 0;
}

static bool is_valid_cache(u32 val)
{
	u32 level, ctype;

	if (val >= CSSELR_MAX)
		return false;

	/* Bottom bit is Instruction or Data bit. Next 3 bits are level. */
	level = (val >> 1);
	ctype = (cache_levels >> (level * 3)) & 7;

	switch (ctype) {
	case 0: /* No cache */
		return false;
	case 1: /* Instruction cache only */
		return (val & 1);
	case 2: /* Data cache only */
	case 4: /* Unified cache */
		return !(val & 1);
	case 3: /* Separate instruction and data caches */
		return true;
	default: /* Reserved: we can't know instruction or data. */
		return false;
	}
}

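/*
 * The "demux" registers expose one CCSIDR value per valid cache,
 * selected by the CSSELR value encoded in the register ID.
 */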
static int demux_c15_get(u64 id, void __user *uaddr)
{
	u32 val;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		return put_user(get_ccsidr(val), uval);
	default:
		return -ENOENT;
	}
}

static int demux_c15_set(u64 id, void __user *uaddr)
{
	u32 val, newval;
	u32 __user *uval = uaddr;

	/* Fail if we have unknown bits set. */
	if (id & ~(KVM_REG_ARCH_MASK|KVM_REG_SIZE_MASK|KVM_REG_ARM_COPROC_MASK
		   | ((1 << KVM_REG_ARM_COPROC_SHIFT)-1)))
		return -ENOENT;

	switch (id & KVM_REG_ARM_DEMUX_ID_MASK) {
	case KVM_REG_ARM_DEMUX_ID_CCSIDR:
		if (KVM_REG_SIZE(id) != 4)
			return -ENOENT;
		val = (id & KVM_REG_ARM_DEMUX_VAL_MASK)
			>> KVM_REG_ARM_DEMUX_VAL_SHIFT;
		if (!is_valid_cache(val))
			return -ENOENT;

		if (get_user(newval, uval))
			return -EFAULT;

		/* This is also invariant: you can't change it. */
		if (newval != get_ccsidr(val))
			return -EINVAL;
		return 0;
	default:
		return -ENOENT;
	}
}

int kvm_arm_sys_reg_get_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_get(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return get_invariant_sys_reg(reg->id, uaddr);

	return reg_to_user(uaddr, &vcpu_sys_reg(vcpu, r->reg), reg->id);
}

int kvm_arm_sys_reg_set_reg(struct kvm_vcpu *vcpu, const struct kvm_one_reg *reg)
{
	const struct sys_reg_desc *r;
	void __user *uaddr = (void __user *)(unsigned long)reg->addr;

	if ((reg->id & KVM_REG_ARM_COPROC_MASK) == KVM_REG_ARM_DEMUX)
		return demux_c15_set(reg->id, uaddr);

	if (KVM_REG_SIZE(reg->id) != sizeof(__u64))
		return -ENOENT;

	r = index_to_sys_reg_desc(vcpu, reg->id);
	if (!r)
		return set_invariant_sys_reg(reg->id, uaddr);

	return reg_from_user(&vcpu_sys_reg(vcpu, r->reg), uaddr, reg->id);
}

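/* One demux register ID is exposed per valid CSSELR value (i.e. per cache). */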
static unsigned int num_demux_regs(void)
{
	unsigned int i, count = 0;

	for (i = 0; i < CSSELR_MAX; i++)
		if (is_valid_cache(i))
			count++;

	return count;
}

static int write_demux_regids(u64 __user *uindices)
{
	u64 val = KVM_REG_ARM | KVM_REG_SIZE_U32 | KVM_REG_ARM_DEMUX;
	unsigned int i;

	val |= KVM_REG_ARM_DEMUX_ID_CCSIDR;
	for (i = 0; i < CSSELR_MAX; i++) {
		if (!is_valid_cache(i))
			continue;
		if (put_user(val | i, uindices))
			return -EFAULT;
		uindices++;
	}
	return 0;
}

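/* Build the KVM_GET/SET_ONE_REG index for an architected sys_reg descriptor. */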
static u64 sys_reg_to_index(const struct sys_reg_desc *reg)
{
	return (KVM_REG_ARM64 | KVM_REG_SIZE_U64 |
		KVM_REG_ARM64_SYSREG |
		(reg->Op0 << KVM_REG_ARM64_SYSREG_OP0_SHIFT) |
		(reg->Op1 << KVM_REG_ARM64_SYSREG_OP1_SHIFT) |
		(reg->CRn << KVM_REG_ARM64_SYSREG_CRN_SHIFT) |
		(reg->CRm << KVM_REG_ARM64_SYSREG_CRM_SHIFT) |
		(reg->Op2 << KVM_REG_ARM64_SYSREG_OP2_SHIFT));
}

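/* A NULL uind means the caller only wants the register count. */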
static bool copy_reg_to_user(const struct sys_reg_desc *reg, u64 __user **uind)
{
	if (!*uind)
		return true;

	if (put_user(sys_reg_to_index(reg), *uind))
		return false;

	(*uind)++;
	return true;
}

/* Assumed ordered tables, see kvm_sys_reg_table_init. */
static int walk_sys_regs(struct kvm_vcpu *vcpu, u64 __user *uind)
{
	const struct sys_reg_desc *i1, *i2, *end1, *end2;
	unsigned int total = 0;
	size_t num;

	/* We check for duplicates here, to allow arch-specific overrides. */
	i1 = get_target_table(vcpu->arch.target, true, &num);
	end1 = i1 + num;
	i2 = sys_reg_descs;
	end2 = sys_reg_descs + ARRAY_SIZE(sys_reg_descs);

	BUG_ON(i1 == end1 || i2 == end2);

	/* Walk carefully, as both tables may refer to the same register. */
	while (i1 || i2) {
		int cmp = cmp_sys_reg(i1, i2);
		/* target-specific overrides generic entry. */
		if (cmp <= 0) {
			/* Ignore registers we trap but don't save. */
			if (i1->reg) {
				if (!copy_reg_to_user(i1, &uind))
					return -EFAULT;
				total++;
			}
		} else {
			/* Ignore registers we trap but don't save. */
			if (i2->reg) {
				if (!copy_reg_to_user(i2, &uind))
					return -EFAULT;
				total++;
			}
		}

		if (cmp <= 0 && ++i1 == end1)
			i1 = NULL;
		if (cmp >= 0 && ++i2 == end2)
			i2 = NULL;
	}
	return total;
}

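/*
 * The total is the invariant registers, the demux (cache) registers
 * and every trapped register that is backed by the sys_reg array.
 */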
unsigned long kvm_arm_num_sys_reg_descs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(invariant_sys_regs)
		+ num_demux_regs()
		+ walk_sys_regs(vcpu, (u64 __user *)NULL);
}

int kvm_arm_copy_sys_reg_indices(struct kvm_vcpu *vcpu, u64 __user *uindices)
{
	unsigned int i;
	int err;

	/* Then give them all the invariant registers' indices. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++) {
		if (put_user(sys_reg_to_index(&invariant_sys_regs[i]), uindices))
			return -EFAULT;
		uindices++;
	}

	err = walk_sys_regs(vcpu, uindices);
	if (err < 0)
		return err;
	uindices += err;

	return write_demux_regids(uindices);
}

void kvm_sys_reg_table_init(void)
{
	unsigned int i;
	struct sys_reg_desc clidr;

	/* Make sure tables are unique and in order. */
	for (i = 1; i < ARRAY_SIZE(sys_reg_descs); i++)
		BUG_ON(cmp_sys_reg(&sys_reg_descs[i-1], &sys_reg_descs[i]) >= 0);

	/* We abuse the reset function to overwrite the table itself. */
	for (i = 0; i < ARRAY_SIZE(invariant_sys_regs); i++)
		invariant_sys_regs[i].reset(NULL, &invariant_sys_regs[i]);

	/*
	 * CLIDR format is awkward, so clean it up.  See ARM B4.1.20:
	 *
	 *   If software reads the Cache Type fields from Ctype1
	 *   upwards, once it has seen a value of 0b000, no caches
	 *   exist at further-out levels of the hierarchy. So, for
	 *   example, if Ctype3 is the first Cache Type field with a
	 *   value of 0b000, the values of Ctype4 to Ctype7 must be
	 *   ignored.
	 */
	get_clidr_el1(NULL, &clidr); /* Ugly... */
	cache_levels = clidr.val;
	for (i = 0; i < 7; i++)
		if (((cache_levels >> (i*3)) & 7) == 0)
			break;
	/* Clear all higher bits. */
	cache_levels &= (1 << (i*3))-1;
}

/**
 * kvm_reset_sys_regs - sets system registers to reset value
 * @vcpu: The VCPU pointer
 *
 * This function finds the right table above and sets the registers on the
 * virtual CPU struct to their architecturally defined reset values.
 */
void kvm_reset_sys_regs(struct kvm_vcpu *vcpu)
{
	size_t num;
	const struct sys_reg_desc *table;

	/* Catch someone adding a register without putting in reset entry. */
	memset(&vcpu->arch.ctxt.sys_regs, 0x42, sizeof(vcpu->arch.ctxt.sys_regs));

	/* Generic chip reset first (so target could override). */
	reset_sys_reg_descs(vcpu, sys_reg_descs, ARRAY_SIZE(sys_reg_descs));

	table = get_target_table(vcpu->arch.target, true, &num);
	reset_sys_reg_descs(vcpu, table, num);

	for (num = 1; num < NR_SYS_REGS; num++)
		if (vcpu_sys_reg(vcpu, num) == 0x4242424242424242)
			panic("Didn't reset vcpu_sys_reg(%zi)", num);
}
1059 | } |